"""Main module for interactive mapping using Google Earth Engine Python API and ipyleaflet.
Keep in mind that Earth Engine functions use both camel case and snake case, such as setOptions(), setCenter(), centerObject(), addLayer().
ipyleaflet functions use snake case, such as add_tile_layer(), add_wms_layer(), add_minimap().
"""
import math
import os
import time
import ee
import ipyevents
import ipyleaflet
import ipywidgets as widgets
from bqplot import pyplot as plt
from ipyfilechooser import FileChooser
from ipyleaflet import Marker, MarkerCluster, TileLayer, WidgetControl
from IPython.display import display
from .basemaps import basemaps, basemap_tiles
from .common import *
from .conversion import *
from .legends import builtin_legends
class Map(ipyleaflet.Map):
"""The Map class inherits from ipyleaflet.Map. The arguments you can pass to the Map can be found at https://ipyleaflet.readthedocs.io/en/latest/api_reference/map.html. By default, the Map will add Google Maps as the basemap. Set add_google_map = False to use OpenStreetMap as the basemap.
Returns:
object: ipyleaflet map object.
"""
def __init__(self, **kwargs):
# Authenticates Earth Engine and initializes an Earth Engine session
if "ee_initialize" not in kwargs.keys():
kwargs["ee_initialize"] = True
if kwargs["ee_initialize"]:
ee_initialize()
# Default map center location (lat, lon) and zoom level
latlon = [40, -100]
zoom = 4
# Interchangeable parameters between ipyleaflet and folium
if "height" not in kwargs.keys():
kwargs["height"] = "600px"
if "location" in kwargs.keys():
kwargs["center"] = kwargs["location"]
kwargs.pop("location")
if "center" not in kwargs.keys():
kwargs["center"] = latlon
if "zoom_start" in kwargs.keys():
kwargs["zoom"] = kwargs["zoom_start"]
kwargs.pop("zoom_start")
if "zoom" not in kwargs.keys():
kwargs["zoom"] = zoom
if "add_google_map" not in kwargs.keys() and "basemap" not in kwargs.keys():
kwargs["add_google_map"] = True
if "scroll_wheel_zoom" not in kwargs.keys():
kwargs["scroll_wheel_zoom"] = True
if "lite_mode" not in kwargs.keys():
kwargs["lite_mode"] = False
if kwargs["lite_mode"]:
kwargs["data_ctrl"] = False
kwargs["zoom_ctrl"] = True
kwargs["fullscreen_ctrl"] = False
kwargs["draw_ctrl"] = False
kwargs["search_ctrl"] = False
kwargs["measure_ctrl"] = False
kwargs["scale_ctrl"] = False
kwargs["layer_ctrl"] = False
kwargs["toolbar_ctrl"] = False
kwargs["attribution_ctrl"] = False
if "data_ctrl" not in kwargs.keys():
kwargs["data_ctrl"] = True
if "zoom_ctrl" not in kwargs.keys():
kwargs["zoom_ctrl"] = True
if "fullscreen_ctrl" not in kwargs.keys():
kwargs["fullscreen_ctrl"] = True
if "draw_ctrl" not in kwargs.keys():
kwargs["draw_ctrl"] = True
if "search_ctrl" not in kwargs.keys():
kwargs["search_ctrl"] = False
if "measure_ctrl" not in kwargs.keys():
kwargs["measure_ctrl"] = True
if "scale_ctrl" not in kwargs.keys():
kwargs["scale_ctrl"] = True
if "layer_ctrl" not in kwargs.keys():
kwargs["layer_ctrl"] = False
if "toolbar_ctrl" not in kwargs.keys():
kwargs["toolbar_ctrl"] = True
if "attribution_ctrl" not in kwargs.keys():
kwargs["attribution_ctrl"] = True
if "use_voila" not in kwargs.keys():
kwargs["use_voila"] = False
if (
"basemap" in kwargs.keys()
and isinstance(kwargs["basemap"], str)
and kwargs["basemap"] in basemaps.keys()
):
kwargs["basemap"] = basemap_tiles[kwargs["basemap"]]
if os.environ.get("USE_VOILA") is not None:
kwargs["use_voila"] = True
# Inherits the ipyleaflet Map class
super().__init__(**kwargs)
self.baseclass = "ipyleaflet"
self.layout.height = kwargs["height"]
self.clear_controls()
# The number of shapes drawn by the user using the DrawControl
self.draw_count = 0
# The list of Earth Engine Feature objects converted from drawn GeoJSON geometries
self.draw_features = []
# The Earth Engine Feature object converted from the last drawn geometry
self.draw_last_feature = None
self.draw_layer = None
self.draw_last_json = None
self.draw_last_bounds = None
self.user_roi = None
self.user_rois = None
self.last_ee_data = None
self.last_ee_layer = None
self.roi_start = False
self.roi_end = False
if kwargs["ee_initialize"]:
self.roi_reducer = ee.Reducer.mean()
self.roi_reducer_scale = None
# Lists for storing pixel values and locations based on user-drawn geometries.
self.chart_points = []
self.chart_values = []
self.chart_labels = None
self.plot_widget = None # The plot widget for plotting Earth Engine data
self.plot_control = None  # The plot control for interactive plotting
self.random_marker = None
self.legend_widget = None
self.legend_control = None
self.colorbar = None
self.ee_layers = []
self.ee_layer_names = []
self.ee_raster_layers = []
self.ee_raster_layer_names = []
self.ee_vector_layers = []
self.ee_vector_layer_names = []
self.ee_layer_dict = {}
self.search_locations = None
self.search_loc_marker = None
self.search_loc_geom = None
self.search_datasets = None
self.screenshot = None
self.toolbar = None
self.toolbar_button = None
self.vis_control = None
self.vis_widget = None
self.colorbar_ctrl = None
self.colorbar_widget = None
self.tool_output = None
self.tool_output_ctrl = None
self.layer_control = None
self.convert_ctrl = None
# Adds search button and search box
search_button = widgets.ToggleButton(
value=False,
tooltip="Search location/data",
icon="globe",
layout=widgets.Layout(
width="28px", height="28px", padding="0px 0px 0px 4px"
),
)
search_type = widgets.ToggleButtons(
options=["name/address", "lat-lon", "data"],
tooltips=[
"Search by place name or address",
"Search by lat-lon coordinates",
"Search Earth Engine data catalog",
],
)
search_type.style.button_width = "110px"
search_box = widgets.Text(
placeholder="Search by place name or address",
tooltip="Search location",
layout=widgets.Layout(width="340px"),
)
search_output = widgets.Output(
layout={
"max_width": "340px",
"max_height": "250px",
"overflow": "scroll",
}
)
search_results = widgets.RadioButtons()
assets_dropdown = widgets.Dropdown(
options=[],
layout=widgets.Layout(min_width="279px", max_width="279px"),
)
import_btn = widgets.Button(
description="import",
button_style="primary",
tooltip="Click to import the selected asset",
layout=widgets.Layout(min_width="57px", max_width="57px"),
)
def import_btn_clicked(b):
if assets_dropdown.value != "":
datasets = self.search_datasets
dataset = datasets[assets_dropdown.index]
dataset_uid = "dataset_" + random_string(string_length=3)
line1 = "{} = {}\n".format(dataset_uid, dataset["ee_id_snippet"])
line2 = "Map.addLayer(" + dataset_uid + ', {}, "' + dataset["id"] + '")'
contents = "".join([line1, line2])
create_code_cell(contents)
import_btn.on_click(import_btn_clicked)
html_widget = widgets.HTML()
def dropdown_change(change):
dropdown_index = assets_dropdown.index
if dropdown_index is not None and dropdown_index >= 0:
with search_output:
search_output.clear_output(wait=True)
print("Loading ...")
datasets = self.search_datasets
dataset = datasets[dropdown_index]
dataset_html = ee_data_html(dataset)
html_widget.value = dataset_html
search_output.clear_output(wait=True)
display(html_widget)
assets_dropdown.observe(dropdown_change, names="value")
assets_combo = widgets.HBox()
assets_combo.children = [import_btn, assets_dropdown]
def search_result_change(change):
result_index = search_results.index
locations = self.search_locations
location = locations[result_index]
latlon = (location.lat, location.lng)
self.search_loc_geom = ee.Geometry.Point(location.lng, location.lat)
marker = self.search_loc_marker
marker.location = latlon
self.center = latlon
search_results.observe(search_result_change, names="value")
def search_btn_click(change):
if change["new"]:
search_widget.children = [search_button, search_result_widget]
search_type.value = "name/address"
else:
search_widget.children = [search_button]
search_result_widget.children = [search_type, search_box]
search_button.observe(search_btn_click, "value")
def search_type_changed(change):
search_box.value = ""
search_output.clear_output()
if change["new"] == "name/address":
search_box.placeholder = "Search by place name or address, e.g., Paris"
assets_dropdown.options = []
search_result_widget.children = [
search_type,
search_box,
search_output,
]
elif change["new"] == "lat-lon":
search_box.placeholder = "Search by lat-lon, e.g., 40, -100"
assets_dropdown.options = []
search_result_widget.children = [
search_type,
search_box,
search_output,
]
elif change["new"] == "data":
search_box.placeholder = (
"Search GEE data catalog by keywords, e.g., elevation"
)
search_result_widget.children = [
search_type,
search_box,
assets_combo,
search_output,
]
search_type.observe(search_type_changed, names="value")
def search_box_callback(text):
if text.value != "":
if search_type.value == "name/address":
g = geocode(text.value)
elif search_type.value == "lat-lon":
g = geocode(text.value, reverse=True)
if g is None and latlon_from_text(text.value):
search_output.clear_output()
latlon = latlon_from_text(text.value)
self.search_loc_geom = ee.Geometry.Point(latlon[1], latlon[0])
if self.search_loc_marker is None:
marker = Marker(
location=latlon,
draggable=False,
name="Search location",
)
self.search_loc_marker = marker
self.add_layer(marker)
self.center = latlon
else:
marker = self.search_loc_marker
marker.location = latlon
self.center = latlon
with search_output:
print("No address found for {}".format(latlon))
return
elif search_type.value == "data":
search_output.clear_output()
with search_output:
print("Searching ...")
self.default_style = {"cursor": "wait"}
ee_assets = search_ee_data(text.value)
self.search_datasets = ee_assets
asset_titles = [x["title"] for x in ee_assets]
assets_dropdown.options = asset_titles
search_output.clear_output()
if len(ee_assets) > 0:
html_widget.value = ee_data_html(ee_assets[0])
with search_output:
display(html_widget)
self.default_style = {"cursor": "default"}
return
self.search_locations = g
if g is not None and len(g) > 0:
top_loc = g[0]
latlon = (top_loc.lat, top_loc.lng)
self.search_loc_geom = ee.Geometry.Point(top_loc.lng, top_loc.lat)
if self.search_loc_marker is None:
marker = Marker(
location=latlon,
draggable=False,
name="Search location",
)
self.search_loc_marker = marker
self.add_layer(marker)
self.center = latlon
else:
marker = self.search_loc_marker
marker.location = latlon
self.center = latlon
search_results.options = [x.address for x in g]
search_result_widget.children = [
search_type,
search_box,
search_output,
]
with search_output:
search_output.clear_output(wait=True)
display(search_results)
else:
with search_output:
search_output.clear_output()
print("No results could be found.")
search_box.on_submit(search_box_callback)
search_result_widget = widgets.VBox([search_type, search_box])
search_widget = widgets.HBox([search_button])
search_event = ipyevents.Event(
source=search_widget, watched_events=["mouseenter", "mouseleave"]
)
def handle_search_event(event):
if event["type"] == "mouseenter":
search_widget.children = [search_button, search_result_widget]
# search_type.value = "name/address"
elif event["type"] == "mouseleave":
if not search_button.value:
search_widget.children = [search_button]
search_result_widget.children = [search_type, search_box]
search_event.on_dom_event(handle_search_event)
data_control = WidgetControl(widget=search_widget, position="topleft")
if kwargs.get("data_ctrl"):
self.add_control(control=data_control)
search_marker = Marker(
icon=ipyleaflet.AwesomeIcon(
name="check", marker_color="green", icon_color="darkgreen"
)
)
search = ipyleaflet.SearchControl(
position="topleft",
url="https://nominatim.openstreetmap.org/search?format=json&q={s}",
zoom=5,
property_name="display_name",
marker=search_marker,
)
if kwargs.get("search_ctrl"):
self.add_control(search)
if kwargs.get("zoom_ctrl"):
self.add_control(ipyleaflet.ZoomControl(position="topleft"))
if kwargs.get("layer_ctrl"):
layer_control = ipyleaflet.LayersControl(position="topright")
self.layer_control = layer_control
self.add_control(layer_control)
if kwargs.get("scale_ctrl"):
scale = ipyleaflet.ScaleControl(position="bottomleft")
self.scale_control = scale
self.add_control(scale)
if kwargs.get("fullscreen_ctrl"):
fullscreen = ipyleaflet.FullScreenControl()
self.fullscreen_control = fullscreen
self.add_control(fullscreen)
if kwargs.get("measure_ctrl"):
measure = ipyleaflet.MeasureControl(
position="bottomleft",
active_color="orange",
primary_length_unit="kilometers",
)
self.measure_control = measure
self.add_control(measure)
if kwargs.get("add_google_map"):
self.add_layer(basemap_tiles["ROADMAP"])
if kwargs.get("attribution_ctrl"):
self.add_control(ipyleaflet.AttributionControl(position="bottomright"))
draw_control = ipyleaflet.DrawControl(
marker={"shapeOptions": {"color": "#3388ff"}},
rectangle={"shapeOptions": {"color": "#3388ff"}},
circle={"shapeOptions": {"color": "#3388ff"}},
circlemarker={},
edit=True,
remove=True,
)
draw_control_lite = ipyleaflet.DrawControl(
marker={},
rectangle={"shapeOptions": {"color": "#3388ff"}},
circle={"shapeOptions": {"color": "#3388ff"}},
circlemarker={},
polyline={},
polygon={},
edit=False,
remove=False,
)
# Handles draw events
def handle_draw(target, action, geo_json):
try:
self.roi_start = True
geom = geojson_to_ee(geo_json, False)
self.user_roi = geom
feature = ee.Feature(geom)
self.draw_last_json = geo_json
self.draw_last_feature = feature
if action == "deleted" and len(self.draw_features) > 0:
self.draw_features.remove(feature)
self.draw_count -= 1
else:
self.draw_features.append(feature)
self.draw_count += 1
collection = ee.FeatureCollection(self.draw_features)
self.user_rois = collection
ee_draw_layer = ee_tile_layer(
collection, {"color": "blue"}, "Drawn Features", False, 0.5
)
draw_layer_index = self.find_layer_index("Drawn Features")
if draw_layer_index == -1:
self.add_layer(ee_draw_layer)
self.draw_layer = ee_draw_layer
else:
self.substitute_layer(self.draw_layer, ee_draw_layer)
self.draw_layer = ee_draw_layer
self.roi_end = True
self.roi_start = False
except Exception as e:
self.draw_count = 0
self.draw_features = []
self.draw_last_feature = None
self.draw_layer = None
self.user_roi = None
self.roi_start = False
self.roi_end = False
print("There was an error creating Earth Engine Feature.")
raise Exception(e)
draw_control.on_draw(handle_draw)
if kwargs.get("draw_ctrl"):
self.add_control(draw_control)
self.draw_control = draw_control
self.draw_control_lite = draw_control_lite
# Dropdown widget for plotting
self.plot_dropdown_control = None
self.plot_dropdown_widget = None
self.plot_options = {}
self.plot_marker_cluster = MarkerCluster(name="Marker Cluster")
self.plot_coordinates = []
self.plot_markers = []
self.plot_last_click = []
self.plot_all_clicks = []
self.plot_checked = False
self.inspector_checked = False
inspector_output = widgets.Output(layout={"border": "1px solid black"})
inspector_output_control = WidgetControl(
widget=inspector_output, position="topright"
)
tool_output = widgets.Output()
self.tool_output = tool_output
tool_output.clear_output(wait=True)
save_map_widget = widgets.VBox()
save_type = widgets.ToggleButtons(
options=["HTML", "PNG", "JPG"],
tooltips=[
"Save the map as an HTML file",
"Take a screenshot and save as a PNG file",
"Take a screenshot and save as a JPG file",
],
)
file_chooser = FileChooser(os.getcwd())
file_chooser.default_filename = "my_map.html"
file_chooser.use_dir_icons = True
ok_cancel = widgets.ToggleButtons(
value=None,
options=["OK", "Cancel"],
tooltips=["OK", "Cancel"],
button_style="primary",
)
def save_type_changed(change):
ok_cancel.value = None
# file_chooser.reset()
file_chooser.default_path = os.getcwd()
if change["new"] == "HTML":
file_chooser.default_filename = "my_map.html"
elif change["new"] == "PNG":
file_chooser.default_filename = "my_map.png"
elif change["new"] == "JPG":
file_chooser.default_filename = "my_map.jpg"
save_map_widget.children = [save_type, file_chooser]
def chooser_callback(chooser):
save_map_widget.children = [save_type, file_chooser, ok_cancel]
def ok_cancel_clicked(change):
if change["new"] == "OK":
file_path = file_chooser.selected
ext = os.path.splitext(file_path)[1]
if save_type.value == "HTML" and ext.upper() == ".HTML":
tool_output.clear_output()
self.to_html(file_path)
elif save_type.value == "PNG" and ext.upper() == ".PNG":
tool_output.clear_output()
self.toolbar_button.value = False
time.sleep(1)
screen_capture(outfile=file_path)
elif save_type.value == "JPG" and ext.upper() == ".JPG":
tool_output.clear_output()
self.toolbar_button.value = False
time.sleep(1)
screen_capture(outfile=file_path)
else:
label = widgets.Label(
value="The selected file extension does not match the selected exporting type."
)
save_map_widget.children = [save_type, file_chooser, label]
self.toolbar_reset()
elif change["new"] == "Cancel":
tool_output.clear_output()
self.toolbar_reset()
save_type.observe(save_type_changed, names="value")
ok_cancel.observe(ok_cancel_clicked, names="value")
file_chooser.register_callback(chooser_callback)
save_map_widget.children = [save_type, file_chooser]
tools = {
"info": {"name": "inspector", "tooltip": "Inspector"},
"bar-chart": {"name": "plotting", "tooltip": "Plotting"},
"camera": {
"name": "to_image",
"tooltip": "Save map as HTML or image",
},
"eraser": {
"name": "eraser",
"tooltip": "Remove all drawn features",
},
"folder-open": {
"name": "open_data",
"tooltip": "Open local vector/raster data",
},
# "cloud-download": {
# "name": "export_data",
# "tooltip": "Export Earth Engine data",
# },
"retweet": {
"name": "convert_js",
"tooltip": "Convert Earth Engine JavaScript to Python",
},
"gears": {
"name": "whitebox",
"tooltip": "WhiteboxTools for local geoprocessing",
},
"google": {
"name": "geetoolbox",
"tooltip": "GEE Toolbox for cloud computing",
},
"map": {
"name": "basemap",
"tooltip": "Change basemap",
},
"globe": {
"name": "timelapse",
"tooltip": "Create timelapse",
},
"fast-forward": {
"name": "timeslider",
"tooltip": "Activate timeslider",
},
"hand-o-up": {
"name": "draw",
"tooltip": "Collect training samples",
},
"line-chart": {
"name": "transect",
"tooltip": "Creating and plotting transects",
},
"random": {
"name": "sankee",
"tooltip": "Sankey plots",
},
"question": {
"name": "help",
"tooltip": "Get help",
},
}
if kwargs["use_voila"]:
voila_tools = ["camera", "folder-open", "cloud-download", "gears"]
for item in voila_tools:
if item in tools.keys():
del tools[item]
icons = list(tools.keys())
tooltips = [item["tooltip"] for item in list(tools.values())]
icon_width = "32px"
icon_height = "32px"
n_cols = 3
n_rows = math.ceil(len(icons) / n_cols)
toolbar_grid = widgets.GridBox(
children=[
widgets.ToggleButton(
layout=widgets.Layout(
width="auto", height="auto", padding="0px 0px 0px 4px"
),
button_style="primary",
icon=icons[i],
tooltip=tooltips[i],
)
for i in range(len(icons))
],
layout=widgets.Layout(
width="107px",
grid_template_columns=(icon_width + " ") * n_cols,
grid_template_rows=(icon_height + " ") * n_rows,
grid_gap="1px 1px",
padding="5px",
),
)
self.toolbar = toolbar_grid
def tool_callback(change):
if change["new"]:
current_tool = change["owner"]
for tool in toolbar_grid.children:
if tool is not current_tool:
tool.value = False
tool = change["owner"]
tool_name = tools[tool.icon]["name"]
if tool_name == "to_image":
if tool_output_control not in self.controls:
self.add_control(tool_output_control)
with tool_output:
tool_output.clear_output()
display(save_map_widget)
elif tool_name == "eraser":
self.remove_drawn_features()
tool.value = False
elif tool_name == "inspector":
self.inspector_checked = tool.value
if not self.inspector_checked:
inspector_output.clear_output()
elif tool_name == "plotting":
self.plot_checked = True
plot_dropdown_widget = widgets.Dropdown(
options=list(self.ee_raster_layer_names),
)
plot_dropdown_widget.layout.width = "18ex"
self.plot_dropdown_widget = plot_dropdown_widget
plot_dropdown_control = WidgetControl(
widget=plot_dropdown_widget, position="topright"
)
self.plot_dropdown_control = plot_dropdown_control
self.add_control(plot_dropdown_control)
if self.draw_control in self.controls:
self.remove_control(self.draw_control)
self.add_control(self.draw_control_lite)
elif tool_name == "open_data":
from .toolbar import open_data_widget
open_data_widget(self)
elif tool_name == "convert_js":
from .toolbar import convert_js2py
convert_js2py(self)
elif tool_name == "whitebox":
import whiteboxgui.whiteboxgui as wbt
tools_dict = wbt.get_wbt_dict()
wbt_toolbox = wbt.build_toolbox(
tools_dict, max_width="800px", max_height="500px"
)
wbt_control = WidgetControl(
widget=wbt_toolbox, position="bottomright"
)
self.whitebox = wbt_control
self.add_control(wbt_control)
elif tool_name == "geetoolbox":
from .toolbar import get_tools_dict, build_toolbox
tools_dict = get_tools_dict()
gee_toolbox = build_toolbox(
tools_dict, max_width="800px", max_height="500px"
)
geetoolbox_control = WidgetControl(
widget=gee_toolbox, position="bottomright"
)
self.geetoolbox = geetoolbox_control
self.add_control(geetoolbox_control)
elif tool_name == "basemap":
from .toolbar import change_basemap
change_basemap(self)
elif tool_name == "timelapse":
from .toolbar import timelapse
timelapse(self)
self.toolbar_reset()
elif tool_name == "timeslider":
from .toolbar import time_slider
time_slider(self)
self.toolbar_reset()
elif tool_name == "draw":
from .toolbar import collect_samples
self.training_ctrl = None
collect_samples(self)
elif tool_name == "transect":
from .toolbar import plot_transect
plot_transect(self)
elif tool_name == "sankee":
from .toolbar import sankee_gui
# try:
# self.add_basemap("HYBRID")
# except Exception as _:
# pass
sample_roi = ee.Geometry.Polygon(
[
[
[-115.01184401606046, 36.24170785506492],
[-114.98849806879484, 36.29928186470082],
[-115.25628981684171, 36.35238941394592],
[-115.34692702387296, 36.310348922031565],
[-115.37988600824796, 36.160811202271944],
[-115.30298171137296, 36.03653336474891],
[-115.25628981684171, 36.05207884201088],
[-115.26590285395109, 36.226199908103695],
[-115.19174513910734, 36.25499793268206],
]
]
)
self.addLayer(sample_roi, {}, "Las Vegas")
sankee_gui(self)
elif tool_name == "help":
import webbrowser
webbrowser.open_new_tab("https://geemap.org")
current_tool.value = False
else:
tool = change["owner"]
tool_name = tools[tool.icon]["name"]
if tool_name == "to_image":
tool_output.clear_output()
save_map_widget.children = [save_type, file_chooser]
if tool_output_control in self.controls:
self.remove_control(tool_output_control)
if tool_name == "inspector":
inspector_output.clear_output()
self.inspector_checked = False
if inspector_output_control in self.controls:
self.remove_control(inspector_output_control)
elif tool_name == "plotting":
self.plot_checked = False
plot_dropdown_widget = self.plot_dropdown_widget
plot_dropdown_control = self.plot_dropdown_control
if plot_dropdown_control in self.controls:
self.remove_control(plot_dropdown_control)
del plot_dropdown_widget
del plot_dropdown_control
if self.plot_control in self.controls:
plot_control = self.plot_control
plot_widget = self.plot_widget
self.remove_control(plot_control)
self.plot_control = None
self.plot_widget = None
del plot_control
del plot_widget
if (
self.plot_marker_cluster is not None
and self.plot_marker_cluster in self.layers
):
self.remove_layer(self.plot_marker_cluster)
if self.draw_control_lite in self.controls:
self.remove_control(self.draw_control_lite)
self.add_control(self.draw_control)
elif tool_name == "whitebox":
if self.whitebox is not None and self.whitebox in self.controls:
self.remove_control(self.whitebox)
elif tool_name == "convert_js":
if (
self.convert_ctrl is not None
and self.convert_ctrl in self.controls
):
self.remove_control(self.convert_ctrl)
for tool in toolbar_grid.children:
tool.observe(tool_callback, "value")
toolbar_button = widgets.ToggleButton(
value=False,
tooltip="Toolbar",
icon="wrench",
layout=widgets.Layout(
width="28px", height="28px", padding="0px 0px 0px 4px"
),
)
self.toolbar_button = toolbar_button
layers_button = widgets.ToggleButton(
value=False,
tooltip="Layers",
icon="server",
layout=widgets.Layout(height="28px", width="72px"),
)
toolbar_widget = widgets.VBox()
toolbar_widget.children = [toolbar_button]
toolbar_header = widgets.HBox()
toolbar_header.children = [layers_button, toolbar_button]
toolbar_footer = widgets.VBox()
toolbar_footer.children = [toolbar_grid]
toolbar_event = ipyevents.Event(
source=toolbar_widget, watched_events=["mouseenter", "mouseleave"]
)
def handle_toolbar_event(event):
if event["type"] == "mouseenter":
toolbar_widget.children = [toolbar_header, toolbar_footer]
elif event["type"] == "mouseleave":
if not toolbar_button.value:
toolbar_widget.children = [toolbar_button]
toolbar_button.value = False
layers_button.value = False
toolbar_event.on_dom_event(handle_toolbar_event)
def toolbar_btn_click(change):
if change["new"]:
layers_button.value = False
toolbar_widget.children = [toolbar_header, toolbar_footer]
else:
if not layers_button.value:
toolbar_widget.children = [toolbar_button]
toolbar_button.observe(toolbar_btn_click, "value")
def layers_btn_click(change):
if change["new"]:
layers_hbox = []
all_layers_chk = widgets.Checkbox(
value=False,
description="All layers on/off",
indent=False,
layout=widgets.Layout(height="18px", padding="0px 8px 25px 8px"),
)
all_layers_chk.layout.width = "30ex"
layers_hbox.append(all_layers_chk)
def all_layers_chk_changed(change):
if change["new"]:
for layer in self.layers:
layer.visible = True
else:
for layer in self.layers:
layer.visible = False
all_layers_chk.observe(all_layers_chk_changed, "value")
layers = [
lyr
for lyr in self.layers[1:]
if (
isinstance(lyr, TileLayer)
or isinstance(lyr, ipyleaflet.WMSLayer)
)
]
# If the map contains layer types not handled here (e.g., GeoJSON, GeoData), add the ipyleaflet built-in LayersControl
if len(layers) < (len(self.layers) - 1):
if self.layer_control is None:
layer_control = ipyleaflet.LayersControl(position="topright")
self.layer_control = layer_control
if self.layer_control not in self.controls:
self.add_control(self.layer_control)
# for non-TileLayer, use layer.style={'opacity':0, 'fillOpacity': 0} to turn layer off.
for layer in layers:
layer_chk = widgets.Checkbox(
value=layer.visible,
description=layer.name,
indent=False,
layout=widgets.Layout(height="18px"),
)
layer_chk.layout.width = "25ex"
layer_opacity = widgets.FloatSlider(
value=layer.opacity,
min=0,
max=1,
step=0.01,
readout=False,
layout=widgets.Layout(width="80px"),
)
layer_settings = widgets.ToggleButton(
icon="gear",
tooltip=layer.name,
layout=widgets.Layout(
width="25px", height="25px", padding="0px 0px 0px 5px"
),
)
def layer_vis_on_click(change):
if change["new"]:
layer_name = change["owner"].tooltip
# if layer_name in self.ee_raster_layer_names:
if layer_name in self.ee_layer_names:
layer_dict = self.ee_layer_dict[layer_name]
if self.vis_widget is not None:
self.vis_widget = None
self.vis_widget = self.create_vis_widget(layer_dict)
if self.vis_control in self.controls:
self.remove_control(self.vis_control)
self.vis_control = None
vis_control = WidgetControl(
widget=self.vis_widget, position="topright"
)
self.add_control((vis_control))
self.vis_control = vis_control
else:
if self.vis_widget is not None:
self.vis_widget = None
if self.vis_control is not None:
if self.vis_control in self.controls:
self.remove_control(self.vis_control)
self.vis_control = None
change["owner"].value = False
layer_settings.observe(layer_vis_on_click, "value")
def layer_chk_changed(change):
layer_name = change["owner"].description
if layer_name in self.ee_layer_names:
if change["new"]:
if "legend" in self.ee_layer_dict[layer_name].keys():
legend = self.ee_layer_dict[layer_name]["legend"]
if legend not in self.controls:
self.add_control(legend)
if "colorbar" in self.ee_layer_dict[layer_name].keys():
colorbar = self.ee_layer_dict[layer_name][
"colorbar"
]
if colorbar not in self.controls:
self.add_control(colorbar)
else:
if "legend" in self.ee_layer_dict[layer_name].keys():
legend = self.ee_layer_dict[layer_name]["legend"]
if legend in self.controls:
self.remove_control(legend)
if "colorbar" in self.ee_layer_dict[layer_name].keys():
colorbar = self.ee_layer_dict[layer_name][
"colorbar"
]
if colorbar in self.controls:
self.remove_control(colorbar)
layer_chk.observe(layer_chk_changed, "value")
widgets.jslink((layer_chk, "value"), (layer, "visible"))
widgets.jsdlink((layer_opacity, "value"), (layer, "opacity"))
hbox = widgets.HBox(
[layer_chk, layer_settings, layer_opacity],
layout=widgets.Layout(padding="0px 8px 0px 8px"),
)
layers_hbox.append(hbox)
toolbar_footer.children = layers_hbox
toolbar_button.value = False
else:
toolbar_footer.children = [toolbar_grid]
layers_button.observe(layers_btn_click, "value")
toolbar_control = WidgetControl(widget=toolbar_widget, position="topright")
if kwargs.get("toolbar_ctrl"):
self.add_control(toolbar_control)
tool_output_control = WidgetControl(widget=tool_output, position="topright")
# self.add_control(tool_output_control)
def handle_interaction(**kwargs):
latlon = kwargs.get("coordinates")
if kwargs.get("type") == "click" and self.inspector_checked:
self.default_style = {"cursor": "wait"}
if inspector_output_control not in self.controls:
self.add_control(inspector_output_control)
sample_scale = self.getScale()
layers = self.ee_layers
with inspector_output:
inspector_output.clear_output(wait=True)
print(
f"Point ({latlon[1]:.4f}, {latlon[0]:.4f}) at {int(self.get_scale())}m/px"
)
xy = ee.Geometry.Point(latlon[::-1])
for index, ee_object in enumerate(layers):
layer_names = self.ee_layer_names
layer_name = layer_names[index]
object_type = ee_object.__class__.__name__
if not self.ee_layer_dict[layer_name]["ee_layer"].visible:
continue
try:
if isinstance(ee_object, ee.ImageCollection):
ee_object = ee_object.mosaic()
elif (
isinstance(ee_object, ee.geometry.Geometry)
or isinstance(ee_object, ee.feature.Feature)
or isinstance(
ee_object,
ee.featurecollection.FeatureCollection,
)
):
ee_object = ee.FeatureCollection(ee_object)
if isinstance(ee_object, ee.Image):
item = ee_object.reduceRegion(
ee.Reducer.first(), xy, sample_scale
).getInfo()
b_name = "band"
if len(item) > 1:
b_name = "bands"
print(
"{}: {} ({} {})".format(
layer_name,
object_type,
len(item),
b_name,
)
)
keys = item.keys()
for key in keys:
print(" {}: {}".format(key, item[key]))
elif isinstance(ee_object, ee.FeatureCollection):
# Check geometry type
geom_type = (
ee.Feature(ee_object.first()).geometry().type()
)
lat, lon = latlon
delta = 0.005
bbox = ee.Geometry.BBox(
lon - delta,
lat - delta,
lon + delta,
lat + delta,
)
# Create a bounding box to filter points
xy = ee.Algorithms.If(
geom_type.compareTo(ee.String("Point")),
xy,
bbox,
)
filtered = ee_object.filterBounds(xy)
size = filtered.size().getInfo()
if size > 0:
first = filtered.first()
props = first.toDictionary().getInfo()
b_name = "property"
if len(props) > 1:
b_name = "properties"
print(
"{}: Feature ({} {})".format(
layer_name, len(props), b_name
)
)
keys = props.keys()
for key in keys:
print(" {}: {}".format(key, props[key]))
except Exception as e:
print(e)
self.default_style = {"cursor": "crosshair"}
if (
kwargs.get("type") == "click"
and self.plot_checked
and len(self.ee_raster_layers) > 0
):
plot_layer_name = self.plot_dropdown_widget.value
layer_names = self.ee_raster_layer_names
layers = self.ee_raster_layers
index = layer_names.index(plot_layer_name)
ee_object = layers[index]
if isinstance(ee_object, ee.ImageCollection):
ee_object = ee_object.mosaic()
try:
self.default_style = {"cursor": "wait"}
plot_options = self.plot_options
sample_scale = self.getScale()
if "sample_scale" in plot_options.keys() and (
plot_options["sample_scale"] is not None
):
sample_scale = plot_options["sample_scale"]
if "title" not in plot_options.keys():
plot_options["title"] = plot_layer_name
if ("add_marker_cluster" in plot_options.keys()) and plot_options[
"add_marker_cluster"
]:
plot_coordinates = self.plot_coordinates
markers = self.plot_markers
marker_cluster = self.plot_marker_cluster
plot_coordinates.append(latlon)
self.plot_last_click = latlon
self.plot_all_clicks = plot_coordinates
markers.append(Marker(location=latlon))
marker_cluster.markers = markers
self.plot_marker_cluster = marker_cluster
band_names = ee_object.bandNames().getInfo()
if any(len(name) > 3 for name in band_names):
band_names = list(range(1, len(band_names) + 1))
self.chart_labels = band_names
if self.roi_end:
if self.roi_reducer_scale is None:
scale = ee_object.select(0).projection().nominalScale()
else:
scale = self.roi_reducer_scale
dict_values = ee_object.reduceRegion(
reducer=self.roi_reducer,
geometry=self.user_roi,
scale=scale,
bestEffort=True,
).getInfo()
self.chart_points.append(
self.user_roi.centroid(1).coordinates().getInfo()
)
else:
xy = ee.Geometry.Point(latlon[::-1])
dict_values = (
ee_object.sample(xy, scale=sample_scale)
.first()
.toDictionary()
.getInfo()
)
self.chart_points.append(xy.coordinates().getInfo())
band_values = list(dict_values.values())
self.chart_values.append(band_values)
self.plot(band_names, band_values, **plot_options)
if plot_options["title"] == plot_layer_name:
del plot_options["title"]
self.default_style = {"cursor": "crosshair"}
self.roi_end = False
except Exception as e:
if self.plot_widget is not None:
with self.plot_widget:
self.plot_widget.clear_output()
print("No data for the clicked location.")
else:
print(e)
self.default_style = {"cursor": "crosshair"}
self.roi_end = False
self.on_interaction(handle_interaction)
def set_options(self, mapTypeId="HYBRID", styles=None, types=None):
"""Adds Google basemap and controls to the ipyleaflet map.
Args:
mapTypeId (str, optional): A mapTypeId to set the basemap to. Can be one of "ROADMAP", "SATELLITE", "HYBRID" or "TERRAIN" to select one of the standard Google Maps API map types. Defaults to 'HYBRID'.
styles (object, optional): A dictionary of custom MapTypeStyle objects keyed with a name that will appear in the map's Map Type Controls. Defaults to None.
types (list, optional): A list of mapTypeIds to make available. If omitted but styles is specified, all of the style keys are appended to the standard Google Maps API map types. Defaults to None.
"""
self.clear_layers()
self.clear_controls()
self.scroll_wheel_zoom = True
self.add_control(ipyleaflet.ZoomControl(position="topleft"))
self.add_control(ipyleaflet.LayersControl(position="topright"))
self.add_control(ipyleaflet.ScaleControl(position="bottomleft"))
self.add_control(ipyleaflet.FullScreenControl())
self.add_control(ipyleaflet.DrawControl())
measure = ipyleaflet.MeasureControl(
position="bottomleft",
active_color="orange",
primary_length_unit="kilometers",
)
self.add_control(measure)
try:
self.add_layer(basemap_tiles[mapTypeId])
except Exception:
raise ValueError(
'Google basemaps can only be one of "ROADMAP", "SATELLITE", "HYBRID" or "TERRAIN".'
)
setOptions = set_options
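# Usage sketch for set_options()/setOptions() (assumes an interactive notebook
# session; the mapTypeId must be one of the Google basemap keys noted above):
#
#   m = Map(center=[40, -100], zoom=4)
#   m.setOptions("TERRAIN")
#
# Any other mapTypeId raises the ValueError defined above.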
def add_ee_layer(
self, ee_object, vis_params={}, name=None, shown=True, opacity=1.0
):
"""Adds a given EE object to the map as a layer.
Args:
ee_object (Collection|Feature|Image|MapId): The object to add to the map.
vis_params (dict, optional): The visualization parameters. Defaults to {}.
name (str, optional): The name of the layer. Defaults to 'Layer N'.
shown (bool, optional): A flag indicating whether the layer should be on by default. Defaults to True.
opacity (float, optional): The layer's opacity represented as a number between 0 and 1. Defaults to 1.
"""
from box import Box
image = None
if name is None:
layer_count = len(self.layers)
name = "Layer " + str(layer_count + 1)
if (
not isinstance(ee_object, ee.Image)
and not isinstance(ee_object, ee.ImageCollection)
and not isinstance(ee_object, ee.FeatureCollection)
and not isinstance(ee_object, ee.Feature)
and not isinstance(ee_object, ee.Geometry)
):
err_str = "\n\nThe image argument in 'addLayer' function must be an instance of one of ee.Image, ee.ImageCollection, ee.Geometry, ee.Feature or ee.FeatureCollection."
raise AttributeError(err_str)
if (
isinstance(ee_object, ee.geometry.Geometry)
or isinstance(ee_object, ee.feature.Feature)
or isinstance(ee_object, ee.featurecollection.FeatureCollection)
):
features = ee.FeatureCollection(ee_object)
width = 2
if "width" in vis_params:
width = vis_params["width"]
color = "000000"
if "color" in vis_params:
color = vis_params["color"]
image_fill = features.style(**{"fillColor": color}).updateMask(
ee.Image.constant(0.5)
)
image_outline = features.style(
**{"color": color, "fillColor": "00000000", "width": width}
)
image = image_fill.blend(image_outline)
elif isinstance(ee_object, ee.image.Image):
image = ee_object
elif isinstance(ee_object, ee.imagecollection.ImageCollection):
image = ee_object.mosaic()
if "palette" in vis_params and isinstance(vis_params["palette"], Box):
try:
vis_params["palette"] = vis_params["palette"]["default"]
except Exception as e:
print("The provided palette is invalid.")
raise Exception(e)
map_id_dict = ee.Image(image).getMapId(vis_params)
tile_layer = TileLayer(
url=map_id_dict["tile_fetcher"].url_format,
attribution="Google Earth Engine",
name=name,
opacity=opacity,
visible=shown,
)
layer = self.find_layer(name=name)
if layer is not None:
existing_object = self.ee_layer_dict[name]["ee_object"]
if isinstance(existing_object, ee.Image) or isinstance(
existing_object, ee.ImageCollection
):
self.ee_raster_layers.remove(existing_object)
self.ee_raster_layer_names.remove(name)
if self.plot_dropdown_widget is not None:
self.plot_dropdown_widget.options = list(self.ee_raster_layer_names)
elif (
isinstance(ee_object, ee.Geometry)
or isinstance(ee_object, ee.Feature)
or isinstance(ee_object, ee.FeatureCollection)
):
self.ee_vector_layers.remove(existing_object)
self.ee_vector_layer_names.remove(name)
self.ee_layers.remove(existing_object)
self.ee_layer_names.remove(name)
self.remove_layer(layer)
self.ee_layers.append(ee_object)
if name not in self.ee_layer_names:
self.ee_layer_names.append(name)
self.ee_layer_dict[name] = {
"ee_object": ee_object,
"ee_layer": tile_layer,
"vis_params": vis_params,
}
self.add_layer(tile_layer)
self.last_ee_layer = self.ee_layer_dict[name]
self.last_ee_data = self.ee_layer_dict[name]["ee_object"]
if isinstance(ee_object, ee.Image) or isinstance(ee_object, ee.ImageCollection):
self.ee_raster_layers.append(ee_object)
self.ee_raster_layer_names.append(name)
if self.plot_dropdown_widget is not None:
self.plot_dropdown_widget.options = list(self.ee_raster_layer_names)
elif (
isinstance(ee_object, ee.Geometry)
or isinstance(ee_object, ee.Feature)
or isinstance(ee_object, ee.FeatureCollection)
):
self.ee_vector_layers.append(ee_object)
self.ee_vector_layer_names.append(name)
addLayer = add_ee_layer
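# Usage sketch for add_ee_layer()/addLayer(). The asset ids below are public
# Earth Engine catalog entries; the vis_params values are illustrative:
#
#   m = Map()
#   dem = ee.Image("USGS/SRTMGL1_003")
#   m.addLayer(dem, {"min": 0, "max": 4000, "palette": ["006633", "E5FFCC", "662A00"]}, "SRTM DEM")
#   states = ee.FeatureCollection("TIGER/2018/States")
#   m.addLayer(states, {"color": "0000FF"}, "US States")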
def remove_ee_layer(self, name):
"""Removes an Earth Engine layer.
Args:
name (str): The name of the Earth Engine layer to remove.
"""
if name in self.ee_layer_dict:
ee_object = self.ee_layer_dict[name]["ee_object"]
ee_layer = self.ee_layer_dict[name]["ee_layer"]
if name in self.ee_raster_layer_names:
self.ee_raster_layer_names.remove(name)
self.ee_raster_layers.remove(ee_object)
elif name in self.ee_vector_layer_names:
self.ee_vector_layer_names.remove(name)
self.ee_vector_layers.remove(ee_object)
self.ee_layers.remove(ee_object)
self.ee_layer_names.remove(name)
if ee_layer in self.layers:
self.remove_layer(ee_layer)
def draw_layer_on_top(self):
"""Move user-drawn feature layer to the top of all layers."""
draw_layer_index = self.find_layer_index(name="Drawn Features")
if draw_layer_index > -1 and draw_layer_index < (len(self.layers) - 1):
layers = list(self.layers)
layers = (
layers[0:draw_layer_index]
+ layers[(draw_layer_index + 1) :]
+ [layers[draw_layer_index]]
)
self.layers = layers
def set_center(self, lon, lat, zoom=None):
"""Centers the map view at a given coordinates with the given zoom level.
Args:
lon (float): The longitude of the center, in degrees.
lat (float): The latitude of the center, in degrees.
zoom (int, optional): The zoom level, from 1 to 24. Defaults to None.
"""
self.center = (lat, lon)
if zoom is not None:
self.zoom = zoom
setCenter = set_center
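# Usage sketch for set_center()/setCenter(); note the (lon, lat) argument order,
# matching the Earth Engine API, while ipyleaflet's Map.center is (lat, lon):
#
#   m = Map()
#   m.setCenter(-100, 40, 4)   # equivalent to m.set_center(lon=-100, lat=40, zoom=4)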
def center_object(self, ee_object, zoom=None):
"""Centers the map view on a given object.
Args:
ee_object (Element|Geometry): An Earth Engine object (geometry, image, or feature) to center the map on.
zoom (int, optional): The zoom level, from 1 to 24. Defaults to None.
"""
if zoom is None and hasattr(self, "fit_bounds"):
self.zoom_to_object(ee_object)
else:
lat = 0
lon = 0
if isinstance(ee_object, ee.geometry.Geometry):
centroid = ee_object.centroid(1)
lon, lat = centroid.getInfo()["coordinates"]
else:
try:
centroid = ee_object.geometry().centroid(1)
lon, lat = centroid.getInfo()["coordinates"]
except Exception as e:
print(e)
raise Exception(e)
self.setCenter(lon, lat, zoom)
centerObject = center_object
def zoom_to_object(self, ee_object):
"""Zoom to the full extent of an Earth Engine object.
Args:
ee_object (object): An Earth Engine object, such as Image, ImageCollection, Geometry, Feature, FeatureCollection.
Raises:
Exception: Error getting geometry.
"""
coordinates = None
if isinstance(ee_object, ee.geometry.Geometry):
bounds = ee_object.bounds()
coordinates = bounds.getInfo()["coordinates"][0]
else:
try:
bounds = ee_object.geometry().bounds()
coordinates = bounds.getInfo()["coordinates"][0]
except Exception as e:
print(e)
raise Exception(e)
if coordinates is not None:
south = coordinates[0][1]
west = coordinates[0][0]
north = coordinates[2][1]
east = coordinates[2][0]
self.fit_bounds([[south, east], [north, west]])
zoomToObject = zoom_to_object
def get_scale(self):
"""Returns the approximate pixel scale of the current map view, in meters.
Returns:
float: Map resolution in meters.
"""
zoom_level = self.zoom
# Reference: https://blogs.bing.com/maps/2006/02/25/map-control-zoom-levels-gt-resolution
resolution = 156543.04 * math.cos(0) / math.pow(2, zoom_level)
return resolution
getScale = get_scale
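# Worked example for get_scale()/getScale(): at zoom level 10 the approximate
# resolution is 156543.04 / 2**10 ≈ 152.9 m/px (the cos(0) term means the
# estimate ignores latitude):
#
#   m = Map(zoom=10)
#   m.getScale()   # ~152.9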
def add_basemap(self, basemap="HYBRID"):
"""Adds a basemap to the map.
Args:
basemap (str, optional): Can be one of the basemap names in basemap_tiles. Defaults to 'HYBRID'.
"""
try:
if (
basemap in basemap_tiles.keys()
and basemap_tiles[basemap] not in self.layers
):
self.add_layer(basemap_tiles[basemap])
except Exception:
raise ValueError(
"Basemap can only be one of the following:\n {}".format(
"\n ".join(basemap_tiles.keys())
)
)
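# Usage sketch for add_basemap(); the name must be a key of basemap_tiles,
# e.g. "ROADMAP", "SATELLITE", "TERRAIN", or "HYBRID":
#
#   m = Map()
#   m.add_basemap("HYBRID")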
def find_layer(self, name):
"""Finds layer by name
Args:
name (str): Name of the layer to find.
Returns:
object: ipyleaflet layer object.
"""
layers = self.layers
for layer in layers:
if layer.name == name:
return layer
return None
def find_layer_index(self, name):
"""Finds layer index by name
Args:
name (str): Name of the layer to find.
Returns:
int: Index of the layer with the specified name, or -1 if not found.
"""
layers = self.layers
for index, layer in enumerate(layers):
if layer.name == name:
return index
return -1
def layer_opacity(self, name, value=1.0):
"""Changes layer opacity.
Args:
name (str): The name of the layer to change opacity.
value (float, optional): The opacity value to set. Defaults to 1.0.
"""
layer = self.find_layer(name)
try:
layer.opacity = value
except Exception as e:
raise Exception(e)
def add_wms_layer(
self,
url,
layers,
name=None,
attribution="",
format="image/jpeg",
transparent=False,
opacity=1.0,
shown=True,
**kwargs,
):
"""Add a WMS layer to the map.
Args:
url (str): The URL of the WMS web service.
layers (str): Comma-separated list of WMS layers to show.
name (str, optional): The layer name to use on the layer control. Defaults to None.
attribution (str, optional): The attribution of the data layer. Defaults to ''.
format (str, optional): WMS image format (use ‘image/png’ for layers with transparency). Defaults to 'image/jpeg'.
transparent (bool, optional): If True, the WMS service will return images with transparency. Defaults to False.
opacity (float, optional): The opacity of the layer. Defaults to 1.0.
shown (bool, optional): A flag indicating whether the layer should be on by default. Defaults to True.
"""
if name is None:
name = str(layers)
try:
wms_layer = ipyleaflet.WMSLayer(
url=url,
layers=layers,
name=name,
attribution=attribution,
format=format,
transparent=transparent,
opacity=opacity,
visible=shown,
**kwargs,
)
self.add_layer(wms_layer)
except Exception as e:
print("Failed to add the specified WMS TileLayer.")
raise Exception(e)
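# Usage sketch for add_wms_layer(); the endpoint and layer name below are
# placeholders and must be replaced with a real WMS service:
#
#   m = Map()
#   m.add_wms_layer(
#       url="https://example.com/wms",    # hypothetical endpoint
#       layers="example_layer",           # hypothetical layer name
#       name="WMS layer",
#       format="image/png",
#       transparent=True,
#   )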
def add_tile_layer(
self,
url="https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png",
name="Untitled",
attribution="",
opacity=1.0,
shown=True,
**kwargs,
):
"""Adds a TileLayer to the map.
Args:
url (str, optional): The URL of the tile layer. Defaults to 'https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png'.
name (str, optional): The layer name to use for the layer. Defaults to 'Untitled'.
attribution (str, optional): The attribution to use. Defaults to ''.
opacity (float, optional): The opacity of the layer. Defaults to 1.
shown (bool, optional): A flag indicating whether the layer should be on by default. Defaults to True.
"""
try:
tile_layer = TileLayer(
url=url,
name=name,
attribution=attribution,
opacity=opacity,
visible=shown,
**kwargs,
)
self.add_layer(tile_layer)
except Exception as e:
print("Failed to add the specified TileLayer.")
raise Exception(e)
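# Usage sketch for add_tile_layer(); the default URL template points to the
# public OpenStreetMap tile server:
#
#   m = Map()
#   m.add_tile_layer(name="OpenStreetMap", attribution="OpenStreetMap contributors")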
def add_COG_layer(
self,
url,
name="Untitled",
attribution="",
opacity=1.0,
shown=True,
titiler_endpoint="https://api.cogeo.xyz/",
**kwargs,
):
"""Adds a COG TileLayer to the map.
Args:
url (str): The URL of the Cloud Optimized GeoTIFF (COG).
name (str, optional): The layer name to use for the layer. Defaults to 'Untitled'.
attribution (str, optional): The attribution to use. Defaults to ''.
opacity (float, optional): The opacity of the layer. Defaults to 1.
shown (bool, optional): A flag indicating whether the layer should be on by default. Defaults to True.
titiler_endpoint (str, optional): Titiler endpoint. Defaults to "https://api.cogeo.xyz/".
"""
tile_url = get_COG_tile(url, titiler_endpoint, **kwargs)
center = get_COG_center(url, titiler_endpoint) # (lon, lat)
self.add_tile_layer(tile_url, name, attribution, opacity, shown)
self.set_center(lon=center[0], lat=center[1], zoom=10)
def add_COG_mosaic(
self,
links,
name="Untitled",
attribution="",
opacity=1.0,
shown=True,
titiler_endpoint="https://api.cogeo.xyz/",
username="anonymous",
overwrite=False,
show_footprints=False,
verbose=True,
**kwargs,
):
"""Add a virtual mosaic of COGs to the map.
Args:
links (list): A list of links pointing to COGs.
name (str, optional): The layer name to use for the layer. Defaults to 'Untitled'.
attribution (str, optional): The attribution to use. Defaults to ''.
opacity (float, optional): The opacity of the layer. Defaults to 1.
shown (bool, optional): A flag indicating whether the layer should be on by default. Defaults to True.
titiler_endpoint (str, optional): Titiler endpoint. Defaults to "https://api.cogeo.xyz/".
username (str, optional): The username used to create the mosaic via the titiler endpoint. Defaults to 'anonymous'.
overwrite (bool, optional): Whether or not to replace existing layer with the same layer name. Defaults to False.
show_footprints (bool, optional): Whether or not to show footprints of COGs. Defaults to False.
verbose (bool, optional): Whether or not to print descriptions. Defaults to True.
"""
layername = name.replace(" ", "_")
tile = get_COG_mosaic(
links,
titiler_endpoint=titiler_endpoint,
username=username,
layername=layername,
overwrite=overwrite,
verbose=verbose,
)
self.add_tile_layer(tile, name, attribution, opacity, shown)
if show_footprints:
if verbose:
print(
f"Generating footprints of {len(links)} COGs. This might take a while ..."
)
coords = []
for link in links:
coord = get_COG_bounds(link)
if coord is not None:
coords.append(coord)
fc = coords_to_geojson(coords)
geo_json = ipyleaflet.GeoJSON(
data=fc,
style={
"opacity": 1,
"dashArray": "1",
"fillOpacity": 0,
"weight": 1,
},
name="Footprints",
)
self.add_layer(geo_json)
center = get_center(fc)
if verbose:
print("The footprint layer has been added.")
else:
center = get_COG_center(links[0], titiler_endpoint)
self.set_center(center[0], center[1], zoom=6)
def add_STAC_layer(
self,
url,
bands=None,
name="Untitled",
attribution="",
opacity=1.0,
shown=True,
titiler_endpoint="https://api.cogeo.xyz/",
**kwargs,
):
"""Adds a STAC TileLayer to the map.
Args:
url (str): The URL of the STAC item.
bands (list, optional): A list of band names to use. Defaults to None.
name (str, optional): The layer name to use for the layer. Defaults to 'Untitled'.
attribution (str, optional): The attribution to use. Defaults to ''.
opacity (float, optional): The opacity of the layer. Defaults to 1.
shown (bool, optional): A flag indicating whether the layer should be on by default. Defaults to True.
titiler_endpoint (str, optional): Titiler endpoint. Defaults to "https://api.cogeo.xyz/".
"""
tile_url = get_STAC_tile(url, bands, titiler_endpoint, **kwargs)
center = get_STAC_center(url, titiler_endpoint)
self.add_tile_layer(tile_url, name, attribution, opacity, shown)
self.set_center(lon=center[0], lat=center[1], zoom=10)
def add_minimap(self, zoom=5, position="bottomright"):
"""Adds a minimap (overview) to the ipyleaflet map.
Args:
zoom (int, optional): Initial map zoom level. Defaults to 5.
position (str, optional): Position of the minimap. Defaults to "bottomright".
"""
minimap = ipyleaflet.Map(
zoom_control=False,
attribution_control=False,
zoom=zoom,
center=self.center,
layers=[basemap_tiles["ROADMAP"]],
)
minimap.layout.width = "150px"
minimap.layout.height = "150px"
ipyleaflet.link((minimap, "center"), (self, "center"))
minimap_control = WidgetControl(widget=minimap, position=position)
self.add_control(minimap_control)
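# Usage sketch for add_minimap(); the overview map is kept in sync with the
# main map center through ipyleaflet.link():
#
#   m = Map()
#   m.add_minimap(zoom=4, position="bottomright")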
def marker_cluster(self):
"""Adds a marker cluster to the map and returns a list of ee.Feature, which can be accessed using Map.ee_marker_cluster.
Returns:
object: a list of ee.Feature
"""
coordinates = []
markers = []
marker_cluster = MarkerCluster(name="Marker Cluster")
self.last_click = []
self.all_clicks = []
self.ee_markers = []
self.add_layer(marker_cluster)
def handle_interaction(**kwargs):
latlon = kwargs.get("coordinates")
if kwargs.get("type") == "click":
coordinates.append(latlon)
geom = ee.Geometry.Point(latlon[1], latlon[0])
feature = ee.Feature(geom)
self.ee_markers.append(feature)
self.last_click = latlon
self.all_clicks = coordinates
markers.append(Marker(location=latlon))
marker_cluster.markers = markers
elif kwargs.get("type") == "mousemove":
pass
# cursor style: https://www.w3schools.com/cssref/pr_class_cursor.asp
self.default_style = {"cursor": "crosshair"}
self.on_interaction(handle_interaction)
def set_plot_options(
self,
add_marker_cluster=False,
sample_scale=None,
plot_type=None,
overlay=False,
position="bottomright",
min_width=None,
max_width=None,
min_height=None,
max_height=None,
**kwargs,
):
"""Sets plotting options.
Args:
add_marker_cluster (bool, optional): Whether to add a marker cluster. Defaults to False.
sample_scale (float, optional): A nominal scale in meters of the projection to sample in. Defaults to None.
plot_type (str, optional): The plot type can be one of "None", "bar", "scatter" or "hist". Defaults to None.
overlay (bool, optional): Whether to overlay plotted lines on the figure. Defaults to False.
position (str, optional): Position of the control, can be ‘bottomleft’, ‘bottomright’, ‘topleft’, or ‘topright’. Defaults to 'bottomright'.
min_width (int, optional): Min width of the widget (in pixels), if None it will respect the content size. Defaults to None.
max_width (int, optional): Max width of the widget (in pixels), if None it will respect the content size. Defaults to None.
min_height (int, optional): Min height of the widget (in pixels), if None it will respect the content size. Defaults to None.
max_height (int, optional): Max height of the widget (in pixels), if None it will respect the content size. Defaults to None.
"""
plot_options_dict = {}
plot_options_dict["add_marker_cluster"] = add_marker_cluster
plot_options_dict["sample_scale"] = sample_scale
plot_options_dict["plot_type"] = plot_type
plot_options_dict["overlay"] = overlay
plot_options_dict["position"] = position
plot_options_dict["min_width"] = min_width
plot_options_dict["max_width"] = max_width
plot_options_dict["min_height"] = min_height
plot_options_dict["max_height"] = max_height
for key in kwargs.keys():
plot_options_dict[key] = kwargs[key]
self.plot_options = plot_options_dict
if add_marker_cluster and (self.plot_marker_cluster not in self.layers):
self.add_layer(self.plot_marker_cluster)
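# Usage sketch for set_plot_options(); these options are consumed by the click
# handler registered in __init__ when the plotting tool is active:
#
#   m = Map()
#   m.set_plot_options(add_marker_cluster=True, plot_type="bar", overlay=False)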
def plot(
self,
x,
y,
plot_type=None,
overlay=False,
position="bottomright",
min_width=None,
max_width=None,
min_height=None,
max_height=None,
**kwargs,
):
"""Creates a plot based on x-array and y-array data.
Args:
x (numpy.ndarray or list): The x-coordinates of the plotted line.
y (numpy.ndarray or list): The y-coordinates of the plotted line.
plot_type (str, optional): The plot type can be one of "None", "bar", "scatter" or "hist". Defaults to None.
overlay (bool, optional): Whether to overlay plotted lines on the figure. Defaults to False.
position (str, optional): Position of the control, can be ‘bottomleft’, ‘bottomright’, ‘topleft’, or ‘topright’. Defaults to 'bottomright'.
min_width (int, optional): Min width of the widget (in pixels), if None it will respect the content size. Defaults to None.
max_width (int, optional): Max width of the widget (in pixels), if None it will respect the content size. Defaults to None.
min_height (int, optional): Min height of the widget (in pixels), if None it will respect the content size. Defaults to None.
max_height (int, optional): Max height of the widget (in pixels), if None it will respect the content size. Defaults to None.
"""
if self.plot_widget is not None:
plot_widget = self.plot_widget
else:
plot_widget = widgets.Output(layout={"border": "1px solid black"})
plot_control = WidgetControl(
widget=plot_widget,
position=position,
min_width=min_width,
max_width=max_width,
min_height=min_height,
max_height=max_height,
)
self.plot_widget = plot_widget
self.plot_control = plot_control
self.add_control(plot_control)
if max_width is None:
max_width = 500
if max_height is None:
max_height = 300
if (plot_type is None) and ("markers" not in kwargs.keys()):
kwargs["markers"] = "circle"
with plot_widget:
try:
fig = plt.figure(1, **kwargs)
if max_width is not None:
fig.layout.width = str(max_width) + "px"
if max_height is not None:
fig.layout.height = str(max_height) + "px"
plot_widget.clear_output(wait=True)
if not overlay:
plt.clear()
if plot_type is None:
if "marker" not in kwargs.keys():
kwargs["marker"] = "circle"
plt.plot(x, y, **kwargs)
elif plot_type == "bar":
plt.bar(x, y, **kwargs)
elif plot_type == "scatter":
plt.scatter(x, y, **kwargs)
elif plot_type == "hist":
plt.hist(y, **kwargs)
plt.show()
except Exception as e:
print("Failed to create plot.")
raise Exception(e)
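# Usage sketch for plot() with plain Python lists (a bar chart rendered in the
# bqplot output widget anchored to the map):
#
#   m = Map()
#   m.plot([1, 2, 3, 4], [10, 20, 15, 30], plot_type="bar")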
def plot_demo(
self,
iterations=20,
plot_type=None,
overlay=False,
position="bottomright",
min_width=None,
max_width=None,
min_height=None,
max_height=None,
**kwargs,
):
"""A demo of interactive plotting using random pixel coordinates.
Args:
iterations (int, optional): How many iterations to run for the demo. Defaults to 20.
plot_type (str, optional): The plot type can be one of "None", "bar", "scatter" or "hist". Defaults to None.
overlay (bool, optional): Whether to overlay plotted lines on the figure. Defaults to False.
position (str, optional): Position of the control, can be ‘bottomleft’, ‘bottomright’, ‘topleft’, or ‘topright’. Defaults to 'bottomright'.
min_width (int, optional): Min width of the widget (in pixels), if None it will respect the content size. Defaults to None.
max_width (int, optional): Max width of the widget (in pixels), if None it will respect the content size. Defaults to None.
min_height (int, optional): Min height of the widget (in pixels), if None it will respect the content size. Defaults to None.
max_height (int, optional): Max height of the widget (in pixels), if None it will respect the content size. Defaults to None.
"""
import numpy as np
import time
if self.random_marker is not None:
self.remove_layer(self.random_marker)
image = ee.Image("LE7_TOA_5YEAR/1999_2003").select([0, 1, 2, 3, 4, 6])
self.addLayer(
image,
{"bands": ["B4", "B3", "B2"], "gamma": 1.4},
"LE7_TOA_5YEAR/1999_2003",
)
self.setCenter(-50.078877, 25.190030, 3)
band_names = image.bandNames().getInfo()
# band_count = len(band_names)
latitudes = np.random.uniform(30, 48, size=iterations)
longitudes = np.random.uniform(-121, -76, size=iterations)
marker = Marker(location=(0, 0))
self.random_marker = marker
self.add_layer(marker)
for i in range(iterations):
try:
coordinate = ee.Geometry.Point([longitudes[i], latitudes[i]])
dict_values = image.sample(coordinate).first().toDictionary().getInfo()
band_values = list(dict_values.values())
title = "{}/{}: Spectral signature at ({}, {})".format(
i + 1,
iterations,
round(latitudes[i], 2),
round(longitudes[i], 2),
)
marker.location = (latitudes[i], longitudes[i])
self.plot(
band_names,
band_values,
plot_type=plot_type,
overlay=overlay,
min_width=min_width,
max_width=max_width,
min_height=min_height,
max_height=max_height,
title=title,
**kwargs,
)
time.sleep(0.3)
except Exception as e:
raise Exception(e)
def plot_raster(
self,
ee_object=None,
sample_scale=None,
plot_type=None,
overlay=False,
position="bottomright",
min_width=None,
max_width=None,
min_height=None,
max_height=None,
**kwargs,
):
"""Interactive plotting of Earth Engine data by clicking on the map.
Args:
ee_object (object, optional): The ee.Image or ee.ImageCollection to sample. Defaults to None.
sample_scale (float, optional): A nominal scale in meters of the projection to sample in. Defaults to None.
plot_type (str, optional): The plot type can be one of None, "bar", "scatter", or "hist". Defaults to None.
overlay (bool, optional): Whether to overlay plotted lines on the figure. Defaults to False.
position (str, optional): Position of the control, can be ‘bottomleft’, ‘bottomright’, ‘topleft’, or ‘topright’. Defaults to 'bottomright'.
min_width (int, optional): Min width of the widget (in pixels), if None it will respect the content size. Defaults to None.
max_width (int, optional): Max width of the widget (in pixels), if None it will respect the content size. Defaults to None.
min_height (int, optional): Min height of the widget (in pixels), if None it will respect the content size. Defaults to None.
max_height (int, optional): Max height of the widget (in pixels), if None it will respect the content size. Defaults to None.
"""
if self.plot_control is not None:
del self.plot_widget
if self.plot_control in self.controls:
self.remove_control(self.plot_control)
if self.random_marker is not None:
self.remove_layer(self.random_marker)
plot_widget = widgets.Output(layout={"border": "1px solid black"})
plot_control = WidgetControl(
widget=plot_widget,
position=position,
min_width=min_width,
max_width=max_width,
min_height=min_height,
max_height=max_height,
)
self.plot_widget = plot_widget
self.plot_control = plot_control
self.add_control(plot_control)
self.default_style = {"cursor": "crosshair"}
msg = "The plot function can only be used on ee.Image or ee.ImageCollection with more than one band."
if (ee_object is None) and len(self.ee_raster_layers) > 0:
ee_object = self.ee_raster_layers[-1]
if isinstance(ee_object, ee.ImageCollection):
ee_object = ee_object.mosaic()
elif isinstance(ee_object, ee.ImageCollection):
ee_object = ee_object.mosaic()
elif not isinstance(ee_object, ee.Image):
print(msg)
return
if sample_scale is None:
sample_scale = self.getScale()
if max_width is None:
max_width = 500
band_names = ee_object.bandNames().getInfo()
coordinates = []
markers = []
marker_cluster = MarkerCluster(name="Marker Cluster")
self.last_click = []
self.all_clicks = []
self.add_layer(marker_cluster)
def handle_interaction(**kwargs2):
latlon = kwargs2.get("coordinates")
if kwargs2.get("type") == "click":
try:
coordinates.append(latlon)
self.last_click = latlon
self.all_clicks = coordinates
markers.append(Marker(location=latlon))
marker_cluster.markers = markers
self.default_style = {"cursor": "wait"}
xy = ee.Geometry.Point(latlon[::-1])
dict_values = (
ee_object.sample(xy, scale=sample_scale)
.first()
.toDictionary()
.getInfo()
)
band_values = list(dict_values.values())
self.plot(
band_names,
band_values,
plot_type=plot_type,
overlay=overlay,
min_width=min_width,
max_width=max_width,
min_height=min_height,
max_height=max_height,
**kwargs,
)
self.default_style = {"cursor": "crosshair"}
except Exception as e:
if self.plot_widget is not None:
with self.plot_widget:
self.plot_widget.clear_output()
print("No data for the clicked location.")
else:
print(e)
self.default_style = {"cursor": "crosshair"}
self.on_interaction(handle_interaction)
def add_maker_cluster(self, event="click", add_marker=True):
"""Captures user inputs and add markers to the map.
Args:
event (str, optional): [description]. Defaults to 'click'.
add_marker (bool, optional): If True, add markers to the map. Defaults to True.
Returns:
object: a marker cluster.
"""
coordinates = []
markers = []
marker_cluster = MarkerCluster(name="Marker Cluster")
self.last_click = []
self.all_clicks = []
if add_marker:
self.add_layer(marker_cluster)
def handle_interaction(**kwargs):
latlon = kwargs.get("coordinates")
if event == "click" and kwargs.get("type") == "click":
coordinates.append(latlon)
self.last_click = latlon
self.all_clicks = coordinates
if add_marker:
markers.append(Marker(location=latlon))
marker_cluster.markers = markers
elif kwargs.get("type") == "mousemove":
pass
# cursor style: https://www.w3schools.com/cssref/pr_class_cursor.asp
self.default_style = {"cursor": "crosshair"}
self.on_interaction(handle_interaction)
def set_control_visibility(
self, layerControl=True, fullscreenControl=True, latLngPopup=True
):
"""Sets the visibility of the controls on the map.
Args:
layerControl (bool, optional): Whether to show the control that allows the user to toggle layers on/off. Defaults to True.
fullscreenControl (bool, optional): Whether to show the control that allows the user to make the map full-screen. Defaults to True.
latLngPopup (bool, optional): Whether to show the control that pops up the Lat/lon when the user clicks on the map. Defaults to True.
"""
pass
setControlVisibility = set_control_visibility
def add_layer_control(self):
"""Adds the layer control to the map."""
pass
addLayerControl = add_layer_control
def split_map(self, left_layer="HYBRID", right_layer="ESRI"):
"""Adds split map.
Args:
left_layer (str, optional): The left tile layer. Defaults to 'HYBRID'.
right_layer (str, optional): The right tile layer. Defaults to 'ESRI'.
"""
try:
if left_layer in basemap_tiles.keys():
left_layer = basemap_tiles[left_layer]
if right_layer in basemap_tiles.keys():
right_layer = basemap_tiles[right_layer]
control = ipyleaflet.SplitMapControl(
left_layer=left_layer, right_layer=right_layer
)
self.add_control(control)
except Exception as e:
print("The provided layers are invalid!")
raise ValueError(e)
def ts_inspector(
self,
left_ts,
right_ts,
left_names,
right_names,
left_vis={},
right_vis={},
):
"""Creates a split-panel map for inspecting timeseries images.
Args:
left_ts (object): An ee.ImageCollection to show on the left panel.
right_ts (object): An ee.ImageCollection to show on the right panel.
left_names (list): A list of names to show under the left dropdown.
right_names (list): A list of names to show under the right dropdown.
left_vis (dict, optional): Visualization parameters for the left layer. Defaults to {}.
right_vis (dict, optional): Visualization parameters for the right layer. Defaults to {}.
"""
left_count = int(left_ts.size().getInfo())
right_count = int(right_ts.size().getInfo())
if left_count != len(left_names):
print(
"The number of images in left_ts must match the number of layer names in left_names."
)
return
if right_count != len(right_names):
print(
"The number of images in right_ts must match the number of layer names in right_names."
)
return
left_layer = TileLayer(
url="https://mt1.google.com/vt/lyrs=m&x={x}&y={y}&z={z}",
attribution="Google",
name="Google Maps",
)
right_layer = TileLayer(
url="https://mt1.google.com/vt/lyrs=m&x={x}&y={y}&z={z}",
attribution="Google",
name="Google Maps",
)
self.clear_controls()
left_dropdown = widgets.Dropdown(options=left_names, value=None)
right_dropdown = widgets.Dropdown(options=right_names, value=None)
left_dropdown.layout.max_width = "130px"
right_dropdown.layout.max_width = "130px"
left_control = WidgetControl(widget=left_dropdown, position="topleft")
right_control = WidgetControl(widget=right_dropdown, position="topright")
self.add_control(control=left_control)
self.add_control(control=right_control)
self.add_control(ipyleaflet.ZoomControl(position="topleft"))
self.add_control(ipyleaflet.ScaleControl(position="bottomleft"))
self.add_control(ipyleaflet.FullScreenControl())
def left_dropdown_change(change):
left_dropdown_index = left_dropdown.index
if left_dropdown_index is not None and left_dropdown_index >= 0:
try:
if isinstance(left_ts, ee.ImageCollection):
left_image = left_ts.toList(left_ts.size()).get(
left_dropdown_index
)
elif isinstance(left_ts, ee.List):
left_image = left_ts.get(left_dropdown_index)
else:
print("The left_ts argument must be an ImageCollection.")
return
if isinstance(left_image, ee.ImageCollection):
left_image = ee.Image(left_image.mosaic())
elif isinstance(left_image, ee.Image):
pass
else:
left_image = ee.Image(left_image)
left_image = ee_tile_layer(
left_image, left_vis, left_names[left_dropdown_index]
)
left_layer.url = left_image.url
except Exception as e:
print(e)
return
left_dropdown.observe(left_dropdown_change, names="value")
def right_dropdown_change(change):
right_dropdown_index = right_dropdown.index
if right_dropdown_index is not None and right_dropdown_index >= 0:
try:
if isinstance(right_ts, ee.ImageCollection):
right_image = right_ts.toList(right_ts.size()).get(
right_dropdown_index
)
elif isinstance(right_ts, ee.List):
right_image = right_ts.get(right_dropdown_index)
else:
print("The left_ts argument must be an ImageCollection.")
return
if isinstance(right_image, ee.ImageCollection):
right_image = ee.Image(right_image.mosaic())
elif isinstance(right_image, ee.Image):
pass
else:
right_image = ee.Image(right_image)
right_image = ee_tile_layer(
right_image,
right_vis,
right_names[right_dropdown_index],
)
right_layer.url = right_image.url
except Exception as e:
print(e)
return
right_dropdown.observe(right_dropdown_change, names="value")
try:
split_control = ipyleaflet.SplitMapControl(
left_layer=left_layer, right_layer=right_layer
)
self.add_control(split_control)
except Exception as e:
raise Exception(e)
def basemap_demo(self):
"""A demo for using geemap basemaps."""
dropdown = widgets.Dropdown(
options=list(basemap_tiles.keys()),
value="HYBRID",
description="Basemaps",
)
def on_click(change):
basemap_name = change["new"]
old_basemap = self.layers[-1]
self.substitute_layer(old_basemap, basemap_tiles[basemap_name])
dropdown.observe(on_click, "value")
basemap_control = WidgetControl(widget=dropdown, position="topright")
self.add_control(basemap_control)
def add_legend(
self,
legend_title="Legend",
legend_dict=None,
legend_keys=None,
legend_colors=None,
position="bottomright",
builtin_legend=None,
layer_name=None,
**kwargs,
):
"""Adds a customized basemap to the map.
Args:
legend_title (str, optional): Title of the legend. Defaults to 'Legend'.
legend_dict (dict, optional): A dictionary containing legend items as keys and colors as values. If provided, legend_keys and legend_colors will be ignored. Defaults to None.
legend_keys (list, optional): A list of legend keys. Defaults to None.
legend_colors (list, optional): A list of legend colors. Defaults to None.
position (str, optional): Position of the legend. Defaults to 'bottomright'.
builtin_legend (str, optional): Name of the builtin legend to add to the map. Defaults to None.
layer_name (str, optional): Layer name of the legend to be associated with. Defaults to None.
"""
import pkg_resources
from IPython.display import display
pkg_dir = os.path.dirname(
pkg_resources.resource_filename("geemap", "geemap.py")
)
legend_template = os.path.join(pkg_dir, "data/template/legend.html")
if "min_width" not in kwargs.keys():
min_width = None
if "max_width" not in kwargs.keys():
max_width = None
else:
max_width = kwargs["max_width"]
if "min_height" not in kwargs.keys():
min_height = None
else:
min_height = kwargs["min_height"]
if "max_height" not in kwargs.keys():
max_height = None
else:
max_height = kwargs["max_height"]
if "height" not in kwargs.keys():
height = None
else:
height = kwargs["height"]
if "width" not in kwargs.keys():
width = None
else:
width = kwargs["width"]
if width is None:
max_width = "300px"
if height is None:
max_height = "400px"
if not os.path.exists(legend_template):
print("The legend template does not exist.")
return
if legend_keys is not None:
if not isinstance(legend_keys, list):
print("The legend keys must be a list.")
return
else:
legend_keys = ["One", "Two", "Three", "Four", "ect"]
if legend_colors is not None:
if not isinstance(legend_colors, list):
print("The legend colors must be a list.")
return
elif all(isinstance(item, tuple) for item in legend_colors):
try:
legend_colors = [rgb_to_hex(x) for x in legend_colors]
except Exception as e:
print(e)
elif all(
(item.startswith("#") and len(item) == 7) for item in legend_colors
):
pass
elif all((len(item) == 6) for item in legend_colors):
pass
else:
print("The legend colors must be a list of tuples.")
return
else:
legend_colors = [
"#8DD3C7",
"#FFFFB3",
"#BEBADA",
"#FB8072",
"#80B1D3",
]
if len(legend_keys) != len(legend_colors):
print("The legend keys and values must be the same length.")
return
allowed_builtin_legends = builtin_legends.keys()
if builtin_legend is not None:
if builtin_legend not in allowed_builtin_legends:
print(
"The builtin legend must be one of the following: {}".format(
", ".join(allowed_builtin_legends)
)
)
return
else:
legend_dict = builtin_legends[builtin_legend]
legend_keys = list(legend_dict.keys())
legend_colors = list(legend_dict.values())
if legend_dict is not None:
if not isinstance(legend_dict, dict):
print("The legend dict must be a dictionary.")
return
else:
legend_keys = list(legend_dict.keys())
legend_colors = list(legend_dict.values())
if all(isinstance(item, tuple) for item in legend_colors):
try:
legend_colors = [rgb_to_hex(x) for x in legend_colors]
except Exception as e:
print(e)
allowed_positions = [
"topleft",
"topright",
"bottomleft",
"bottomright",
]
if position not in allowed_positions:
print(
"The position must be one of the following: {}".format(
", ".join(allowed_positions)
)
)
return
header = []
content = []
footer = []
with open(legend_template) as f:
lines = f.readlines()
lines[3] = lines[3].replace("Legend", legend_title)
header = lines[:6]
footer = lines[11:]
for index, key in enumerate(legend_keys):
color = legend_colors[index]
if not color.startswith("#"):
color = "#" + color
item = " <li><span style='background:{};'></span>{}</li>\n".format(
color, key
)
content.append(item)
legend_html = header + content + footer
legend_text = "".join(legend_html)
try:
legend_output_widget = widgets.Output(
layout={
# "border": "1px solid black",
"max_width": max_width,
"min_width": min_width,
"max_height": max_height,
"min_height": min_height,
"height": height,
"width": width,
"overflow": "scroll",
}
)
legend_control = WidgetControl(
widget=legend_output_widget, position=position
)
legend_widget = widgets.HTML(value=legend_text)
with legend_output_widget:
display(legend_widget)
self.legend_widget = legend_output_widget
self.legend_control = legend_control
self.add_control(legend_control)
if layer_name in self.ee_layer_names:
self.ee_layer_dict[layer_name]["legend"] = legend_control
except Exception as e:
raise Exception(e)
def add_colorbar(
self,
vis_params=None,
cmap="gray",
discrete=False,
label=None,
orientation="horizontal",
position="bottomright",
transparent_bg=False,
layer_name=None,
**kwargs,
):
"""Add a matplotlib colorbar to the map
Args:
vis_params (dict): Visualization parameters as a dictionary. See https://developers.google.com/earth-engine/guides/image_visualization for options.
cmap (str, optional): Matplotlib colormap. Defaults to "gray". See https://matplotlib.org/3.3.4/tutorials/colors/colormaps.html#sphx-glr-tutorials-colors-colormaps-py for options.
discrete (bool, optional): Whether to create a discrete colorbar. Defaults to False.
label (str, optional): Label for the colorbar. Defaults to None.
orientation (str, optional): Orientation of the colorbar, such as "vertical" and "horizontal". Defaults to "horizontal".
position (str, optional): Position of the colorbar on the map. It can be one of: topleft, topright, bottomleft, and bottomright. Defaults to "bottomright".
transparent_bg (bool, optional): Whether to use transparent background. Defaults to False.
layer_name (str, optional): The layer name associated with the colorbar. Defaults to None.
Raises:
TypeError: If the vis_params is not a dictionary.
ValueError: If the orientation is not either horizontal or vertical.
ValueError: If the provided min value is not scalar type.
ValueError: If the provided max value is not scalar type.
ValueError: If the provided opacity value is not scalar type.
ValueError: If cmap or palette is not provided.
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
if isinstance(vis_params, list):
vis_params = {"palette": vis_params}
elif isinstance(vis_params, tuple):
vis_params = {"palette": list(vis_params)}
elif vis_params is None:
vis_params = {}
if "colors" in kwargs and isinstance(kwargs["colors"], list):
vis_params["palette"] = kwargs["colors"]
if "colors" in kwargs and isinstance(kwargs["colors"], tuple):
vis_params["palette"] = list(kwargs["colors"])
if "vmin" in kwargs:
vis_params["min"] = kwargs["vmin"]
del kwargs["vmin"]
if "vmax" in kwargs:
vis_params["max"] = kwargs["vmax"]
del kwargs["vmax"]
if "caption" in kwargs:
label = kwargs["caption"]
del kwargs["caption"]
if not isinstance(vis_params, dict):
raise TypeError("The vis_params must be a dictionary.")
if orientation not in ["horizontal", "vertical"]:
raise ValueError("The orientation must be either horizontal or vertical.")
if orientation == "horizontal":
width, height = 6.0, 0.4
else:
width, height = 0.4, 4.0
if "width" in kwargs:
width = kwargs["width"]
kwargs.pop("width")
if "height" in kwargs:
height = kwargs["height"]
kwargs.pop("height")
vis_keys = list(vis_params.keys())
if "min" in vis_params:
vmin = vis_params["min"]
if type(vmin) not in (int, float):
raise ValueError("The provided min value must be scalar type.")
else:
vmin = 0
if "max" in vis_params:
vmax = vis_params["max"]
if type(vmax) not in (int, float):
raise ValueError("The provided max value must be scalar type.")
else:
vmax = 1
if "opacity" in vis_params:
alpha = vis_params["opacity"]
if type(alpha) not in (int, float):
raise ValueError("The provided opacity value must be type scalar.")
elif "alpha" in kwargs:
alpha = kwargs["alpha"]
else:
alpha = 1
if cmap is not None:
cmap = mpl.pyplot.get_cmap(cmap)
norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
if "palette" in vis_keys:
hexcodes = to_hex_colors(vis_params["palette"])
if discrete:
cmap = mpl.colors.ListedColormap(hexcodes)
vals = np.linspace(vmin, vmax, cmap.N + 1)
norm = mpl.colors.BoundaryNorm(vals, cmap.N)
else:
cmap = mpl.colors.LinearSegmentedColormap.from_list(
"custom", hexcodes, N=256
)
norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
elif cmap is not None:
cmap = mpl.pyplot.get_cmap(cmap)
norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
else:
raise ValueError(
'cmap keyword or "palette" key in vis_params must be provided.'
)
_, ax = plt.subplots(figsize=(width, height))
cb = mpl.colorbar.ColorbarBase(
ax, norm=norm, alpha=alpha, cmap=cmap, orientation=orientation, **kwargs
)
if "bands" in vis_keys:
cb.set_label(vis_params["bands"])
elif label is not None:
cb.set_label(label)
output = widgets.Output()
colormap_ctrl = WidgetControl(
widget=output,
position=position,
transparent_bg=transparent_bg,
)
with output:
output.clear_output()
plt.show()
self.colorbar = colormap_ctrl
if layer_name in self.ee_layer_names:
if "colorbar" in self.ee_layer_dict[layer_name]:
self.remove_control(self.ee_layer_dict[layer_name]["colorbar"])
self.ee_layer_dict[layer_name]["colorbar"] = colormap_ctrl
self.add_control(colormap_ctrl)
def add_colorbar_branca(
self,
colors,
vmin=0,
vmax=1.0,
index=None,
caption="",
categorical=False,
step=None,
height="45px",
transparent_bg=False,
position="bottomright",
layer_name=None,
**kwargs,
):
"""Add a branca colorbar to the map.
Args:
colors (list): The set of colors to be used for interpolation. Colors can be provided as tuples of RGB(A) ints between 0 and 255 (e.g., (255, 255, 0) or (255, 255, 0, 255)), tuples of RGB(A) floats between 0.0 and 1.0 (e.g., (1.0, 1.0, 0.0) or (1.0, 1.0, 0.0, 1.0)), HTML-like hex strings (e.g., "#ffff00"), or color names or shortcuts (e.g., "y" or "yellow").
vmin (int, optional): The minimal value for the colormap. Values lower than vmin will be bound directly to colors[0]. Defaults to 0.
vmax (float, optional): The maximal value for the colormap. Values higher than vmax will be bound directly to colors[-1]. Defaults to 1.0.
index (list, optional): The values corresponding to each color. It has to be sorted and have the same length as colors. If None, a regular grid between vmin and vmax is created. Defaults to None.
caption (str, optional): The caption for the colormap. Defaults to "".
categorical (bool, optional): Whether or not to create a categorical colormap. Defaults to False.
step (int, optional): The step to split the LinearColormap into a StepColormap. Defaults to None.
height (str, optional): The height of the colormap widget. Defaults to "45px".
transparent_bg (bool, optional): Whether to use a transparent background for the colormap widget. Defaults to False.
position (str, optional): The position for the colormap widget. Defaults to "bottomright".
layer_name (str, optional): Layer name of the colorbar to be associated with. Defaults to None.
"""
from box import Box
from branca.colormap import LinearColormap
output = widgets.Output()
output.layout.height = height
if "width" in kwargs.keys():
output.layout.width = kwargs["width"]
if isinstance(colors, Box):
try:
colors = list(colors["default"])
except Exception as e:
print("The provided color list is invalid.")
raise Exception(e)
if all(len(color) == 6 for color in colors):
colors = ["#" + color for color in colors]
colormap = LinearColormap(
colors=colors, index=index, vmin=vmin, vmax=vmax, caption=caption
)
if categorical:
if step is not None:
colormap = colormap.to_step(step)
elif index is not None:
colormap = colormap.to_step(len(index) - 1)
else:
colormap = colormap.to_step(3)
colormap_ctrl = WidgetControl(
widget=output,
position=position,
transparent_bg=transparent_bg,
**kwargs,
)
with output:
output.clear_output()
display(colormap)
self.colorbar = colormap_ctrl
self.add_control(colormap_ctrl)
if layer_name in self.ee_layer_names:
self.ee_layer_dict[layer_name]["colorbar"] = colormap_ctrl
def remove_colorbar(self):
"""Remove colorbar from the map."""
if self.colorbar is not None:
self.remove_control(self.colorbar)
def image_overlay(self, url, bounds, name):
"""Overlays an image from the Internet or locally on the map.
Args:
url (str): http URL or local file path to the image.
bounds (tuple): bounding box of the image in the format of (lower_left(lat, lon), upper_right(lat, lon)), such as ((13, -130), (32, -100)).
name (str): name of the layer to show on the layer control.
"""
from base64 import b64encode
from PIL import Image, ImageSequence
from io import BytesIO
try:
if not url.startswith("http"):
if not os.path.exists(url):
print("The provided file does not exist.")
return
ext = os.path.splitext(url)[1][1:] # file extension
image = Image.open(url)
f = BytesIO()
if ext.lower() == "gif":
frames = []
# Loop over each frame in the animated image
for frame in ImageSequence.Iterator(image):
frame = frame.convert("RGBA")
b = BytesIO()
frame.save(b, format="gif")
frame = Image.open(b)
frames.append(frame)
frames[0].save(
f,
format="GIF",
save_all=True,
append_images=frames[1:],
loop=0,
)
else:
image.save(f, ext)
data = b64encode(f.getvalue())
data = data.decode("ascii")
url = "data:image/{};base64,".format(ext) + data
img = ipyleaflet.ImageOverlay(url=url, bounds=bounds, name=name)
self.add_layer(img)
except Exception as e:
print(e)
def video_overlay(self, url, bounds, name):
"""Overlays a video from the Internet on the map.
Args:
url (str): http URL of the video, such as "https://www.mapbox.com/bites/00188/patricia_nasa.webm"
bounds (tuple): bounding box of the video in the format of (lower_left(lat, lon), upper_right(lat, lon)), such as ((13, -130), (32, -100)).
name (str): name of the layer to show on the layer control.
"""
try:
video = ipyleaflet.VideoOverlay(url=url, bounds=bounds, name=name)
self.add_layer(video)
except Exception as e:
print(e)
def add_landsat_ts_gif(
self,
layer_name="Timelapse",
roi=None,
label=None,
start_year=1984,
end_year=2019,
start_date="06-10",
end_date="09-20",
bands=["NIR", "Red", "Green"],
vis_params=None,
dimensions=768,
frames_per_second=10,
font_size=30,
font_color="white",
add_progress_bar=True,
progress_bar_color="white",
progress_bar_height=5,
out_gif=None,
download=False,
apply_fmask=True,
nd_bands=None,
nd_threshold=0,
nd_palette=["black", "blue"],
):
"""Adds a Landsat timelapse to the map.
Args:
layer_name (str, optional): Layer name to show under the layer control. Defaults to 'Timelapse'.
roi (object, optional): Region of interest to create the timelapse. Defaults to None.
label (str, optional): A label to show on the GIF, such as a place name. Defaults to None.
start_year (int, optional): Starting year for the timelapse. Defaults to 1984.
end_year (int, optional): Ending year for the timelapse. Defaults to 2019.
start_date (str, optional): Starting date (month-day) each year for filtering ImageCollection. Defaults to '06-10'.
end_date (str, optional): Ending date (month-day) each year for filtering ImageCollection. Defaults to '09-20'.
bands (list, optional): Three bands selected from ['Blue', 'Green', 'Red', 'NIR', 'SWIR1', 'SWIR2', 'pixel_qa']. Defaults to ['NIR', 'Red', 'Green'].
vis_params (dict, optional): Visualization parameters. Defaults to None.
dimensions (int, optional): A number or a pair of numbers in the format WIDTHxHEIGHT specifying the maximum dimensions of the thumbnail to render, in pixels. If only one number is passed, it is used as the maximum, and the other dimension is computed by proportional scaling. Defaults to 768.
frames_per_second (int, optional): Animation speed. Defaults to 10.
font_size (int, optional): Font size of the animated text and label. Defaults to 30.
font_color (str, optional): Font color of the animated text and label. Defaults to 'white'.
add_progress_bar (bool, optional): Whether to add a progress bar at the bottom of the GIF. Defaults to True.
progress_bar_color (str, optional): Color for the progress bar. Defaults to 'white'.
progress_bar_height (int, optional): Height of the progress bar. Defaults to 5.
out_gif (str, optional): File path to the output animated GIF. Defaults to None.
download (bool, optional): Whether to download the gif. Defaults to False.
apply_fmask (bool, optional): Whether to apply Fmask (Function of mask) for automated cloud, cloud shadow, snow, and water masking. Defaults to True.
nd_bands (list, optional): A list of names specifying the bands to use, e.g., ['Green', 'SWIR1']. The normalized difference is computed as (first − second) / (first + second). Note that negative input values are forced to 0 so that the result is confined to the range (-1, 1).
nd_threshold (float, optional): The threshold for extracting pixels from the normalized difference band. Defaults to 0.
nd_palette (list, optional): The color palette to use for displaying the normalized difference band. Defaults to ['black', 'blue'].
"""
try:
if roi is None:
if self.draw_last_feature is not None:
feature = self.draw_last_feature
roi = feature.geometry()
else:
roi = ee.Geometry.Polygon(
[
[
[-115.471773, 35.892718],
[-115.471773, 36.409454],
[-114.271283, 36.409454],
[-114.271283, 35.892718],
[-115.471773, 35.892718],
]
],
None,
False,
)
elif isinstance(roi, ee.Feature) or isinstance(roi, ee.FeatureCollection):
roi = roi.geometry()
elif isinstance(roi, ee.Geometry):
pass
else:
print("The provided roi is invalid. It must be an ee.Geometry")
return
geojson = ee_to_geojson(roi)
bounds = minimum_bounding_box(geojson)
geojson = adjust_longitude(geojson)
roi = ee.Geometry(geojson)
in_gif = landsat_ts_gif(
roi=roi,
out_gif=out_gif,
start_year=start_year,
end_year=end_year,
start_date=start_date,
end_date=end_date,
bands=bands,
vis_params=vis_params,
dimensions=dimensions,
frames_per_second=frames_per_second,
apply_fmask=apply_fmask,
nd_bands=nd_bands,
nd_threshold=nd_threshold,
nd_palette=nd_palette,
)
in_nd_gif = in_gif.replace(".gif", "_nd.gif")
print("Adding animated text to GIF ...")
add_text_to_gif(
in_gif,
in_gif,
xy=("2%", "2%"),
text_sequence=start_year,
font_size=font_size,
font_color=font_color,
duration=int(1000 / frames_per_second),
add_progress_bar=add_progress_bar,
progress_bar_color=progress_bar_color,
progress_bar_height=progress_bar_height,
)
if nd_bands is not None:
add_text_to_gif(
in_nd_gif,
in_nd_gif,
xy=("2%", "2%"),
text_sequence=start_year,
font_size=font_size,
font_color=font_color,
duration=int(1000 / frames_per_second),
add_progress_bar=add_progress_bar,
progress_bar_color=progress_bar_color,
progress_bar_height=progress_bar_height,
)
if label is not None:
add_text_to_gif(
in_gif,
in_gif,
xy=("2%", "90%"),
text_sequence=label,
font_size=font_size,
font_color=font_color,
duration=int(1000 / frames_per_second),
add_progress_bar=add_progress_bar,
progress_bar_color=progress_bar_color,
progress_bar_height=progress_bar_height,
)
# if nd_bands is not None:
# add_text_to_gif(in_nd_gif, in_nd_gif, xy=('2%', '90%'), text_sequence=label,
# font_size=font_size, font_color=font_color, duration=int(1000 / frames_per_second), add_progress_bar=add_progress_bar, progress_bar_color=progress_bar_color, progress_bar_height=progress_bar_height)
if is_tool("ffmpeg"):
reduce_gif_size(in_gif)
if nd_bands is not None:
reduce_gif_size(in_nd_gif)
print("Adding GIF to the map ...")
self.image_overlay(url=in_gif, bounds=bounds, name=layer_name)
if nd_bands is not None:
self.image_overlay(
url=in_nd_gif, bounds=bounds, name=layer_name + " ND"
)
print("The timelapse has been added to the map.")
if download:
link = create_download_link(
in_gif,
title="Click here to download the Landsat timelapse: ",
)
display(link)
if nd_bands is not None:
link2 = create_download_link(
in_nd_gif,
title="Click here to download the Normalized Difference Index timelapse: ",
)
display(link2)
except Exception as e:
raise Exception(e)
def to_html(
self,
outfile,
title="My Map",
width="100%",
height="880px",
add_layer_control=True,
):
"""Saves the map as a HTML file.
Args:
outfile (str): The output file path to the HTML file.
title (str, optional): The title of the HTML file. Defaults to 'My Map'.
width (str, optional): The width of the map in pixels or percentage. Defaults to '100%'.
height (str, optional): The height of the map in pixels. Defaults to '880px'.
add_layer_control (bool, optional): Whether to add the LayersControl. Defaults to True.
"""
try:
if not outfile.endswith(".html"):
print("The output file must end with .html")
return
out_dir = os.path.dirname(outfile)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
if add_layer_control and self.layer_control is None:
layer_control = ipyleaflet.LayersControl(position="topright")
self.layer_control = layer_control
self.add_control(layer_control)
before_width = self.layout.width
before_height = self.layout.height
if not isinstance(width, str):
print("width must be a string.")
return
elif width.endswith("px") or width.endswith("%"):
pass
else:
print("width must end with px or %")
return
if not isinstance(height, str):
print("height must be a string.")
return
elif not height.endswith("px"):
print("height must end with px")
return
self.layout.width = width
self.layout.height = height
self.save(outfile, title=title)
self.layout.width = before_width
self.layout.height = before_height
except Exception as e:
raise Exception(e)
def to_image(self, outfile=None, monitor=1):
"""Saves the map as a PNG or JPG image.
Args:
outfile (str, optional): The output file path to the image. Defaults to None.
monitor (int, optional): The monitor to take the screenshot. Defaults to 1.
"""
if outfile is None:
outfile = os.path.join(os.getcwd(), "my_map.png")
if outfile.endswith(".png") or outfile.endswith(".jpg"):
pass
else:
print("The output file must be a PNG or JPG image.")
return
work_dir = os.path.dirname(outfile)
if not os.path.exists(work_dir):
os.makedirs(work_dir)
screenshot = screen_capture(outfile, monitor)
self.screenshot = screenshot
def toolbar_reset(self):
"""Reset the toolbar so that no tool is selected."""
toolbar_grid = self.toolbar
for tool in toolbar_grid.children:
tool.value = False
def add_raster(
self,
image,
bands=None,
layer_name=None,
colormap=None,
x_dim="x",
y_dim="y",
):
"""Adds a local raster dataset to the map.
Args:
image (str): The image file path.
bands (int or list, optional): The image bands to use. It can be either a number (e.g., 1) or a list (e.g., [3, 2, 1]). Defaults to None.
layer_name (str, optional): The layer name to use for the raster. Defaults to None.
colormap (str, optional): The name of the colormap to use for the raster, such as 'gray' and 'terrain'. More can be found at https://matplotlib.org/3.1.0/tutorials/colors/colormaps.html. Defaults to None.
x_dim (str, optional): The x dimension. Defaults to 'x'.
y_dim (str, optional): The y dimension. Defaults to 'y'.
"""
try:
import xarray_leaflet
except Exception:
# import platform
# if platform.system() != "Windows":
# # install_from_github(
# # url='https://github.com/davidbrochart/xarray_leaflet')
# check_install('xarray_leaflet')
# import xarray_leaflet
# else:
raise ImportError(
"You need to install xarray_leaflet first. See https://github.com/davidbrochart/xarray_leaflet"
)
import warnings
import numpy as np
import rioxarray
# import xarray as xr
import matplotlib.pyplot as plt
warnings.simplefilter("ignore")
if not os.path.exists(image):
print("The image file does not exist.")
return
if colormap is None:
colormap = plt.cm.inferno
if layer_name is None:
layer_name = "Layer_" + random_string()
if isinstance(colormap, str):
colormap = plt.cm.get_cmap(name=colormap)
da = rioxarray.open_rasterio(image, masked=True)
# print(da.rio.nodata)
multi_band = False
if len(da.band) > 1:
multi_band = True
if bands is None:
bands = [3, 2, 1]
else:
bands = 1
if multi_band:
da = da.rio.write_nodata(0)
else:
da = da.rio.write_nodata(np.nan)
da = da.sel(band=bands)
# crs = da.rio.crs
# nan = da.attrs['nodatavals'][0]
# da = da / da.max()
# # if multi_band:
# da = xr.where(da == nan, np.nan, da)
# da = da.rio.write_nodata(0)
# da = da.rio.write_crs(crs)
if multi_band and type(bands) == list:
layer = da.leaflet.plot(self, x_dim=x_dim, y_dim=y_dim, rgb_dim="band")
else:
layer = da.leaflet.plot(self, x_dim=x_dim, y_dim=y_dim, colormap=colormap)
layer.name = layer_name
def remove_drawn_features(self):
"""Removes user-drawn geometries from the map"""
if self.draw_layer is not None:
self.remove_layer(self.draw_layer)
self.draw_count = 0
self.draw_features = []
self.draw_last_feature = None
self.draw_layer = None
self.draw_last_json = None
self.draw_last_bounds = None
self.user_roi = None
self.user_rois = None
self.chart_values = []
self.chart_points = []
self.chart_labels = None
if self.draw_control is not None:
self.draw_control.clear()
def remove_last_drawn(self):
"""Removes user-drawn geometries from the map"""
if self.draw_layer is not None:
collection = ee.FeatureCollection(self.draw_features[:-1])
ee_draw_layer = ee_tile_layer(
collection, {"color": "blue"}, "Drawn Features", True, 0.5
)
if self.draw_count == 1:
self.remove_drawn_features()
else:
self.substitute_layer(self.draw_layer, ee_draw_layer)
self.draw_layer = ee_draw_layer
self.draw_count -= 1
self.draw_features = self.draw_features[:-1]
self.draw_last_feature = self.draw_features[-1]
self.draw_layer = ee_draw_layer
self.draw_last_json = None
self.draw_last_bounds = None
self.user_roi = ee.Feature(
collection.toList(collection.size()).get(
collection.size().subtract(1)
)
).geometry()
self.user_rois = collection
self.chart_values = self.chart_values[:-1]
self.chart_points = self.chart_points[:-1]
# self.chart_labels = None
def extract_values_to_points(self, filename):
"""Exports pixel values to a csv file based on user-drawn geometries.
Args:
filename (str): The output file path to the csv file or shapefile.
"""
import csv
filename = os.path.abspath(filename)
allowed_formats = ["csv", "shp"]
ext = filename[-3:]
if ext not in allowed_formats:
print(
"The output file must be one of the following: {}".format(
", ".join(allowed_formats)
)
)
return
out_dir = os.path.dirname(filename)
out_csv = filename[:-3] + "csv"
out_shp = filename[:-3] + "shp"
if not os.path.exists(out_dir):
os.makedirs(out_dir)
count = len(self.chart_points)
out_list = []
if count > 0:
header = ["id", "longitude", "latitude"] + self.chart_labels
out_list.append(header)
for i in range(0, count):
id = i + 1
line = [id] + self.chart_points[i] + self.chart_values[i]
out_list.append(line)
with open(out_csv, "w", newline="") as f:
writer = csv.writer(f)
writer.writerows(out_list)
if ext == "csv":
print("The csv file has been saved to: {}".format(out_csv))
else:
csv_to_shp(out_csv, out_shp)
print("The shapefile has been saved to: {}".format(out_shp))
def create_vis_widget(self, layer_dict):
"""Create a GUI for changing layer visualization parameters interactively.
Args:
layer_dict (dict): A dict containing information about the layer. It is an element of Map.ee_layer_dict.
Returns:
object: An ipywidget.
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
ee_object = layer_dict["ee_object"]
ee_layer = layer_dict["ee_layer"]
vis_params = layer_dict["vis_params"]
layer_name = ee_layer.name
layer_opacity = ee_layer.opacity
band_names = None
min_value = 0
max_value = 100
sel_bands = None
layer_palette = []
layer_gamma = 1
left_value = 0
right_value = 10000
self.colorbar_widget = widgets.Output(layout=widgets.Layout(height="60px"))
self.colorbar_ctrl = WidgetControl(
widget=self.colorbar_widget, position="bottomright"
)
self.add_control(self.colorbar_ctrl)
# def vdir(obj): # Get branca colormap list
# return [x for x in dir(obj) if not x.startswith("_")]
if isinstance(ee_object, ee.Image):
band_names = ee_object.bandNames().getInfo()
band_count = len(band_names)
if "min" in vis_params.keys():
min_value = vis_params["min"]
if min_value < left_value:
left_value = min_value - max_value
if "max" in vis_params.keys():
max_value = vis_params["max"]
right_value = 2 * max_value
if "gamma" in vis_params.keys():
layer_gamma = vis_params["gamma"]
if "bands" in vis_params.keys():
sel_bands = vis_params["bands"]
if "palette" in vis_params.keys():
layer_palette = [
color.replace("#", "") for color in list(vis_params["palette"])
]
vis_widget = widgets.VBox(
layout=widgets.Layout(padding="5px 5px 5px 8px", width="330px")
)
label = widgets.Label(value=f"{layer_name} visualization parameters")
radio1 = widgets.RadioButtons(
options=["1 band (Grayscale)"], layout={"width": "max-content"}
)
radio2 = widgets.RadioButtons(
options=["3 bands (RGB)"], layout={"width": "max-content"}
)
radio1.index = None
radio2.index = None
dropdown_width = "98px"
band1_dropdown = widgets.Dropdown(
options=band_names,
value=band_names[0],
layout=widgets.Layout(width=dropdown_width),
)
band2_dropdown = widgets.Dropdown(
options=band_names,
value=band_names[0],
layout=widgets.Layout(width=dropdown_width),
)
band3_dropdown = widgets.Dropdown(
options=band_names,
value=band_names[0],
layout=widgets.Layout(width=dropdown_width),
)
bands_hbox = widgets.HBox()
legend_chk = widgets.Checkbox(
value=False,
description="Legend",
indent=False,
layout=widgets.Layout(width="70px"),
)
color_picker = widgets.ColorPicker(
concise=False,
value="#000000",
layout=widgets.Layout(width="116px"),
style={"description_width": "initial"},
)
add_color = widgets.Button(
icon="plus",
tooltip="Add a hex color string to the palette",
layout=widgets.Layout(width="32px"),
)
del_color = widgets.Button(
icon="minus",
tooltip="Remove a hex color string from the palette",
layout=widgets.Layout(width="32px"),
)
reset_color = widgets.Button(
icon="eraser",
tooltip="Remove all color strings from the palette",
layout=widgets.Layout(width="34px"),
)
classes = widgets.Dropdown(
options=["Any"] + [str(i) for i in range(3, 13)],
description="Classes:",
layout=widgets.Layout(width="115px"),
style={"description_width": "initial"},
)
colormap = widgets.Dropdown(
options=plt.colormaps(),
value=None,
description="Colormap:",
layout=widgets.Layout(width="181px"),
style={"description_width": "initial"},
)
def classes_changed(change):
if change["new"]:
selected = change["owner"].value
if colormap.value is not None:
n_class = None
if selected != "Any":
n_class = int(classes.value)
colors = plt.cm.get_cmap(colormap.value, n_class)
cmap_colors = [
mpl.colors.rgb2hex(colors(i))[1:] for i in range(colors.N)
]
_, ax = plt.subplots(figsize=(6, 0.4))
cmap = mpl.colors.LinearSegmentedColormap.from_list(
"custom", to_hex_colors(cmap_colors), N=256
)
norm = mpl.colors.Normalize(
vmin=value_range.value[0], vmax=value_range.value[1]
)
mpl.colorbar.ColorbarBase(
ax, norm=norm, cmap=cmap, orientation="horizontal"
)
palette.value = ", ".join([color for color in cmap_colors])
if self.colorbar_widget is None:
self.colorbar_widget = widgets.Output(
layout=widgets.Layout(height="60px")
)
if self.colorbar_ctrl is None:
self.colorbar_ctrl = WidgetControl(
widget=self.colorbar_widget, position="bottomright"
)
self.add_control(self.colorbar_ctrl)
colorbar_output = self.colorbar_widget
with colorbar_output:
colorbar_output.clear_output()
plt.show()
if len(palette.value) > 0 and "," in palette.value:
labels = [
f"Class {i+1}"
for i in range(len(palette.value.split(",")))
]
legend_labels.value = ", ".join(labels)
classes.observe(classes_changed, "value")
palette = widgets.Text(
value=", ".join(layer_palette),
placeholder="List of hex color code (RRGGBB)",
description="Palette:",
tooltip="Enter a list of hex color code (RRGGBB)",
layout=widgets.Layout(width="300px"),
style={"description_width": "initial"},
)
def add_color_clicked(b):
if color_picker.value is not None:
if len(palette.value) == 0:
palette.value = color_picker.value[1:]
else:
palette.value += ", " + color_picker.value[1:]
def del_color_clicked(b):
if "," in palette.value:
items = [item.strip() for item in palette.value.split(",")]
palette.value = ", ".join(items[:-1])
else:
palette.value = ""
def reset_color_clicked(b):
palette.value = ""
add_color.on_click(add_color_clicked)
del_color.on_click(del_color_clicked)
reset_color.on_click(reset_color_clicked)
spacer = widgets.Label(layout=widgets.Layout(width="5px"))
v_spacer = widgets.Label(layout=widgets.Layout(height="5px"))
radio_btn = widgets.HBox([radio1, spacer, spacer, spacer, radio2])
value_range = widgets.FloatRangeSlider(
value=[min_value, max_value],
min=left_value,
max=right_value,
step=0.1,
description="Range:",
disabled=False,
continuous_update=False,
readout=True,
readout_format=".1f",
layout=widgets.Layout(width="300px"),
style={"description_width": "45px"},
)
range_hbox = widgets.HBox([value_range, spacer])
opacity = widgets.FloatSlider(
value=layer_opacity,
min=0,
max=1,
step=0.01,
description="Opacity:",
continuous_update=False,
readout=True,
readout_format=".2f",
layout=widgets.Layout(width="320px"),
style={"description_width": "50px"},
)
gamma = widgets.FloatSlider(
value=layer_gamma,
min=0.1,
max=10,
step=0.01,
description="Gamma:",
continuous_update=False,
readout=True,
readout_format=".2f",
layout=widgets.Layout(width="320px"),
style={"description_width": "50px"},
)
legend_chk = widgets.Checkbox(
value=False,
description="Legend",
indent=False,
layout=widgets.Layout(width="70px"),
)
linear_chk = widgets.Checkbox(
value=True,
description="Linear colormap",
indent=False,
layout=widgets.Layout(width="150px"),
)
step_chk = widgets.Checkbox(
value=False,
description="Step colormap",
indent=False,
layout=widgets.Layout(width="140px"),
)
legend_title = widgets.Text(
value="Legend",
description="Legend title:",
tooltip="Enter a title for the legend",
layout=widgets.Layout(width="300px"),
style={"description_width": "initial"},
)
legend_labels = widgets.Text(
value="Class 1, Class 2, Class 3",
description="Legend labels:",
tooltip="Enter a a list of labels for the legend",
layout=widgets.Layout(width="300px"),
style={"description_width": "initial"},
)
colormap_hbox = widgets.HBox([linear_chk, step_chk])
legend_vbox = widgets.VBox()
def linear_chk_changed(change):
if change["new"]:
step_chk.value = False
legend_vbox.children = [colormap_hbox]
else:
step_chk.value = True
def step_chk_changed(change):
if change["new"]:
linear_chk.value = False
if len(layer_palette) > 0:
legend_labels.value = ",".join(
[
"Class " + str(i)
for i in range(1, len(layer_palette) + 1)
]
)
legend_vbox.children = [
colormap_hbox,
legend_title,
legend_labels,
]
else:
linear_chk.value = True
linear_chk.observe(linear_chk_changed, "value")
step_chk.observe(step_chk_changed, "value")
def colormap_changed(change):
if change["new"]:
n_class = None
if classes.value != "Any":
n_class = int(classes.value)
colors = plt.cm.get_cmap(colormap.value, n_class)
cmap_colors = [
mpl.colors.rgb2hex(colors(i))[1:] for i in range(colors.N)
]
_, ax = plt.subplots(figsize=(6, 0.4))
cmap = mpl.colors.LinearSegmentedColormap.from_list(
"custom", to_hex_colors(cmap_colors), N=256
)
norm = mpl.colors.Normalize(
vmin=value_range.value[0], vmax=value_range.value[1]
)
mpl.colorbar.ColorbarBase(
ax, norm=norm, cmap=cmap, orientation="horizontal"
)
palette.value = ", ".join(cmap_colors)
if self.colorbar_widget is None:
self.colorbar_widget = widgets.Output(
layout=widgets.Layout(height="60px")
)
if self.colorbar_ctrl is None:
self.colorbar_ctrl = WidgetControl(
widget=self.colorbar_widget, position="bottomright"
)
self.add_control(self.colorbar_ctrl)
colorbar_output = self.colorbar_widget
with colorbar_output:
colorbar_output.clear_output()
plt.show()
# display(colorbar)
if len(palette.value) > 0 and "," in palette.value:
labels = [
f"Class {i+1}" for i in range(len(palette.value.split(",")))
]
legend_labels.value = ", ".join(labels)
colormap.observe(colormap_changed, "value")
btn_width = "97.5px"
import_btn = widgets.Button(
description="Import",
button_style="primary",
tooltip="Import vis params to notebook",
layout=widgets.Layout(width=btn_width),
)
apply_btn = widgets.Button(
description="Apply",
tooltip="Apply vis params to the layer",
layout=widgets.Layout(width=btn_width),
)
close_btn = widgets.Button(
description="Close",
tooltip="Close vis params diaglog",
layout=widgets.Layout(width=btn_width),
)
def import_btn_clicked(b):
vis = {}
if radio1.index == 0:
vis["bands"] = [band1_dropdown.value]
if len(palette.value) > 0:
vis["palette"] = palette.value.split(",")
else:
vis["bands"] = [
band1_dropdown.value,
band2_dropdown.value,
band3_dropdown.value,
]
vis["min"] = value_range.value[0]
vis["max"] = value_range.value[1]
vis["opacity"] = opacity.value
vis["gamma"] = gamma.value
create_code_cell(f"vis_params = {str(vis)}")
def apply_btn_clicked(b):
vis = {}
if radio1.index == 0:
vis["bands"] = [band1_dropdown.value]
if len(palette.value) > 0:
vis["palette"] = [c.strip() for c in palette.value.split(",")]
else:
vis["bands"] = [
band1_dropdown.value,
band2_dropdown.value,
band3_dropdown.value,
]
vis["gamma"] = gamma.value
vis["min"] = value_range.value[0]
vis["max"] = value_range.value[1]
self.addLayer(ee_object, vis, layer_name, True, opacity.value)
ee_layer.visible = False
if legend_chk.value:
if (
self.colorbar_ctrl is not None
and self.colorbar_ctrl in self.controls
):
self.remove_control(self.colorbar_ctrl)
self.colorbar_ctrl.close()
self.colorbar_widget.close()
if (
"colorbar" in layer_dict.keys()
and layer_dict["colorbar"] in self.controls
):
self.remove_control(layer_dict["colorbar"])
layer_dict["colorbar"] = None
if linear_chk.value:
if (
"legend" in layer_dict.keys()
and layer_dict["legend"] in self.controls
):
self.remove_control(layer_dict["legend"])
layer_dict["legend"] = None
if len(palette.value) > 0 and "," in palette.value:
colors = to_hex_colors(
[color.strip() for color in palette.value.split(",")]
)
self.add_colorbar(
vis_params={
"palette": colors,
"min": value_range.value[0],
"max": value_range.value[1],
},
layer_name=layer_name,
)
elif step_chk.value:
if len(palette.value) > 0 and "," in palette.value:
colors = to_hex_colors(
[color.strip() for color in palette.value.split(",")]
)
labels = [
label.strip()
for label in legend_labels.value.split(",")
]
self.add_legend(
legend_title=legend_title.value,
legend_keys=labels,
legend_colors=colors,
layer_name=layer_name,
)
else:
if radio1.index == 0 and "palette" in vis:
self.colorbar_widget.clear_output()
with self.colorbar_widget:
_, ax = plt.subplots(figsize=(6, 0.4))
colors = to_hex_colors(vis["palette"])
cmap = mpl.colors.LinearSegmentedColormap.from_list(
"custom", colors, N=256
)
norm = mpl.colors.Normalize(
vmin=vis["min"], vmax=vis["max"]
)
mpl.colorbar.ColorbarBase(
ax, norm=norm, cmap=cmap, orientation="horizontal"
)
plt.show()
if (
"colorbar" in layer_dict.keys()
and layer_dict["colorbar"] in self.controls
):
self.remove_control(layer_dict["colorbar"])
layer_dict["colorbar"] = None
if (
"legend" in layer_dict.keys()
and layer_dict["legend"] in self.controls
):
self.remove_control(layer_dict["legend"])
layer_dict["legend"] = None
def close_btn_clicked(b):
if self.vis_control in self.controls:
self.remove_control(self.vis_control)
self.vis_control = None
self.vis_widget.close()
if (
self.colorbar_ctrl is not None
and self.colorbar_ctrl in self.controls
):
self.remove_control(self.colorbar_ctrl)
self.colorbar_ctrl = None
self.colorbar_widget.close()
import_btn.on_click(import_btn_clicked)
apply_btn.on_click(apply_btn_clicked)
close_btn.on_click(close_btn_clicked)
color_hbox = widgets.HBox(
[legend_chk, color_picker, add_color, del_color, reset_color]
)
btn_hbox = widgets.HBox([import_btn, apply_btn, close_btn])
gray_box = [
label,
radio_btn,
bands_hbox,
v_spacer,
range_hbox,
opacity,
gamma,
widgets.HBox([classes, colormap]),
palette,
color_hbox,
legend_vbox,
btn_hbox,
]
rgb_box = [
label,
radio_btn,
bands_hbox,
v_spacer,
range_hbox,
opacity,
gamma,
btn_hbox,
]
def legend_chk_changed(change):
if change["new"]:
linear_chk.value = True
legend_vbox.children = [
widgets.HBox([linear_chk, step_chk]),
# legend_title,
# legend_labels,
]
else:
legend_vbox.children = []
legend_chk.observe(legend_chk_changed, "value")
if band_count < 3:
radio1.index = 0
band1_dropdown.layout.width = "300px"
bands_hbox.children = [band1_dropdown]
vis_widget.children = gray_box
legend_chk.value = False
if len(palette.value) > 0 and "," in palette.value:
import matplotlib as mpl
import matplotlib.pyplot as plt
colors = to_hex_colors(
[color.strip() for color in palette.value.split(",")]
)
self.colorbar_widget.clear_output()
with self.colorbar_widget:
_, ax = plt.subplots(figsize=(6, 0.4))
cmap = mpl.colors.LinearSegmentedColormap.from_list(
"custom", colors, N=256
)
norm = mpl.colors.Normalize(
vmin=value_range.value[0], vmax=value_range.value[1]
)
mpl.colorbar.ColorbarBase(
ax, norm=norm, cmap=cmap, orientation="horizontal"
)
plt.show()
else:
radio2.index = 0
if (sel_bands is None) or (len(sel_bands) < 2):
sel_bands = band_names[0:3]
band1_dropdown.value = sel_bands[0]
band2_dropdown.value = sel_bands[1]
band3_dropdown.value = sel_bands[2]
bands_hbox.children = [
band1_dropdown,
band2_dropdown,
band3_dropdown,
]
vis_widget.children = rgb_box
def radio1_observer(sender):
radio2.unobserve(radio2_observer, names=["value"])
radio2.index = None
radio2.observe(radio2_observer, names=["value"])
band1_dropdown.layout.width = "300px"
bands_hbox.children = [band1_dropdown]
palette.value = ", ".join(layer_palette)
palette.disabled = False
color_picker.disabled = False
add_color.disabled = False
del_color.disabled = False
reset_color.disabled = False
vis_widget.children = gray_box
if len(palette.value) > 0 and "," in palette.value:
colors = [color.strip() for color in palette.value.split(",")]
_, ax = plt.subplots(figsize=(6, 0.4))
cmap = mpl.colors.LinearSegmentedColormap.from_list(
"custom", to_hex_colors(colors), N=256
)
norm = mpl.colors.Normalize(vmin=0, vmax=1)
mpl.colorbar.ColorbarBase(
ax, norm=norm, cmap=cmap, orientation="horizontal"
)
self.colorbar_widget = widgets.Output(
layout=widgets.Layout(height="60px")
)
self.colorbar_ctrl = WidgetControl(
widget=self.colorbar_widget, position="bottomright"
)
if self.colorbar_ctrl not in self.controls:
self.add_control(self.colorbar_ctrl)
self.colorbar_widget.clear_output()
with self.colorbar_widget:
plt.show()
def radio2_observer(sender):
radio1.unobserve(radio1_observer, names=["value"])
radio1.index = None
radio1.observe(radio1_observer, names=["value"])
band1_dropdown.layout.width = dropdown_width
bands_hbox.children = [
band1_dropdown,
band2_dropdown,
band3_dropdown,
]
palette.value = ""
palette.disabled = True
color_picker.disabled = True
add_color.disabled = True
del_color.disabled = True
reset_color.disabled = True
vis_widget.children = rgb_box
if (
self.colorbar_ctrl is not None
and self.colorbar_ctrl in self.controls
):
self.remove_control(self.colorbar_ctrl)
self.colorbar_ctrl.close()
self.colorbar_widget.close()
radio1.observe(radio1_observer, names=["value"])
radio2.observe(radio2_observer, names=["value"])
return vis_widget
elif isinstance(ee_object, ee.FeatureCollection):
vis_widget = widgets.VBox(
layout=widgets.Layout(padding="5px 5px 5px 8px", width="330px")
)
label = widgets.Label(value=f"{layer_name} visualization parameters")
new_layer_name = widgets.Text(
value=f"{layer_name} style",
description="New layer name:",
style={"description_width": "initial"},
)
color = widgets.ColorPicker(
concise=False,
value="#000000",
description="Color:",
layout=widgets.Layout(width="140px"),
style={"description_width": "initial"},
)
color_opacity = widgets.FloatSlider(
value=layer_opacity,
min=0,
max=1,
step=0.01,
description="Opacity:",
continuous_update=True,
readout=False,
# readout_format=".2f",
layout=widgets.Layout(width="130px"),
style={"description_width": "50px"},
)
color_opacity_label = widgets.Label(
style={"description_width": "initial"},
layout=widgets.Layout(padding="0px"),
)
widgets.jslink((color_opacity, "value"), (color_opacity_label, "value"))
point_size = widgets.IntText(
value=3,
description="Point size:",
layout=widgets.Layout(width="110px"),
style={"description_width": "initial"},
)
point_shape_options = [
"circle",
"square",
"diamond",
"cross",
"plus",
"pentagram",
"hexagram",
"triangle",
"triangle_up",
"triangle_down",
"triangle_left",
"triangle_right",
"pentagon",
"hexagon",
"star5",
"star6",
]
point_shape = widgets.Dropdown(
options=point_shape_options,
value="circle",
description="Point shape:",
layout=widgets.Layout(width="185px"),
style={"description_width": "initial"},
)
line_width = widgets.IntText(
value=2,
description="Line width:",
layout=widgets.Layout(width="110px"),
style={"description_width": "initial"},
)
line_type = widgets.Dropdown(
options=["solid", "dotted", "dashed"],
value="solid",
description="Line type:",
layout=widgets.Layout(width="185px"),
style={"description_width": "initial"},
)
fill_color = widgets.ColorPicker(
concise=False,
value="#000000",
description="Fill Color:",
layout=widgets.Layout(width="160px"),
style={"description_width": "initial"},
)
fill_color_opacity = widgets.FloatSlider(
value=0.66,
min=0,
max=1,
step=0.01,
description="Opacity:",
continuous_update=True,
readout=False,
# readout_format=".2f",
layout=widgets.Layout(width="110px"),
style={"description_width": "50px"},
)
fill_color_opacity_label = widgets.Label(
style={"description_width": "initial"},
layout=widgets.Layout(padding="0px"),
)
widgets.jslink(
(fill_color_opacity, "value"),
(fill_color_opacity_label, "value"),
)
color_picker = widgets.ColorPicker(
concise=False,
value="#000000",
layout=widgets.Layout(width="116px"),
style={"description_width": "initial"},
)
add_color = widgets.Button(
icon="plus",
tooltip="Add a hex color string to the palette",
layout=widgets.Layout(width="32px"),
)
del_color = widgets.Button(
icon="minus",
tooltip="Remove a hex color string from the palette",
layout=widgets.Layout(width="32px"),
)
reset_color = widgets.Button(
icon="eraser",
tooltip="Remove all color strings from the palette",
layout=widgets.Layout(width="34px"),
)
palette = widgets.Text(
value="",
placeholder="List of hex code (RRGGBB) separated by comma",
description="Palette:",
tooltip="Enter a list of hex code (RRGGBB) separated by comma",
layout=widgets.Layout(width="300px"),
style={"description_width": "initial"},
)
legend_title = widgets.Text(
value="Legend",
description="Legend title:",
tooltip="Enter a title for the legend",
layout=widgets.Layout(width="300px"),
style={"description_width": "initial"},
)
legend_labels = widgets.Text(
value="Labels",
description="Legend labels:",
tooltip="Enter a a list of labels for the legend",
layout=widgets.Layout(width="300px"),
style={"description_width": "initial"},
)
def add_color_clicked(b):
if color_picker.value is not None:
if len(palette.value) == 0:
palette.value = color_picker.value[1:]
else:
palette.value += ", " + color_picker.value[1:]
def del_color_clicked(b):
if "," in palette.value:
items = [item.strip() for item in palette.value.split(",")]
palette.value = ", ".join(items[:-1])
else:
palette.value = ""
def reset_color_clicked(b):
palette.value = ""
add_color.on_click(add_color_clicked)
del_color.on_click(del_color_clicked)
reset_color.on_click(reset_color_clicked)
field = widgets.Dropdown(
options=[],
value=None,
description="Field:",
layout=widgets.Layout(width="140px"),
style={"description_width": "initial"},
)
field_values = widgets.Dropdown(
options=[],
value=None,
description="Values:",
layout=widgets.Layout(width="156px"),
style={"description_width": "initial"},
)
classes = widgets.Dropdown(
options=["Any"] + [str(i) for i in range(3, 13)],
description="Classes:",
layout=widgets.Layout(width="115px"),
style={"description_width": "initial"},
)
colormap = widgets.Dropdown(
options=["viridis"],
value="viridis",
description="Colormap:",
layout=widgets.Layout(width="181px"),
style={"description_width": "initial"},
)
def classes_changed(change):
if change["new"]:
selected = change["owner"].value
if colormap.value is not None:
n_class = None
if selected != "Any":
n_class = int(classes.value)
colors = plt.cm.get_cmap(colormap.value, n_class)
cmap_colors = [
mpl.colors.rgb2hex(colors(i))[1:] for i in range(colors.N)
]
_, ax = plt.subplots(figsize=(6, 0.4))
cmap = mpl.colors.LinearSegmentedColormap.from_list(
"custom", to_hex_colors(cmap_colors), N=256
)
norm = mpl.colors.Normalize(vmin=0, vmax=1)
mpl.colorbar.ColorbarBase(
ax, norm=norm, cmap=cmap, orientation="horizontal"
)
palette.value = ", ".join([color for color in cmap_colors])
if self.colorbar_widget is None:
self.colorbar_widget = widgets.Output(
layout=widgets.Layout(height="60px")
)
if self.colorbar_ctrl is None:
self.colorbar_ctrl = WidgetControl(
widget=self.colorbar_widget, position="bottomright"
)
self.add_control(self.colorbar_ctrl)
colorbar_output = self.colorbar_widget
with colorbar_output:
colorbar_output.clear_output()
plt.show()
if len(palette.value) > 0 and "," in palette.value:
labels = [
f"Class {i+1}"
for i in range(len(palette.value.split(",")))
]
legend_labels.value = ", ".join(labels)
classes.observe(classes_changed, "value")
def colormap_changed(change):
if change["new"]:
n_class = None
if classes.value != "Any":
n_class = int(classes.value)
colors = plt.cm.get_cmap(colormap.value, n_class)
cmap_colors = [
mpl.colors.rgb2hex(colors(i))[1:] for i in range(colors.N)
]
_, ax = plt.subplots(figsize=(6, 0.4))
cmap = mpl.colors.LinearSegmentedColormap.from_list(
"custom", to_hex_colors(cmap_colors), N=256
)
norm = mpl.colors.Normalize(vmin=0, vmax=1)
mpl.colorbar.ColorbarBase(
ax, norm=norm, cmap=cmap, orientation="horizontal"
)
palette.value = ", ".join(cmap_colors)
if self.colorbar_widget is None:
self.colorbar_widget = widgets.Output(
layout=widgets.Layout(height="60px")
)
if self.colorbar_ctrl is None:
self.colorbar_ctrl = WidgetControl(
widget=self.colorbar_widget, position="bottomright"
)
self.add_control(self.colorbar_ctrl)
colorbar_output = self.colorbar_widget
with colorbar_output:
colorbar_output.clear_output()
plt.show()
# display(colorbar)
if len(palette.value) > 0 and "," in palette.value:
labels = [
f"Class {i+1}" for i in range(len(palette.value.split(",")))
]
legend_labels.value = ", ".join(labels)
colormap.observe(colormap_changed, "value")
btn_width = "97.5px"
import_btn = widgets.Button(
description="Import",
button_style="primary",
tooltip="Import vis params to notebook",
layout=widgets.Layout(width=btn_width),
)
apply_btn = widgets.Button(
description="Apply",
tooltip="Apply vis params to the layer",
layout=widgets.Layout(width=btn_width),
)
close_btn = widgets.Button(
description="Close",
tooltip="Close vis params diaglog",
layout=widgets.Layout(width=btn_width),
)
style_chk = widgets.Checkbox(
value=False,
description="Style by attribute",
indent=False,
layout=widgets.Layout(width="140px"),
)
legend_chk = widgets.Checkbox(
value=False,
description="Legend",
indent=False,
layout=widgets.Layout(width="70px"),
)
compute_label = widgets.Label(value="")
style_vbox = widgets.VBox([widgets.HBox([style_chk, compute_label])])
def style_chk_changed(change):
if change["new"]:
if (
self.colorbar_ctrl is not None
and self.colorbar_ctrl in self.controls
):
self.remove_control(self.colorbar_ctrl)
self.colorbar_ctrl.close()
self.colorbar_widget.close()
self.colorbar_widget = widgets.Output(
layout=widgets.Layout(height="60px")
)
self.colorbar_ctrl = WidgetControl(
widget=self.colorbar_widget, position="bottomright"
)
self.add_control(self.colorbar_ctrl)
fill_color.disabled = True
colormap.options = plt.colormaps()
colormap.value = "viridis"
style_vbox.children = [
widgets.HBox([style_chk, compute_label]),
widgets.HBox([field, field_values]),
widgets.HBox([classes, colormap]),
palette,
widgets.HBox(
[
legend_chk,
color_picker,
add_color,
del_color,
reset_color,
]
),
]
compute_label.value = "Computing ..."
field.options = (
ee.Feature(ee_object.first()).propertyNames().getInfo()
)
compute_label.value = ""
classes.value = "Any"
legend_chk.value = False
else:
fill_color.disabled = False
style_vbox.children = [widgets.HBox([style_chk, compute_label])]
compute_label.value = ""
if (
self.colorbar_ctrl is not None
and self.colorbar_ctrl in self.controls
):
self.remove_control(self.colorbar_ctrl)
self.colorbar_ctrl = None
self.colorbar_widget = None
# legend_chk.value = False
style_chk.observe(style_chk_changed, "value")
def legend_chk_changed(change):
if change["new"]:
style_vbox.children = list(style_vbox.children) + [
widgets.VBox([legend_title, legend_labels])
]
if len(palette.value) > 0 and "," in palette.value:
labels = [
f"Class {i+1}" for i in range(len(palette.value.split(",")))
]
legend_labels.value = ", ".join(labels)
else:
style_vbox.children = [
widgets.HBox([style_chk, compute_label]),
widgets.HBox([field, field_values]),
widgets.HBox([classes, colormap]),
palette,
widgets.HBox(
[
legend_chk,
color_picker,
add_color,
del_color,
reset_color,
]
),
]
legend_chk.observe(legend_chk_changed, "value")
def field_changed(change):
if change["new"]:
compute_label.value = "Computing ..."
options = ee_object.aggregate_array(field.value).getInfo()
if options is not None:
options = list(set(options))
options.sort()
field_values.options = options
compute_label.value = ""
field.observe(field_changed, "value")
def get_vis_params():
vis = {}
vis["color"] = color.value[1:] + str(
hex(int(color_opacity.value * 255))
)[2:].zfill(2)
if geometry_type(ee_object) in ["Point", "MultiPoint"]:
vis["pointSize"] = point_size.value
vis["pointShape"] = point_shape.value
vis["width"] = line_width.value
vis["lineType"] = line_type.value
vis["fillColor"] = fill_color.value[1:] + str(
hex(int(fill_color_opacity.value * 255))
)[2:].zfill(2)
return vis
def import_btn_clicked(b):
vis = get_vis_params()
create_code_cell(f"vis_params = {str(vis)}")
def apply_btn_clicked(b):
compute_label.value = "Computing ..."
if new_layer_name.value in self.ee_layer_names:
old_layer = new_layer_name.value
if "legend" in self.ee_layer_dict[old_layer].keys():
legend = self.ee_layer_dict[old_layer]["legend"]
if legend in self.controls:
self.remove_control(legend)
legend.close()
if "colorbar" in self.ee_layer_dict[old_layer].keys():
colorbar = self.ee_layer_dict[old_layer]["colorbar"]
if colorbar in self.controls:
self.remove_control(colorbar)
colorbar.close()
if not style_chk.value:
vis = get_vis_params()
self.addLayer(ee_object.style(**vis), {}, new_layer_name.value)
ee_layer.visible = False
elif (
style_chk.value and len(palette.value) > 0 and "," in palette.value
):
colors = ee.List(
[
color.strip()
+ str(hex(int(fill_color_opacity.value * 255)))[2:].zfill(2)
for color in palette.value.split(",")
]
)
arr = ee_object.aggregate_array(field.value).distinct().sort()
fc = ee_object.map(
lambda f: f.set({"styleIndex": arr.indexOf(f.get(field.value))})
)
step = arr.size().divide(colors.size()).ceil()
fc = fc.map(
lambda f: f.set(
{
"style": {
"color": color.value[1:]
+ str(hex(int(color_opacity.value * 255)))[
2:
].zfill(2),
"pointSize": point_size.value,
"pointShape": point_shape.value,
"width": line_width.value,
"lineType": line_type.value,
"fillColor": colors.get(
ee.Number(
ee.Number(f.get("styleIndex")).divide(step)
).floor()
),
}
}
)
)
self.addLayer(
fc.style(**{"styleProperty": "style"}),
{},
f"{new_layer_name.value}",
)
if (
len(palette.value)
and legend_chk.value
and len(legend_labels.value) > 0
):
legend_colors = [
color.strip() for color in palette.value.split(",")
]
legend_keys = [
label.strip() for label in legend_labels.value.split(",")
]
self.add_legend(
legend_title=legend_title.value,
legend_keys=legend_keys,
legend_colors=legend_colors,
layer_name=new_layer_name.value,
)
ee_layer.visible = False
compute_label.value = ""
def close_btn_clicked(b):
self.remove_control(self.vis_control)
self.vis_control.close()
self.vis_widget.close()
if (
self.colorbar_ctrl is not None
and self.colorbar_ctrl in self.controls
):
self.remove_control(self.colorbar_ctrl)
self.colorbar_ctrl.close()
self.colorbar_widget.close()
import_btn.on_click(import_btn_clicked)
apply_btn.on_click(apply_btn_clicked)
close_btn.on_click(close_btn_clicked)
vis_widget.children = [
label,
new_layer_name,
widgets.HBox([color, color_opacity, color_opacity_label]),
widgets.HBox([point_size, point_shape]),
widgets.HBox([line_width, line_type]),
widgets.HBox(
[fill_color, fill_color_opacity, fill_color_opacity_label]
),
style_vbox,
widgets.HBox([import_btn, apply_btn, close_btn]),
]
if geometry_type(ee_object) in ["Point", "MultiPoint"]:
point_size.disabled = False
point_shape.disabled = False
else:
point_size.disabled = True
point_shape.disabled = True
return vis_widget
def add_styled_vector(
self, ee_object, column, palette, layer_name="Untitled", **kwargs
):
"""Adds a styled vector to the map.
Args:
ee_object (object): An ee.FeatureCollection.
column (str): The column name to use for styling.
palette (list | dict): The palette (e.g., list of colors or a dict containing label and color pairs) to use for styling.
layer_name (str, optional): The name to be used for the new layer. Defaults to "Untitled".
"""
styled_vector = vector_styling(ee_object, column, palette, **kwargs)
self.addLayer(styled_vector.style(**{"styleProperty": "style"}), {}, layer_name)
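# A minimal usage sketch for add_styled_vector (kept as a comment so it does not
# execute at import time). The asset ID, column name, and palette below are
# illustrative assumptions, not values required by the method:
#
#   m = Map()
#   states = ee.FeatureCollection("TIGER/2018/States")
#   m.add_styled_vector(states, column="NAME",
#                       palette=["006633", "E5FFCC", "662A00"],
#                       layer_name="Styled states")
#   m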
def add_shapefile(
self,
in_shp,
layer_name="Untitled",
style={},
hover_style={},
style_callback=None,
fill_colors=["black"],
info_mode="on_hover",
):
"""Adds a shapefile to the map.
Args:
in_shp (str): The input file path to the shapefile.
layer_name (str, optional): The layer name to be used. Defaults to "Untitled".
style (dict, optional): A dictionary specifying the style to be used. Defaults to {}.
hover_style (dict, optional): Hover style dictionary. Defaults to {}.
style_callback (function, optional): Styling function that is called for each feature, and should return the feature style. This styling function takes the feature as argument. Defaults to None.
fill_colors (list, optional): The random colors to use for filling polygons. Defaults to ["black"].
info_mode (str, optional): Displays the attributes by either on_hover or on_click. Any value other than "on_hover" or "on_click" will be treated as None. Defaults to "on_hover".
Raises:
FileNotFoundError: The provided shapefile could not be found.
"""
in_shp = os.path.abspath(in_shp)
if not os.path.exists(in_shp):
raise FileNotFoundError("The provided shapefile could not be found.")
geojson = shp_to_geojson(in_shp)
self.add_geojson(
geojson,
layer_name,
style,
hover_style,
style_callback,
fill_colors,
info_mode,
)
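# A minimal usage sketch for add_shapefile, assuming a local shapefile exists
# (the path below is a hypothetical placeholder). Kept as a comment so it does
# not run at import time:
#
#   m = Map()
#   m.add_shapefile("data/countries.shp", layer_name="Countries",
#                   info_mode="on_click")
#   m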
def add_geojson(
self,
in_geojson,
layer_name="Untitled",
style={},
hover_style={},
style_callback=None,
fill_colors=["black"],
info_mode="on_hover",
):
"""Adds a GeoJSON file to the map.
Args:
in_geojson (str | dict): The file path or http URL to the input GeoJSON or a dictionary containing the geojson.
layer_name (str, optional): The layer name to be used. Defaults to "Untitled".
style (dict, optional): A dictionary specifying the style to be used. Defaults to {}.
hover_style (dict, optional): Hover style dictionary. Defaults to {}.
style_callback (function, optional): Styling function that is called for each feature, and should return the feature style. This styling function takes the feature as argument. Defaults to None.
fill_colors (list, optional): The random colors to use for filling polygons. Defaults to ["black"].
info_mode (str, optional): Displays the attributes by either on_hover or on_click. Any value other than "on_hover" or "on_click" will be treated as None. Defaults to "on_hover".
Raises:
FileNotFoundError: The provided GeoJSON file could not be found.
"""
import json
import random
import requests
try:
if isinstance(in_geojson, str):
if in_geojson.startswith("http"):
data = requests.get(in_geojson).json()
else:
in_geojson = os.path.abspath(in_geojson)
if not os.path.exists(in_geojson):
raise FileNotFoundError(
"The provided GeoJSON file could not be found."
)
with open(in_geojson, encoding="utf-8") as f:
data = json.load(f)
elif isinstance(in_geojson, dict):
data = in_geojson
else:
raise TypeError("The input geojson must be a type of str or dict.")
except Exception as e:
raise Exception(e)
if not style:
style = {
# "stroke": True,
"color": "#000000",
"weight": 1,
"opacity": 1,
# "fill": True,
# "fillColor": "#ffffff",
"fillOpacity": 0.1,
# "dashArray": "9"
# "clickable": True,
}
elif "weight" not in style:
style["weight"] = 1
if not hover_style:
hover_style = {"weight": style["weight"] + 1, "fillOpacity": 0.5}
def random_color(feature):
return {
"color": "black",
"fillColor": random.choice(fill_colors),
}
toolbar_button = widgets.ToggleButton(
value=True,
tooltip="Toolbar",
icon="info",
layout=widgets.Layout(
width="28px", height="28px", padding="0px 0px 0px 4px"
),
)
close_button = widgets.ToggleButton(
value=False,
tooltip="Close the tool",
icon="times",
# button_style="primary",
layout=widgets.Layout(
height="28px", width="28px", padding="0px 0px 0px 4px"
),
)
html = widgets.HTML()
html.layout.margin = "0px 10px 0px 10px"
html.layout.max_height = "250px"
html.layout.max_width = "250px"
output_widget = widgets.VBox(
[widgets.HBox([toolbar_button, close_button]), html]
)
info_control = WidgetControl(widget=output_widget, position="bottomright")
if info_mode in ["on_hover", "on_click"]:
self.add_control(info_control)
def toolbar_btn_click(change):
if change["new"]:
close_button.value = False
output_widget.children = [
widgets.VBox([widgets.HBox([toolbar_button, close_button]), html])
]
else:
output_widget.children = [widgets.HBox([toolbar_button, close_button])]
toolbar_button.observe(toolbar_btn_click, "value")
def close_btn_click(change):
if change["new"]:
toolbar_button.value = False
if info_control in self.controls:
self.remove_control(info_control)
output_widget.close()
close_button.observe(close_btn_click, "value")
def update_html(feature, **kwargs):
value = [
"<h5><b>{}: </b>{}</h5>".format(prop, feature["properties"][prop])
for prop in feature["properties"].keys()
][:-1]
value = """{}""".format("".join(value))
html.value = value
if style_callback is None:
style_callback = random_color
geojson = ipyleaflet.GeoJSON(
data=data,
style=style,
hover_style=hover_style,
style_callback=style_callback,
name=layer_name,
)
if info_mode == "on_hover":
geojson.on_hover(update_html)
elif info_mode == "on_click":
geojson.on_click(update_html)
self.add_layer(geojson)
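# A minimal usage sketch for add_geojson. The URL below is a hypothetical
# placeholder; a local file path or an already-loaded dict works as well.
# Kept as a comment so it does not run at import time:
#
#   m = Map()
#   m.add_geojson("https://example.com/countries.geojson",
#                 layer_name="Countries",
#                 style={"color": "#3388ff", "weight": 2, "fillOpacity": 0.1})
#   m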
def add_kml(
self,
in_kml,
layer_name="Untitled",
style={},
hover_style={},
style_callback=None,
fill_colors=["black"],
info_mode="on_hover",
):
"""Adds a GeoJSON file to the map.
Args:
in_kml (str): The input file path to the KML.
layer_name (str, optional): The layer name to be used. Defaults to "Untitled".
style (dict, optional): A dictionary specifying the style to be used. Defaults to {}.
hover_style (dict, optional): Hover style dictionary. Defaults to {}.
style_callback (function, optional): Styling function that is called for each feature, and should return the feature style. This styling function takes the feature as argument. Defaults to None.
fill_colors (list, optional): The random colors to use for filling polygons. Defaults to ["black"].
info_mode (str, optional): Displays the attributes by either on_hover or on_click. Any value other than "on_hover" or "on_click" will be treated as None. Defaults to "on_hover".
Raises:
FileNotFoundError: The provided KML file could not be found.
"""
in_kml = os.path.abspath(in_kml)
if not os.path.exists(in_kml):
raise FileNotFoundError("The provided KML file could not be found.")
self.add_vector(
in_kml,
layer_name,
style=style,
hover_style=hover_style,
style_callback=style_callback,
fill_colors=fill_colors,
info_mode=info_mode,
)
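# A minimal usage sketch for add_kml (the file path is a hypothetical
# placeholder). Internally this delegates to add_vector:
#
#   m = Map()
#   m.add_kml("data/boundary.kml", layer_name="Boundary")
#   m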
def add_vector(
self,
filename,
layer_name="Untitled",
to_ee=False,
bbox=None,
mask=None,
rows=None,
style={},
hover_style={},
style_callback=None,
fill_colors=["black"],
info_mode="on_hover",
**kwargs,
):
"""Adds any geopandas-supported vector dataset to the map.
Args:
filename (str): Either the absolute or relative path to the file or URL to be opened, or any object with a read() method (such as an open file or StringIO).
layer_name (str, optional): The layer name to use. Defaults to "Untitled".
to_ee (bool, optional): Whether to convert the GeoJSON to ee.FeatureCollection. Defaults to False.
bbox (tuple | GeoDataFrame or GeoSeries | shapely Geometry, optional): Filter features by given bounding box, GeoSeries, GeoDataFrame or a shapely geometry. CRS mis-matches are resolved if given a GeoSeries or GeoDataFrame. Cannot be used with mask. Defaults to None.
mask (dict | GeoDataFrame or GeoSeries | shapely Geometry, optional): Filter for features that intersect with the given dict-like geojson geometry, GeoSeries, GeoDataFrame or shapely geometry. CRS mis-matches are resolved if given a GeoSeries or GeoDataFrame. Cannot be used with bbox. Defaults to None.
rows (int or slice, optional): Load in specific rows by passing an integer (first n rows) or a slice() object. Defaults to None.
style (dict, optional): A dictionary specifying the style to be used. Defaults to {}.
hover_style (dict, optional): Hover style dictionary. Defaults to {}.
style_callback (function, optional): Styling function that is called for each feature, and should return the feature style. This styling function takes the feature as argument. Defaults to None.
fill_colors (list, optional): The random colors to use for filling polygons. Defaults to ["black"].
info_mode (str, optional): Displays the attributes by either on_hover or on_click. Any value other than "on_hover" or "on_click" will be treated as None. Defaults to "on_hover".
"""
if not filename.startswith("http"):
filename = os.path.abspath(filename)
if to_ee:
fc = vector_to_ee(
filename,
bbox=bbox,
mask=mask,
rows=rows,
geodesic=True,
**kwargs,
)
self.addLayer(fc, {}, layer_name)
else:
ext = os.path.splitext(filename)[1].lower()
if ext == ".shp":
self.add_shapefile(
filename,
layer_name,
style,
hover_style,
style_callback,
fill_colors,
info_mode,
)
elif ext in [".json", ".geojson"]:
self.add_geojson(
filename,
layer_name,
style,
hover_style,
style_callback,
fill_colors,
info_mode,
)
else:
geojson = vector_to_geojson(
filename,
bbox=bbox,
mask=mask,
rows=rows,
epsg="4326",
**kwargs,
)
self.add_geojson(
geojson,
layer_name,
style,
hover_style,
style_callback,
fill_colors,
info_mode,
)
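# A minimal usage sketch for add_vector, assuming geopandas can read the file
# (the GeoPackage path below is a hypothetical placeholder). Setting to_ee=True
# would instead add the features as an ee.FeatureCollection:
#
#   m = Map()
#   m.add_vector("data/parcels.gpkg", layer_name="Parcels", to_ee=False)
#   m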
def add_time_slider(
self,
ee_object,
vis_params={},
region=None,
layer_name="Time series",
labels=None,
time_interval=1,
position="bottomright",
slider_length="150px",
date_format="YYYY-MM-dd",
):
"""Adds a time slider to the map.
Args:
ee_object (ee.Image | ee.ImageCollection): The Image or ImageCollection to visualize.
vis_params (dict, optional): Visualization parameters to use for visualizing image. Defaults to {}.
region (ee.Geometry | ee.FeatureCollection): The region to visualize.
layer_name (str, optional): The layer name to be used. Defaults to "Time series".
labels (list, optional): The list of labels to be used for the time series. Defaults to None.
time_interval (int, optional): Time interval in seconds. Defaults to 1.
position (str, optional): Position to place the time slider, can be any of ['topleft', 'topright', 'bottomleft', 'bottomright']. Defaults to "bottomright".
slider_length (str, optional): Length of the time slider. Defaults to "150px".
date_format (str, optional): The date format to use. Defaults to 'YYYY-MM-dd'.
Raises:
TypeError: If the ee_object is not ee.Image | ee.ImageCollection.
"""
import time
import threading
if isinstance(ee_object, ee.Image):
if region is not None:
if isinstance(region, ee.Geometry):
ee_object = ee_object.clip(region)
elif isinstance(region, ee.FeatureCollection):
ee_object = ee_object.clipToCollection(region)
if layer_name not in self.ee_raster_layer_names:
self.addLayer(ee_object, {}, layer_name, False)
band_names = ee_object.bandNames()
ee_object = ee.ImageCollection(
ee_object.bandNames().map(lambda b: ee_object.select([b]))
)
if labels is not None:
if len(labels) != int(ee_object.size().getInfo()):
raise ValueError(
"The length of labels must be equal to the number of bands in the image."
)
else:
labels = band_names.getInfo()
elif isinstance(ee_object, ee.ImageCollection):
if region is not None:
if isinstance(region, ee.Geometry):
ee_object = ee_object.map(lambda img: img.clip(region))
elif isinstance(region, ee.FeatureCollection):
ee_object = ee_object.map(lambda img: img.clipToCollection(region))
if labels is not None:
if len(labels) != int(ee_object.size().getInfo()):
raise ValueError(
"The length of labels must be equal to the number of images in the ImageCollection."
)
else:
labels = (
ee_object.aggregate_array("system:time_start")
.map(lambda d: ee.Date(d).format(date_format))
.getInfo()
)
else:
raise TypeError("The ee_object must be an ee.Image or ee.ImageCollection")
# if labels is not None:
# size = len(labels)
# else:
# size = ee_object.size().getInfo()
# labels = [str(i) for i in range(1, size + 1)]
first = ee.Image(ee_object.first())
if layer_name not in self.ee_raster_layer_names:
self.addLayer(ee_object.toBands(), {}, layer_name, False)
self.addLayer(first, vis_params, "Image X")
slider = widgets.IntSlider(
min=1,
max=len(labels),
readout=False,
continuous_update=False,
layout=widgets.Layout(width=slider_length),
)
label = widgets.Label(
value=labels[0], layout=widgets.Layout(padding="0px 5px 0px 5px")
)
play_btn = widgets.Button(
icon="play",
tooltip="Play the time slider",
button_style="primary",
layout=widgets.Layout(width="32px"),
)
pause_btn = widgets.Button(
icon="pause",
tooltip="Pause the time slider",
button_style="primary",
layout=widgets.Layout(width="32px"),
)
close_btn = widgets.Button(
icon="times",
tooltip="Close the time slider",
button_style="primary",
layout=widgets.Layout(width="32px"),
)
play_chk = widgets.Checkbox(value=False)
slider_widget = widgets.HBox([slider, label, play_btn, pause_btn, close_btn])
def play_click(b):
play_chk.value = True
def work(slider):
while play_chk.value:
if slider.value < len(labels):
slider.value += 1
else:
slider.value = 1
time.sleep(time_interval)
thread = threading.Thread(target=work, args=(slider,))
thread.start()
def pause_click(b):
play_chk.value = False
play_btn.on_click(play_click)
pause_btn.on_click(pause_click)
def slider_changed(change):
self.default_style = {"cursor": "wait"}
index = slider.value - 1
label.value = labels[index]
image = ee.Image(ee_object.toList(ee_object.size()).get(index))
if layer_name not in self.ee_raster_layer_names:
self.addLayer(ee_object.toBands(), {}, layer_name, False)
self.addLayer(image, vis_params, "Image X")
self.default_style = {"cursor": "default"}
slider.observe(slider_changed, "value")
def close_click(b):
play_chk.value = False
self.toolbar_reset()
self.remove_ee_layer("Image X")
self.remove_ee_layer(layer_name)
if self.slider_ctrl is not None and self.slider_ctrl in self.controls:
self.remove_control(self.slider_ctrl)
slider_widget.close()
close_btn.on_click(close_click)
slider_ctrl = WidgetControl(widget=slider_widget, position=position)
self.add_control(slider_ctrl)
self.slider_ctrl = slider_ctrl
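# A minimal usage sketch for add_time_slider, assuming access to a public
# ImageCollection (the asset ID and vis_params are illustrative assumptions):
#
#   m = Map()
#   collection = (ee.ImageCollection("COPERNICUS/S2_SR")
#                 .filterDate("2021-01-01", "2021-12-31")
#                 .limit(12))
#   m.add_time_slider(collection,
#                     vis_params={"bands": ["B4", "B3", "B2"], "min": 0, "max": 3000},
#                     time_interval=2)
#   m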
def add_xy_data(
self,
in_csv,
x="longitude",
y="latitude",
label=None,
layer_name="Marker cluster",
to_ee=False,
):
"""Adds points from a CSV file containing lat/lon information and display data on the map.
Args:
in_csv (str): The file path to the input CSV file.
x (str, optional): The name of the column containing longitude coordinates. Defaults to "longitude".
y (str, optional): The name of the column containing latitude coordinates. Defaults to "latitude".
label (str, optional): The name of the column containing label information to be used for the marker popup. Defaults to None.
layer_name (str, optional): The layer name to use. Defaults to "Marker cluster".
to_ee (bool, optional): Whether to convert the csv to an ee.FeatureCollection. Defaults to False.
Raises:
FileNotFoundError: The specified input csv does not exist.
ValueError: The specified x column does not exist.
ValueError: The specified y column does not exist.
ValueError: The specified label column does not exist.
"""
import pandas as pd
if not in_csv.startswith("http") and (not os.path.exists(in_csv)):
raise FileNotFoundError("The specified input csv does not exist.")
df = pd.read_csv(in_csv)
col_names = df.columns.values.tolist()
if x not in col_names:
raise ValueError(f"x must be one of the following: {', '.join(col_names)}")
if y not in col_names:
raise ValueError(f"y must be one of the following: {', '.join(col_names)}")
if label is not None and (label not in col_names):
raise ValueError(
f"label must be one of the following: {', '.join(col_names)}"
)
self.default_style = {"cursor": "wait"}
if to_ee:
fc = csv_to_ee(in_csv, latitude=y, longitude=x)
self.addLayer(fc, {}, layer_name)
else:
points = list(zip(df[y], df[x]))
if label is not None:
labels = df[label]
markers = [
Marker(
location=point,
draggable=False,
popup=widgets.HTML(labels[index]),
)
for index, point in enumerate(points)
]
else:
markers = [Marker(location=point, draggable=False) for point in points]
marker_cluster = MarkerCluster(markers=markers, name=layer_name)
self.add_layer(marker_cluster)
self.default_style = {"cursor": "default"}
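# A minimal usage sketch for add_xy_data, assuming a CSV with "longitude",
# "latitude", and "name" columns (the file path and column names are
# hypothetical placeholders):
#
#   m = Map()
#   m.add_xy_data("data/stations.csv", x="longitude", y="latitude",
#                 label="name", layer_name="Stations")
#   m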
# The functions below are outside the Map class.
def ee_tile_layer(
ee_object, vis_params={}, name="Layer untitled", shown=True, opacity=1.0
):
"""Converts and Earth Engine layer to ipyleaflet TileLayer.
Args:
ee_object (Collection|Feature|Image|MapId): The object to add to the map.
vis_params (dict, optional): The visualization parameters. Defaults to {}.
name (str, optional): The name of the layer. Defaults to 'Layer untitled'.
shown (bool, optional): A flag indicating whether the layer should be on by default. Defaults to True.
opacity (float, optional): The layer's opacity represented as a number between 0 and 1. Defaults to 1.
"""
image = None
if (
not isinstance(ee_object, ee.Image)
and not isinstance(ee_object, ee.ImageCollection)
and not isinstance(ee_object, ee.FeatureCollection)
and not isinstance(ee_object, ee.Feature)
and not isinstance(ee_object, ee.Geometry)
):
err_str = "\n\nThe image argument in 'addLayer' function must be an instace of one of ee.Image, ee.Geometry, ee.Feature or ee.FeatureCollection."
raise AttributeError(err_str)
if (
isinstance(ee_object, ee.geometry.Geometry)
or isinstance(ee_object, ee.feature.Feature)
or isinstance(ee_object, ee.featurecollection.FeatureCollection)
):
features = ee.FeatureCollection(ee_object)
width = 2
if "width" in vis_params:
width = vis_params["width"]
color = "000000"
if "color" in vis_params:
color = vis_params["color"]
image_fill = features.style(**{"fillColor": color}).updateMask(
ee.Image.constant(0.5)
)
image_outline = features.style(
**{"color": color, "fillColor": "00000000", "width": width}
)
image = image_fill.blend(image_outline)
elif isinstance(ee_object, ee.image.Image):
image = ee_object
elif isinstance(ee_object, ee.imagecollection.ImageCollection):
image = ee_object.mosaic()
map_id_dict = ee.Image(image).getMapId(vis_params)
tile_layer = TileLayer(
url=map_id_dict["tile_fetcher"].url_format,
attribution="Google Earth Engine",
name=name,
opacity=opacity,
visible=shown,
)
return tile_layer
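# A minimal usage sketch for ee_tile_layer; the SRTM asset ID and vis_params
# below are illustrative assumptions. The returned TileLayer can be added to
# any ipyleaflet map with add_layer:
#
#   dem = ee.Image("USGS/SRTMGL1_003")
#   tile = ee_tile_layer(dem, {"min": 0, "max": 4000}, name="SRTM DEM")
#   m = Map()
#   m.add_layer(tile)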
def linked_maps(
rows=2,
cols=2,
height="400px",
ee_objects=[],
vis_params=[],
labels=[],
label_position="topright",
**kwargs,
):
"""Create linked maps of Earth Engine data layers.
Args:
rows (int, optional): The number of rows of maps to create. Defaults to 2.
cols (int, optional): The number of columns of maps to create. Defaults to 2.
height (str, optional): The height of each map in pixels. Defaults to "400px".
ee_objects (list, optional): The list of Earth Engine objects to use for each map. Defaults to [].
vis_params (list, optional): The list of visualization parameters to use for each map. Defaults to [].
labels (list, optional): The list of labels to show on the map. Defaults to [].
label_position (str, optional): The position of the label, can be [topleft, topright, bottomleft, bottomright]. Defaults to "topright".
Raises:
ValueError: If the length of ee_objects is not equal to rows*cols.
ValueError: If the length of vis_params is not equal to rows*cols.
ValueError: If the length of labels is not equal to rows*cols.
Returns:
ipywidget: A GridspecLayout widget.
"""
grid = widgets.GridspecLayout(rows, cols, grid_gap="0px")
count = rows * cols
maps = []
if len(ee_objects) > 0:
if len(ee_objects) == 1:
ee_objects = ee_objects * count
elif len(ee_objects) < count:
raise ValueError(f"The length of ee_objects must be equal to {count}.")
if len(vis_params) > 0:
if len(vis_params) == 1:
vis_params = vis_params * count
elif len(vis_params) < count:
raise ValueError(f"The length of vis_params must be equal to {count}.")
if len(labels) > 0:
if len(labels) == 1:
labels = labels * count
elif len(labels) < count:
raise ValueError(f"The length of labels must be equal to {count}.")
for i in range(rows):
for j in range(cols):
index = i * cols + j  # row-major index into the flattened rows x cols grid
m = Map(
height=height,
lite_mode=True,
add_google_map=False,
layout=widgets.Layout(margin="0px", padding="0px"),
**kwargs,
)
if len(ee_objects) > 0:
m.addLayer(ee_objects[index], vis_params[index], labels[index])
if len(labels) > 0:
label = widgets.Label(
labels[index], layout=widgets.Layout(padding="0px 5px 0px 5px")
)
control = WidgetControl(widget=label, position=label_position)
m.add_control(control)
maps.append(m)
widgets.jslink((maps[0], "center"), (m, "center"))
widgets.jslink((maps[0], "zoom"), (m, "zoom"))
output = widgets.Output()
with output:
display(m)
grid[i, j] = output
return grid
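# A minimal usage sketch for linked_maps, reusing a single image across a 1x2
# grid (the asset ID and vis_params are illustrative assumptions):
#
#   dem = ee.Image("USGS/SRTMGL1_003")
#   grid = linked_maps(rows=1, cols=2,
#                      ee_objects=[dem],
#                      vis_params=[{"min": 0, "max": 4000}],
#                      labels=["Left", "Right"])
#   grid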
|
sphereexe.py
|
"""
This script is calls the main sphere-overburden routine from sphereresponse.py with the user determined values that are input
in the widgets that are defined and created in options_menu.py
This python file is compiled into an exe by calling Pyinstaller in a directory with sphereresponse.py and options_menu.py
the size of the executable can be decreased by omitting unecessary python libraries when compiling
"""
# Standard library modules
import os
import sys
import csv
import threading
from os.path import expanduser
# 3rd party modules
import qdarkstyle
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.backends.backend_qt4agg as backend_qt4agg
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
import PyQt4.QtCore as QtCore
import PyQt4.QtGui as QtGui
# Local application modules
from sphereresponse import sphereresponse
from options_menu import OptionsMenu
import resources
APP_NAME = 'EM sphere-overburden response'
AUTHOR = 'Anthony Zamperoni'
class AppForm(QtGui.QMainWindow):
def __init__(self, parent=None):
QtGui.QMainWindow.__init__(self, parent)
# Set the window title
self.setWindowTitle(APP_NAME)
self.imported = False
# Create the options menu in a dock widget
self.options_menu = OptionsMenu()
dock = QtGui.QDockWidget('Options', self)
dock.setFeatures(
QtGui.QDockWidget.NoDockWidgetFeatures |
QtGui.QDockWidget.DockWidgetMovable |
QtGui.QDockWidget.DockWidgetFloatable
)
dock.setAllowedAreas(
QtCore.Qt.LeftDockWidgetArea | QtCore.Qt.RightDockWidgetArea,
)
dock.setWidget(self.options_menu)
self.addDockWidget(QtCore.Qt.LeftDockWidgetArea, dock)
# Connect the signals from the options menu
self.connect(self.options_menu.update_btn, QtCore.SIGNAL(
'clicked()'),
self.launch_selenium_Thread,
)
self.connect(self.options_menu.clear_graph_btn, QtCore.SIGNAL(
'clicked()'),
self.clear_graph,
)
self.connect(self.options_menu.legend_cb, QtCore.SIGNAL(
'stateChanged(int)'),
self.redraw_graph,
)
self.connect(self.options_menu.grid_cb, QtCore.SIGNAL(
'stateChanged(int)'),
self.redraw_graph,
)
self.connect(self.options_menu.read_data_btn, QtCore.SIGNAL(
'clicked()'), self.read_csv
)
if self.options_menu.grid_cb.isChecked() == True and self.options_menu.legend_cb.isChecked() == True:
self.fig = Figure()
self.canvas = backend_qt4agg.FigureCanvasQTAgg(self.fig)
self.canvas.setParent(self)
self.ax1 = self.axes = self.fig.add_subplot(111)
self.ax2 = self.axes = self.fig.add_subplot(211)
if self.options_menu.grid_cb.isChecked() == False or self.options_menu.legend_cb.isChecked() == False:
self.fig = Figure()
self.canvas = backend_qt4agg.FigureCanvasQTAgg(self.fig)
self.canvas.setParent(self)
self.status_text = QtGui.QLabel("Set paramters and select response components to be plotted")
self.statusBar().addWidget(self.status_text, 0)
self.statusBar().setFont(QtGui.QFont("Times", 20, QtGui.QFont.Bold))
self.progressBar = QtGui.QProgressBar(self)
self.statusBar().addPermanentWidget(self.progressBar, 1)
self.statusBar().addWidget(self.progressBar)
# Initialize the graph
self.clear_graph()
# Set the graph as the main window widget
self.setCentralWidget(self.canvas)
# Create the exit application function in the menubar
file_exit_action = QtGui.QAction('E&xit', self)
file_exit_action.setToolTip('Exit')
file_exit_action.setIcon(QtGui.QIcon(':/resources/door_open.png'))
self.connect(
file_exit_action,
QtCore.SIGNAL('triggered()'),
self.close,
)
about_action = QtGui.QAction('&About', self)
about_action.setToolTip('About')
about_action.setIcon(QtGui.QIcon(':/resources/icon_info.gif'))
self.connect(
about_action,
QtCore.SIGNAL('triggered()'),
self.show_about,
)
# Create the menubar add further functionality at later date
file_menu = self.menuBar().addMenu('&File')
#file_menu.addAction(file_preview_waveform)
file_menu.addAction(file_exit_action)
file_menu = self.menuBar().addMenu('&Edit')
file_menu = self.menuBar().addMenu('&View')
help_menu = self.menuBar().addMenu('&Help')
help_menu.addAction(about_action)
def read_csv(self):
"""
Reads in waveform from csv to convolve
"""
self.filePath = QtGui.QFileDialog.getOpenFileName(self, 'Select CSV File:', '', "CSV data files (*.csv)")
if self.filePath == "":
self.status_text.setText("No waveform data selected")
return
else:
with open(self.filePath) as input_file:
self.imported = True
self.waveformdata = np.genfromtxt(input_file,delimiter = ',')
self.status_text.setText("Successfully loaded and updated waveform parameters")
if self.waveformdata.shape[1] >= 2:
self.windows = self.waveformdata.T[0]
self.wave = self.waveformdata.T[1]
else:
self.windows = self.waveformdata.T[0]
def calculate_data(self):
"""
Calls the main routine that calculates the response, using the user-input values read
from the widgets defined in options_menu.py
"""
sphere = sphereresponse()
sphere.a = self.options_menu.a_sb.value()
sphere.rsp = np.array([int(n) for n in self.options_menu.rspop.text().split(',')],dtype=np.int64)
sphere.offset_tx_rx = np.array([int(n) for n in self.options_menu.txrx.text().split(',')], dtype = np.int64)
sphere.rtx = np.array([int(n) for n in self.options_menu.tx.text().split(',')], dtype = np.int64)
sphere.thick_ob = self.options_menu.thick_ob_sb.value()
sphere.sigma_sp = self.options_menu.sigma_sp_sb.value()
sphere.sigma_ob = self.options_menu.sigma_ob_sb.value()
sphere.strike = self.options_menu.strike.value()
sphere.dip = self.options_menu.dip.value()
sphere.P = self.options_menu.pulse.value()
sphere.T = self.options_menu.timedelta_sb.value()
if self.imported == True:
sphere.wc = self.windows
sphere.wave = self.wave
# Checks if the sphere is dipping or not passed as value to main routine
if self.options_menu.dip.value() == 0:
sphere.apply_dip = 0
else: sphere.apply_dip = 1
if sphere.sigma_sp == 0:
sphere.sigma_sp = 0.00000000000001
if sphere.sigma_ob == 0:
sphere.sigma_ob = 0.00000000000001
results = sphere.calculate()
"""
The following is an if-elif chain for plotting the different components of the response, depending on which boxes are checked.
This will be rewritten more efficiently.
"""
if (self.options_menu.grid_cb.isChecked() and self.options_menu.legend_cb.isChecked()) and self.options_menu.gridy_cb.isChecked():
self.axes = self.fig.add_subplot(3,1,1)
#self.subplot(2, 1, 1)
x=sphere.H_tot_x
i=0
while i < len(sphere.H_tot_x):
self.axes.plot(np.linspace(sphere.profile[0][0], sphere.profile[0][100], 101), sphere.H_tot_x[i],color= '0.4') # will have to change x axis for changing param
i += 1
self.axes.set_xlabel('Profile (m)')
self.axes.set_ylabel('x-component (A/m)')
self.axes.grid(True, which = 'both', ls = '-')
# the first subplot in the first figure
self.axes = self.fig.add_subplot(3,1,2)
z=sphere.H_tot_z
k=0
while k < len(sphere.H_tot_z):
self.axes.plot(np.linspace(sphere.profile[0][0], sphere.profile[0][100], 101), sphere.H_tot_z[k],color= '0.4') # will have to change x axis for changing param
k += 1
self.axes.set_xlabel('Profile (m)')
self.axes.set_ylabel('z-component (A/m)')
self.axes.grid(True, which = 'both', ls = '-')
self.axes = self.fig.add_subplot(3,1,3)
#self.subplot(2, 1, 1)
y=sphere.H_tot_y
i=0
while i < len(sphere.H_tot_y):
self.axes.plot(np.linspace(sphere.profile[0][0], sphere.profile[0][100], 101), sphere.H_tot_y[i],color= '0.4') # will have to change x axis for changing param
i += 1
self.axes.set_xlabel('Profile (m)')
self.axes.set_ylabel('y-component (A/m)')
self.axes.grid(True, which = 'both', ls = '-')
#self.canvas.addWidget(self.navi_toolbar)
self.canvas.draw()
elif self.options_menu.grid_cb.isChecked() and self.options_menu.legend_cb.isChecked() and self.options_menu.gridy_cb.isChecked() == False:
self.axes = self.fig.add_subplot(2,1,1)
#self.subplot(2, 1, 1)
x=sphere.H_tot_x
i=0
while i < len(sphere.H_tot_x):
self.axes.plot(np.linspace(sphere.profile[0][0], sphere.profile[0][100], 101), sphere.H_tot_x[i],color= '0.4') # will have to change x axis for changing param
i += 1
self.axes.set_xlabel('Profile (m)')
self.axes.set_ylabel('x-component (A/m)')
self.axes.grid(True, which = 'both', ls = '-')
# the first subplot in the first figure
self.axes = self.fig.add_subplot(2,1,2)
z=sphere.H_tot_z
k=0
while k < len(sphere.H_tot_z):
self.axes.plot(np.linspace(sphere.profile[0][0], sphere.profile[0][100], 101), sphere.H_tot_z[k],color= '0.4') # will have to change x axis for changing param
k += 1
self.axes.set_xlabel('Profile (m)')
self.axes.set_ylabel('z-component (A/m)')
self.axes.grid(True, which = 'both', ls = '-')
#self.canvas.addWidget(self.navi_toolbar)
self.canvas.draw()
elif self.options_menu.grid_cb.isChecked() == False and self.options_menu.legend_cb.isChecked() and self.options_menu.gridy_cb.isChecked():
self.axes = self.fig.add_subplot(2,1,1)
#self.subplot(2, 1, 1)
x=sphere.H_tot_x
i=0
while i < len(sphere.H_tot_x):
self.axes.plot(np.linspace(sphere.profile[0][0], sphere.profile[0][100], 101), sphere.H_tot_x[i],color= '0.4') # will have to change x axis for changing param
i += 1
self.axes.set_xlabel('Profile (m)')
self.axes.set_ylabel('x-component (A/m)')
self.axes.grid(True, which = 'both', ls = '-')
# the first subplot in the first figure
self.axes = self.fig.add_subplot(2,1,2)
y=sphere.H_tot_y
k=0
while k < len(sphere.H_tot_y):  # iterate over the y-component being plotted
self.axes.plot(np.linspace(sphere.profile[0][0], sphere.profile[0][100], 101), sphere.H_tot_y[k],color= '0.4') # will have to change x axis for changing param
k += 1
self.axes.set_xlabel('Profile (m)')
self.axes.set_ylabel('y-component (A/m)')
self.axes.grid(True, which = 'both', ls = '-')
#self.canvas.addWidget(self.navi_toolbar)
self.canvas.draw()
elif self.options_menu.grid_cb.isChecked() and self.options_menu.legend_cb.isChecked() == False and self.options_menu.gridy_cb.isChecked():
self.axes = self.fig.add_subplot(2,1,1)
#self.subplot(2, 1, 1)
z=sphere.H_tot_z
i=0
while i < len(sphere.H_tot_x):
self.axes.plot(np.linspace(sphere.profile[0][0], sphere.profile[0][100], 101), sphere.H_tot_z[i],color= '0.4') # will have to change x axis for changing param
i += 1
self.axes.set_xlabel('Profile (m)')
self.axes.set_ylabel('z-component (A/m)')
self.axes.grid(True, which = 'both', ls = '-')
# the first subplot in the first figure
self.axes = self.fig.add_subplot(2,1,2)
y=sphere.H_tot_y
k=0
while k < len(sphere.H_tot_y):  # iterate over the y-component being plotted
self.axes.plot(np.linspace(sphere.profile[0][0], sphere.profile[0][100], 101), sphere.H_tot_y[k],color= '0.4') # will have to change x axis for changing param
k += 1
self.axes.set_xlabel('Profile (m)')
self.axes.set_ylabel('y-component (A/m)')
self.axes.grid(True, which = 'both', ls = '-')
#self.canvas.addWidget(self.navi_toolbar)
self.canvas.draw()
elif self.options_menu.legend_cb.isChecked() and self.options_menu.grid_cb.isChecked() == False and self.options_menu.gridy_cb.isChecked() == False:
self.fig.clf()
self.axes = self.fig.add_subplot(111)
x=sphere.H_tot_x
i=0
while i < len(sphere.H_tot_x):
self.axes.plot(np.linspace(sphere.profile[0][0], sphere.profile[0][100], 101), sphere.H_tot_x[i],color= '0.4') # will have to change x axis for changing param
i += 1
self.axes.set_xlabel('Profile (m)')
self.axes.set_ylabel('x-component (A/m)')
self.axes.grid(True, which = 'both', ls = '-')
#self.axes.plot(x)
self.canvas.draw()
elif self.options_menu.grid_cb.isChecked() and self.options_menu.legend_cb.isChecked() == False and self.options_menu.gridy_cb.isChecked() == False:
self.fig.clf()
self.axes = self.fig.add_subplot(111)
z=sphere.H_tot_z
k=0
while k < len(sphere.H_tot_z):
self.axes.plot(np.linspace(sphere.profile[0][0], sphere.profile[0][100], 101), sphere.H_tot_z[k],color= '0.4') # will have to change x axis for changing param
k += 1
self.axes.set_xlabel('Profile (m)')
self.axes.set_ylabel('z-component (A/m)')
self.axes.grid(True, which = 'both', ls = '-')
#self.axes.plot(x)
self.canvas.draw()
elif self.options_menu.grid_cb.isChecked() == False and self.options_menu.legend_cb.isChecked() == False and self.options_menu.gridy_cb.isChecked():
self.fig.clf()
self.axes = self.fig.add_subplot(111)
y=sphere.H_tot_y
k=0
while k < len(sphere.H_tot_y):  # iterate over the y-component being plotted
self.axes.plot(np.linspace(sphere.profile[0][0], sphere.profile[0][100], 101), sphere.H_tot_y[k],color= '0.4') # will have to change x axis for changing param
k += 1
self.axes.set_xlabel('Profile (m)')
self.axes.set_ylabel('y-component (A/m)')
self.axes.grid(True, which = 'both', ls = '-')
#self.axes.plot(x)
self.canvas.draw()
self.progressBar.setRange(0,1)
self.status_text.setText("Finished")
self.statusBar().setFont(QtGui.QFont("Times", 20, QtGui.QFont.Bold))
def clear_graph(self):
self.redraw_graph()
def redraw_graph(self):
self.fig.clf()
self.canvas.draw()
def launch_selenium_Thread(self):
"""
A function to prevent the program from becoming unresponsive while the response is being calculated/plotted
"""
t = threading.Thread(target=self.calculate_data)
self.status_text.setText("Generating response")
# Create updating progress bar
self.statusBar().setFont(QtGui.QFont("Times", 20, QtGui.QFont.Bold))
self.progressBar.setRange(0,0)
t.start()
def show_about(self):
"""
Display the "about" dialog box.
"""
message = '''<font size="+2">%s</font>
<p>A sphere - overburden response plotter written in Python.
<p>Written by %s,
<a href="http://opensource.org/licenses/MIT">MIT Licensed</a>
<p>Icons from <a href="http://www.famfamfam.com/">famfamfam</a> and
<a href="http://commons.wikimedia.org/">Wikimedia
Commons</a>.''' % (APP_NAME, AUTHOR)
QtGui.QMessageBox.about(self, 'About ' + APP_NAME, message)
if __name__ == "__main__":
# Dark UI theme
dark_stylesheet = qdarkstyle.load_stylesheet_pyqt()
app = QtGui.QApplication(sys.argv)
app.setWindowIcon(QtGui.QIcon(':/resources/icon.svg'))
app.setStyleSheet(dark_stylesheet)
form = AppForm()
form.show()
app.exec_()
|
engine.py
|
# -*- coding: utf-8 -*-
#
# This file is part of the python-chess library.
# Copyright (C) 2012-2018 Niklas Fiekas <niklas.fiekas@backscattering.de>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import collections
import logging
import threading
import os
import sys
import signal
import platform
try:
import queue # Python 3
except ImportError:
import Queue as queue # Python 2
if os.name == "posix" and sys.version_info[0] < 3:
try:
import subprocess32 as subprocess
except ImportError:
import subprocess
else:
import subprocess
FUTURE_POLL_TIMEOUT = 0.1 if platform.system() == "Windows" else 60
LOGGER = logging.getLogger(__name__)
class EngineTerminatedException(Exception):
"""The engine has been terminated."""
pass
class EngineStateException(Exception):
"""Unexpected engine state."""
pass
class Option(collections.namedtuple("Option", "name type default min max var")):
"""Information about an available option for an UCI engine."""
__slots__ = ()
class MockProcess(object):
def __init__(self, engine):
self.engine = engine
self._expectations = collections.deque()
self._is_dead = threading.Event()
self._std_streams_closed = False
self.engine.on_process_spawned(self)
self._send_queue = queue.Queue()
self._send_thread = threading.Thread(target=self._send_thread_target)
self._send_thread.daemon = True
self._send_thread.start()
def _send_thread_target(self):
while not self._is_dead.is_set():
line = self._send_queue.get()
if line is not None:
self.engine.on_line_received(line)
self._send_queue.task_done()
def expect(self, expectation, responses=()):
self._expectations.append((expectation, responses))
def assert_done(self):
assert not self._expectations, "pending expectations: {0}".format(self._expectations)
def assert_terminated(self):
self.assert_done()
assert self._is_dead.is_set()
def is_alive(self):
return not self._is_dead.is_set()
def terminate(self):
self._is_dead.set()
self._send_queue.put(None)
self.engine.on_terminated()
def kill(self):
self._is_dead.set()
self._send_queue.put(None)
self.engine.on_terminated()
def send_line(self, string):
assert self.is_alive()
assert self._expectations, "unexpected: {0}".format(string)
expectation, responses = self._expectations.popleft()
assert expectation == string, "expected: {0}, got {1}".format(expectation, string)
for response in responses:
self._send_queue.put(response)
def wait_for_return_code(self):
self._is_dead.wait()
return 0
def pid(self):
return None
def __repr__(self):
return "<MockProcess at {0}>".format(hex(id(self)))
class PopenProcess(object):
def __init__(self, engine, command, **kwargs):
self.engine = engine
self._receiving_thread = threading.Thread(target=self._receiving_thread_target)
self._receiving_thread.daemon = True
self._stdin_lock = threading.Lock()
self.engine.on_process_spawned(self)
popen_args = {
"stdout": subprocess.PIPE,
"stdin": subprocess.PIPE,
"bufsize": 1, # Line buffering
"universal_newlines": True,
}
popen_args.update(kwargs)
self.process = subprocess.Popen(command, **popen_args)
self._receiving_thread.start()
def _receiving_thread_target(self):
while True:
line = self.process.stdout.readline()
if not line:
# Stream closed.
break
self.engine.on_line_received(line.rstrip())
# Close file descriptors.
self.process.stdout.close()
with self._stdin_lock:
self.process.stdin.close()
# Ensure the process is terminated (not just the in/out streams).
if self.is_alive():
self.terminate()
self.wait_for_return_code()
self.engine.on_terminated()
def is_alive(self):
return self.process.poll() is None
def terminate(self):
self.process.terminate()
def kill(self):
self.process.kill()
def send_line(self, string):
with self._stdin_lock:
self.process.stdin.write(string + "\n")
self.process.stdin.flush()
def wait_for_return_code(self):
self.process.wait()
return self.process.returncode
def pid(self):
return self.process.pid
def __repr__(self):
return "<PopenProcess at {0} (pid={1})>".format(hex(id(self)), self.pid())
class SpurProcess(object):
def __init__(self, engine, shell, command):
self.engine = engine
self.shell = shell
self._stdout_buffer = []
self._result = None
self._waiting_thread = threading.Thread(target=self._waiting_thread_target)
self._waiting_thread.daemon = True
self.engine.on_process_spawned(self)
self.process = self.shell.spawn(command, store_pid=True, allow_error=True, stdout=self)
self._waiting_thread.start()
def write(self, byte):
# Internally called whenever a byte is received.
if byte == b"\r":
pass
elif byte == b"\n":
self.engine.on_line_received(b"".join(self._stdout_buffer).decode("utf-8"))
del self._stdout_buffer[:]
else:
self._stdout_buffer.append(byte)
def _waiting_thread_target(self):
self._result = self.process.wait_for_result()
self.engine.on_terminated()
def is_alive(self):
return self.process.is_running()
def terminate(self):
self.process.send_signal(signal.SIGTERM)
def kill(self):
self.process.send_signal(signal.SIGKILL)
def send_line(self, string):
self.process.stdin_write(string.encode("utf-8"))
self.process.stdin_write(b"\n")
def wait_for_return_code(self):
return self.process.wait_for_result().return_code
def pid(self):
return self.process.pid
def __repr__(self):
return "<SpurProcess at {0} (pid={1})>".format(hex(id(self)), self.pid())
class OptionMap(collections.MutableMapping):
def __init__(self, data=None, **kwargs):
self._store = dict()
if data is None:
data = {}
self.update(data, **kwargs)
def __setitem__(self, key, value):
self._store[key.lower()] = (key, value)
def __getitem__(self, key):
return self._store[key.lower()][1]
def __delitem__(self, key):
del self._store[key.lower()]
def __iter__(self):
return (casedkey for casedkey, mappedvalue in self._store.values())
def __len__(self):
return len(self._store)
def __eq__(self, other):
for key, value in self.items():
if key not in other or other[key] != value:
return False
for key, value in other.items():
if key not in self or self[key] != value:
return False
return True
def copy(self):
return type(self)(self._store.values())
def __copy__(self):
return self.copy()
def __repr__(self):
return "{0}({1})".format(type(self).__name__, dict(self.items()))
def _popen_engine(command, engine_cls, setpgrp=False, _popen_lock=threading.Lock(), **kwargs):
"""
Opens a local chess engine process.
:param engine_cls: Engine class
:param setpgrp: Open the engine process in a new process group. This will
stop signals (such as keyboards interrupts) from propagating from the
parent process. Defaults to ``False``.
"""
engine = engine_cls()
popen_args = {}
if setpgrp:
try:
# Windows.
popen_args["creationflags"] = subprocess.CREATE_NEW_PROCESS_GROUP
except AttributeError:
# Unix.
popen_args["preexec_fn"] = os.setpgrp
popen_args.update(kwargs)
# Work around a possible race condition in Python 2 subprocess module
# that can occur when concurrently opening processes.
with _popen_lock:
PopenProcess(engine, command, **popen_args)
return engine
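# A minimal usage sketch for _popen_engine (kept as a comment). The higher-level
# wrappers in the uci/xboard modules are the usual entry points; both the
# engine class and the "stockfish" command below are assumptions about the
# environment. SomeEngineClass stands for a hypothetical engine subclass that
# implements on_process_spawned, on_line_received, and on_terminated:
#
#   engine = _popen_engine("stockfish", engine_cls=SomeEngineClass, setpgrp=True)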
def _spur_spawn_engine(shell, command, engine_cls):
"""
Spawns a remote engine using a `Spur`_ shell.
.. _Spur: https://pypi.python.org/pypi/spur
"""
engine = engine_cls()
SpurProcess(engine, shell, command)
return engine
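# A minimal usage sketch for _spur_spawn_engine, assuming the spur package and
# SSH access (hostname, credentials, and the remote engine path below are
# hypothetical placeholders; SomeEngineClass is the same kind of engine
# callback class as above):
#
#   import spur
#   shell = spur.SshShell(hostname="example.com", username="user",
#                         password="secret")
#   engine = _spur_spawn_engine(shell, ["/usr/bin/stockfish"], SomeEngineClass)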
|
filesystemio_test.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for filesystemio."""
from __future__ import absolute_import
import io
import logging
import multiprocessing
import os
import sys
import threading
import unittest
from builtins import range
from apache_beam.io import filesystemio
class FakeDownloader(filesystemio.Downloader):
def __init__(self, data):
self._data = data
self.last_read_size = -1
@property
def size(self):
return len(self._data)
def get_range(self, start, end):
self.last_read_size = end - start
return self._data[start:end]
class FakeUploader(filesystemio.Uploader):
def __init__(self):
self.data = ''
self.last_write_size = -1
self.finished = False
def last_error(self):
return None
def put(self, data):
assert not self.finished
self.data += data.tobytes()
self.last_write_size = len(data)
def finish(self):
self.finished = True
class TestDownloaderStream(unittest.TestCase):
def test_file_attributes(self):
downloader = FakeDownloader(data=None)
stream = filesystemio.DownloaderStream(downloader)
self.assertEqual(stream.mode, 'r')
self.assertTrue(stream.readable())
self.assertFalse(stream.writable())
self.assertTrue(stream.seekable())
def test_read_empty(self):
downloader = FakeDownloader(data=b'')
stream = filesystemio.DownloaderStream(downloader)
self.assertEqual(stream.read(), b'')
def test_read(self):
data = 'abcde'
downloader = FakeDownloader(data)
stream = filesystemio.DownloaderStream(downloader)
# Read size is exactly what was passed to read() (unbuffered).
self.assertEqual(stream.read(1), data[0])
self.assertEqual(downloader.last_read_size, 1)
self.assertEqual(stream.read(), data[1:])
self.assertEqual(downloader.last_read_size, len(data) - 1)
@unittest.skipIf(sys.version_info[0] == 3 and
os.environ.get('RUN_SKIPPED_PY3_TESTS') != '1',
'This test still needs to be fixed on Python 3'
'TODO: BEAM-5627')
def test_read_buffered(self):
data = 'abcde'
downloader = FakeDownloader(data)
buffer_size = 2
stream = io.BufferedReader(filesystemio.DownloaderStream(downloader),
buffer_size)
# Verify that buffering works and is reading ahead.
self.assertEqual(stream.read(1), data[0])
self.assertEqual(downloader.last_read_size, buffer_size)
self.assertEqual(stream.read(), data[1:])
@unittest.skipIf(sys.version_info[0] == 3 and
os.environ.get('RUN_SKIPPED_PY3_TESTS') != '1',
'This test still needs to be fixed on Python 3. '
'TODO: BEAM-5627')
class TestUploaderStream(unittest.TestCase):
def test_file_attributes(self):
uploader = FakeUploader()
stream = filesystemio.UploaderStream(uploader)
self.assertEqual(stream.mode, 'w')
self.assertFalse(stream.readable())
self.assertTrue(stream.writable())
self.assertFalse(stream.seekable())
def test_write_empty(self):
uploader = FakeUploader()
stream = filesystemio.UploaderStream(uploader)
data = ''
stream.write(memoryview(data))
self.assertEqual(uploader.data, data)
def test_write(self):
data = 'abcde'
uploader = FakeUploader()
stream = filesystemio.UploaderStream(uploader)
# Unbuffered writes.
stream.write(memoryview(data[0]))
self.assertEqual(uploader.data[0], data[0])
self.assertEqual(uploader.last_write_size, 1)
stream.write(memoryview(data[1:]))
self.assertEqual(uploader.data, data)
self.assertEqual(uploader.last_write_size, len(data) - 1)
def test_write_buffered(self):
data = 'abcde'
uploader = FakeUploader()
buffer_size = 2
stream = io.BufferedWriter(filesystemio.UploaderStream(uploader),
buffer_size)
# Verify that buffering works: doesn't write to uploader until buffer is
# filled.
stream.write(data[0])
self.assertEqual(-1, uploader.last_write_size)
stream.write(data[1:])
stream.close()
self.assertEqual(data, uploader.data)
class TestPipeStream(unittest.TestCase):
def _read_and_verify(self, stream, expected, buffer_size):
data_list = []
bytes_read = 0
seen_last_block = False
while True:
data = stream.read(buffer_size)
self.assertLessEqual(len(data), buffer_size)
if len(data) < buffer_size:
# Test the constraint that the pipe stream returns less than the buffer
# size only when at the end of the stream.
if data:
self.assertFalse(seen_last_block)
seen_last_block = True
if not data:
break
data_list.append(data)
bytes_read += len(data)
self.assertEqual(stream.tell(), bytes_read)
self.assertEqual(b''.join(data_list), expected)
def test_pipe_stream(self):
block_sizes = list(4**i for i in range(0, 12))
data_blocks = list(os.urandom(size) for size in block_sizes)
expected = b''.join(data_blocks)
buffer_sizes = [100001, 512 * 1024, 1024 * 1024]
for buffer_size in buffer_sizes:
parent_conn, child_conn = multiprocessing.Pipe()
stream = filesystemio.PipeStream(child_conn)
child_thread = threading.Thread(
target=self._read_and_verify, args=(stream, expected, buffer_size))
child_thread.start()
for data in data_blocks:
parent_conn.send_bytes(data)
parent_conn.close()
child_thread.join()
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
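# --- Illustrative sketch (editor addition, not part of the test suite above) ---
# The buffered tests rely on the standard io layering: io.BufferedReader and
# io.BufferedWriter wrap a raw stream object and only touch it when their buffer
# runs out or is flushed. A minimal self-contained demonstration of the writer
# side, using only the standard library (all names below are hypothetical):
def _buffered_writer_sketch():
  import io

  class _SinkIO(io.RawIOBase):
    """Raw sink that records every chunk handed to it by the buffering layer."""

    def __init__(self):
      self.chunks = []

    def writable(self):
      return True

    def write(self, b):
      self.chunks.append(bytes(b))
      return len(b)

  sink = _SinkIO()
  writer = io.BufferedWriter(sink, buffer_size=2)
  writer.write(b'a')     # fits in the buffer; nothing reaches the sink yet
  assert sink.chunks == []
  writer.write(b'bcde')  # exceeds the buffer, so data gets pushed through
  writer.close()         # close() flushes whatever is still buffered
  assert b''.join(sink.chunks) == b'abcde'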
|
gtagsExpl.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import vim
import re
import os
import os.path
import shutil
import itertools
import subprocess
import sys
import threading
from functools import partial
from .utils import *
from .explorer import *
from .manager import *
if sys.version_info >= (3, 0):
import queue as Queue
else:
import Queue
#*****************************************************
# GtagsExplorer
#*****************************************************
class GtagsExplorer(Explorer):
def __init__(self):
self._executor = []
self._pattern_regex = []
if os.name == 'nt':
self._cd_option = '/d '
else:
self._cd_option = ''
self._root_markers = lfEval("g:Lf_RootMarkers")
self._db_location = os.path.join(lfEval("g:Lf_CacheDirectory"),
'.LfCache',
'gtags')
self._project_root = ""
self._gtagslibpath = []
self._result_format = None
self._last_result_format = None
self._evalVimVar()
self._has_nvim = lfEval("has('nvim')") == '1'
self._task_queue = Queue.Queue()
self._worker_thread = threading.Thread(target=self._processTask)
self._worker_thread.daemon = True
self._worker_thread.start()
def __del__(self):
self._task_queue.put(None)
self._worker_thread.join()
def _processTask(self):
while True:
try:
task = self._task_queue.get()
if task is None:
break
task()
except Exception as e:
print(e)
def getContent(self, *args, **kwargs):
arguments_dict = kwargs.get("arguments", {})
if "--recall" in arguments_dict:
return []
if vim.current.buffer.name:
filename = lfDecode(vim.current.buffer.name)
else:
filename = os.path.join(os.getcwd(), 'no_name')
if "--gtagsconf" in arguments_dict:
self._gtagsconf = arguments_dict["--gtagsconf"][0]
if "--gtagslabel" in arguments_dict:
self._gtagslabel = arguments_dict["--gtagslabel"][0]
if self._gtagsconf == '' and os.name == 'nt':
self._gtagsconf = os.path.normpath(os.path.join(self._which("gtags.exe"), "..", "share", "gtags", "gtags.conf"))
if "--gtagslibpath" in arguments_dict:
self._gtagslibpath = [os.path.abspath(os.path.expanduser(p)) for p in arguments_dict["--gtagslibpath"]]
else:
self._gtagslibpath = []
if "--update" in arguments_dict:
if "--accept-dotfiles" in arguments_dict:
self._accept_dotfiles = "--accept-dotfiles "
if "--skip-unreadable" in arguments_dict:
self._skip_unreadable = "--skip-unreadable "
if "--skip-symlink" in arguments_dict:
skip_symlink = arguments_dict["--skip-symlink"]
self._skip_symlink = "--skip-symlink%s " % ('=' + skip_symlink[0] if skip_symlink else "")
self.updateGtags(filename, single_update=False, auto=False)
return
elif "--remove" in arguments_dict:
self._remove(filename)
return
if "--path-style" in arguments_dict:
path_style = "--path-style %s " % arguments_dict["--path-style"][0]
else:
path_style = ""
auto_jump = False
self._last_result_format = self._result_format
self._result_format = None
if "-d" in arguments_dict:
pattern = arguments_dict["-d"][0]
pattern_option = "-d -e %s " % pattern
if "--auto-jump" in arguments_dict:
auto_jump = True
elif "-r" in arguments_dict:
pattern = arguments_dict["-r"][0]
pattern_option = "-r -e %s " % pattern
if "--auto-jump" in arguments_dict:
auto_jump = True
elif "-s" in arguments_dict:
pattern = arguments_dict["-s"][0]
pattern_option = "-s -e %s " % pattern
elif "-g" in arguments_dict:
pattern = arguments_dict["-g"][0]
pattern_option = "-g -e %s " % pattern
elif "--by-context" in arguments_dict:
pattern = lfEval('expand("<cword>")')
pattern_option = '--from-here "%d:%s" %s ' % (vim.current.window.cursor[0], vim.current.buffer.name, pattern)
else:
if "--current-buffer" in arguments_dict:
pattern_option = '-f "%s" -q' % vim.current.buffer.name
elif "--all-buffers" in arguments_dict:
pattern_option = '-f "%s" -q' % '" "'.join(b.name for b in vim.buffers)
else: # '--all' or empty means the whole project
pattern_option = None
root, dbpath, exists = self._root_dbpath(filename)
if not filename.startswith(root):
libdb = os.path.join(dbpath, "GTAGSLIBPATH")
if os.path.exists(libdb):
with lfOpen(libdb, 'r', errors='ignore') as f:
for line in f:
tmp_root, tmp_dbpath = line.rstrip().split('\t', 1)
if filename.startswith(tmp_root):
root = tmp_root
dbpath = tmp_dbpath
break
if "--result" in arguments_dict:
self._result_format = arguments_dict["--result"][0]
else:
self._result_format = "ctags"
env = os.environ
env["GTAGSROOT"] = root
env["GTAGSDBPATH"] = dbpath
if pattern_option is None: # '--all' or empty means the whole project
cmd = 'global -P | global -L- -f {}--gtagslabel={} {}--color=never --result={}'.format(
'--gtagsconf "%s" ' % self._gtagsconf if self._gtagsconf else "",
self._gtagslabel, path_style, self._result_format)
else:
cmd = 'global {}--gtagslabel={} {} {}--color=never --result={}'.format(
'--gtagsconf "%s" ' % self._gtagsconf if self._gtagsconf else "",
self._gtagslabel, pattern_option, path_style, self._result_format)
executor = AsyncExecutor()
self._executor.append(executor)
lfCmd("let g:Lf_Debug_GtagsCmd = '%s'" % escQuote(cmd))
content = executor.execute(cmd, env=env)
return content
if "-S" in arguments_dict:
scope = "--scope %s " % os.path.abspath(arguments_dict["-S"][0])
else:
scope = ""
if "--literal" in arguments_dict:
literal = "--literal "
else:
literal = ""
if "-i" in arguments_dict:
ignorecase = "-i "
else:
ignorecase = ""
if "--append" not in arguments_dict or self._last_result_format is not None:
self._pattern_regex = []
# build vim regex, which is used for highlighting
if ignorecase:
case_pattern = r'\c'
else:
case_pattern = r'\C'
if len(pattern) > 1 and (pattern[0] == pattern[-1] == '"' or pattern[0] == pattern[-1] == "'"):
p = pattern[1:-1]
else:
p = pattern
if literal:
if len(pattern) > 1 and pattern[0] == pattern[-1] == '"':
p = re.sub(r'\\(?!")', r'\\\\', p)
else:
p = p.replace('\\', r'\\')
self._pattern_regex.append(r'\V' + case_pattern + p)
else:
vim_regex = self.translateRegex(case_pattern + p)
if "-g" not in arguments_dict:
vim_regex = vim_regex.replace('.', r'\w')
self._pattern_regex.append(vim_regex)
root, dbpath, exists = self._root_dbpath(filename)
env = os.environ
env["GTAGSROOT"] = root
env["GTAGSDBPATH"] = dbpath
cmd = 'global {}--gtagslabel={} {} {}{}{}{}--color=never --result=ctags-mod'.format(
'--gtagsconf "%s" ' % self._gtagsconf if self._gtagsconf else "",
self._gtagslabel, pattern_option, path_style, scope, literal, ignorecase)
executor = AsyncExecutor()
self._executor.append(executor)
lfCmd("let g:Lf_Debug_GtagsCmd = '%s'" % escQuote(cmd))
content = executor.execute(cmd, env=env)
libdb = os.path.join(dbpath, "GTAGSLIBPATH")
if os.path.exists(libdb):
with lfOpen(libdb, 'r', errors='ignore') as f:
for line in f:
root, dbpath = line.rstrip().split('\t', 1)
env = os.environ
env["GTAGSROOT"] = root
env["GTAGSDBPATH"] = dbpath
if path_style == "--path-style abslib ":
path_style = "--path-style absolute "
cmd = 'global {}--gtagslabel={} {} {}{}{}{}--color=never --result=ctags-mod -q'.format(
'--gtagsconf "%s" ' % self._gtagsconf if self._gtagsconf else "",
self._gtagslabel, pattern_option, path_style, scope, literal, ignorecase)
executor = AsyncExecutor()
self._executor.append(executor)
content += executor.execute(cmd, env=env)
if auto_jump:
first_two = list(itertools.islice(content, 2))
if len(first_two) == 1:
return first_two
else:
return content.join_left(first_two)
return content
def translateRegex(self, regex, is_perl=False):
"""
copied from RgExplorer
"""
vim_regex = regex
vim_regex = re.sub(r'([%@&])', r'\\\1', vim_regex)
# non-greedy pattern
vim_regex = re.sub(r'(?<!\\)\*\?', r'{-}', vim_regex)
vim_regex = re.sub(r'(?<!\\)\+\?', r'{-1,}', vim_regex)
vim_regex = re.sub(r'(?<!\\)\?\?', r'{-0,1}', vim_regex)
vim_regex = re.sub(r'(?<!\\)\{(.*?)\}\?', r'{-\1}', vim_regex)
if is_perl:
# *+, ++, ?+, {m,n}+ => *, +, ?, {m,n}
vim_regex = re.sub(r'(?<!\\)([*+?}])\+', r'\1', vim_regex)
# remove (?#....)
vim_regex = re.sub(r'\(\?#.*?\)', r'', vim_regex)
# (?=atom) => atom\@=
vim_regex = re.sub(r'\(\?=(.+?)\)', r'(\1)@=', vim_regex)
# (?!atom) => atom\@!
vim_regex = re.sub(r'\(\?!(.+?)\)', r'(\1)@!', vim_regex)
# (?<=atom) => atom\@<=
vim_regex = re.sub(r'\(\?<=(.+?)\)', r'(\1)@<=', vim_regex)
# (?<!atom) => atom\@<!
vim_regex = re.sub(r'\(\?<!(.+?)\)', r'(\1)@<!', vim_regex)
# (?>atom) => atom\@>
vim_regex = re.sub(r'\(\?>(.+?)\)', r'(\1)@>', vim_regex)
# this won't hurt although they are not the same
vim_regex = vim_regex.replace(r'\A', r'^')
vim_regex = vim_regex.replace(r'\z', r'$')
vim_regex = vim_regex.replace(r'\B', r'')
# word boundary
vim_regex = re.sub(r'\\b', r'(<|>)', vim_regex)
# case-insensitive
vim_regex = vim_regex.replace(r'(?i)', r'\c')
vim_regex = vim_regex.replace(r'(?-i)', r'\C')
# (?P<name>exp) => (exp)
vim_regex = re.sub(r'(?<=\()\?P<\w+>', r'', vim_regex)
# (?:exp) => %(exp)
vim_regex = re.sub(r'\(\?:(.+?)\)', r'%(\1)', vim_regex)
# \a bell (\x07)
# \f form feed (\x0C)
# \v vertical tab (\x0B)
vim_regex = vim_regex.replace(r'\a', r'%x07')
vim_regex = vim_regex.replace(r'\f', r'%x0C')
vim_regex = vim_regex.replace(r'\v', r'%x0B')
# \123 octal character code (up to three digits) (when enabled)
# \x7F hex character code (exactly two digits)
vim_regex = re.sub(r'\\(x[0-9A-Fa-f][0-9A-Fa-f])', r'%\1', vim_regex)
# \x{10FFFF} any hex character code corresponding to a Unicode code point
# \u007F hex character code (exactly four digits)
# \u{7F} any hex character code corresponding to a Unicode code point
# \U0000007F hex character code (exactly eight digits)
# \U{7F} any hex character code corresponding to a Unicode code point
vim_regex = re.sub(r'\\([uU])', r'%\1', vim_regex)
vim_regex = re.sub(r'\[\[:ascii:\]\]', r'[\\x00-\\x7F]', vim_regex)
vim_regex = re.sub(r'\[\[:word:\]\]', r'[0-9A-Za-z_]', vim_regex)
vim_regex = vim_regex.replace(r'[[:^alnum:]]', r'[^0-9A-Za-z]')
vim_regex = vim_regex.replace(r'[[:^alpha:]]', r'[^A-Za-z]')
vim_regex = vim_regex.replace(r'[[:^ascii:]]', r'[^\x00-\x7F]')
vim_regex = vim_regex.replace(r'[[:^blank:]]', r'[^\t ]')
vim_regex = vim_regex.replace(r'[[:^cntrl:]]', r'[^\x00-\x1F\x7F]')
vim_regex = vim_regex.replace(r'[[:^digit:]]', r'[^0-9]')
vim_regex = vim_regex.replace(r'[[:^graph:]]', r'[^!-~]')
vim_regex = vim_regex.replace(r'[[:^lower:]]', r'[^a-z]')
vim_regex = vim_regex.replace(r'[[:^print:]]', r'[^ -~]')
vim_regex = vim_regex.replace(r'[[:^punct:]]', r'[^!-/:-@\[-`{-~]')
vim_regex = vim_regex.replace(r'[[:^space:]]', r'[^\t\n\r ]')
vim_regex = vim_regex.replace(r'[[:^upper:]]', r'[^A-Z]')
vim_regex = vim_regex.replace(r'[[:^word:]]', r'[^0-9A-Za-z_]')
vim_regex = vim_regex.replace(r'[[:^xdigit:]]', r'[^0-9A-Fa-f]')
return r'\v' + vim_regex
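    # Illustrative examples (added commentary, derived from the rules above):
    #   translateRegex(r'\Cfoo(?:bar)+?')  returns  r'\v\Cfoo%(bar){-1,}'
    #   translateRegex(r'\cclass\b')       returns  r'\v\cclass(<|>)'
    # i.e. ripgrep/Python style groups, lazy quantifiers and word boundaries are
    # rewritten into Vim's "very magic" (\v) syntax so the matches can later be
    # highlighted with matchadd().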
def _nearestAncestor(self, markers, path):
"""
return the nearest ancestor path(including itself) of `path` that contains
one of files or directories in `markers`.
`markers` is a list of file or directory names.
"""
if os.name == 'nt':
# e.g. C:\\
root = os.path.splitdrive(os.path.abspath(path))[0] + os.sep
else:
root = '/'
path = os.path.abspath(path)
while path != root:
for name in markers:
if os.path.exists(os.path.join(path, name)):
return path
path = os.path.abspath(os.path.join(path, ".."))
for name in markers:
if os.path.exists(os.path.join(path, name)):
return path
return ""
def _isVersionControl(self, filename):
if self._project_root and filename.startswith(self._project_root):
return True
ancestor = self._nearestAncestor(self._root_markers, os.path.dirname(filename))
if ancestor:
self._project_root = ancestor
return True
else:
return False
def _generateDbpath(self, path):
if os.name == 'nt':
db_folder = re.sub(r'[\\/]', '%', path.replace(':\\', '%', 1))
else:
db_folder = path.replace('/', '%')
return os.path.join(self._db_location, db_folder)
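    # Illustrative examples of the mapping above (every path separator becomes '%'):
    #   '/home/user/proj'       ->  <self._db_location>/%home%user%proj
    #   'C:\\work\\proj' on nt  ->  <self._db_location>/C%work%proj
    # so each project root gets its own flat sub-directory under the gtags cache.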
def _root_dbpath(self, filename):
"""
return the (root, dbpath, whether gtags exists)
"""
if self._project_root and filename.startswith(self._project_root):
root = self._project_root
else:
ancestor = self._nearestAncestor(self._root_markers, os.path.dirname(filename))
if ancestor:
self._project_root = ancestor
root = self._project_root
else:
ancestor = self._nearestAncestor(self._root_markers, os.getcwd())
if ancestor:
self._project_root = ancestor
root = self._project_root
else:
root = os.getcwd()
dbpath = self._generateDbpath(root)
return (root, dbpath, os.path.exists(os.path.join(dbpath, "GTAGS")))
def updateGtags(self, filename, single_update, auto):
self._task_queue.put(partial(self._update, filename, single_update, auto))
def _remove(self, filename):
if filename == "":
return
root, dbpath, exists = self._root_dbpath(filename)
try:
lfCmd("echohl Question")
if lfEval('input("Are you sure you want to remove directory `{}`?[Ny] ")'.format(lfEncode(dbpath.replace('\\', r'\\')))) in ["Y","y"]:
shutil.rmtree(dbpath)
except Exception as e:
lfPrintError(e)
finally:
lfCmd("echohl NONE")
def _update(self, filename, single_update, auto):
if filename == "":
return
if self._gtagsconf == '' and os.name == 'nt':
self._gtagsconf = os.path.normpath(os.path.join(self._which("gtags.exe"), "..", "share", "gtags", "gtags.conf"))
root, dbpath, exists = self._root_dbpath(filename)
self._updateLibGtags(dbpath)
if single_update:
if exists:
cmd = 'cd {}"{}" && gtags {}{}{}{}--gtagslabel {} --single-update "{}" "{}"'.format(self._cd_option, root,
self._accept_dotfiles, self._skip_unreadable, self._skip_symlink,
'--gtagsconf "%s" ' % self._gtagsconf if self._gtagsconf else "",
self._gtagslabel, filename, dbpath)
env = os.environ
env["GTAGSFORCECPP"] = ""
subprocess.Popen(cmd, shell=True, env=env)
elif not auto:
self._executeCmd(root, dbpath)
elif self._isVersionControl(filename):
if not exists:
self._executeCmd(root, dbpath)
def _updateLibGtags(self, dbpath):
if not self._gtagslibpath:
return
if not os.path.exists(dbpath):
os.makedirs(dbpath)
libpaths = ["%s\t%s\n" % (p, self._generateDbpath(p)) for p in self._gtagslibpath if os.path.exists(p)]
if libpaths:
libdb = os.path.join(dbpath, "GTAGSLIBPATH")
with lfOpen(libdb, 'w', errors='ignore') as f:
f.writelines(libpaths)
if self._gtagsconf == '' and os.name == 'nt':
self._gtagsconf = os.path.normpath(os.path.join(self._which("gtags.exe"), "..", "share", "gtags", "gtags.conf"))
env = os.environ
env["GTAGSFORCECPP"] = ""
for path in self._gtagslibpath:
if not os.path.exists(path):
continue
libdbpath = self._generateDbpath(path)
if not os.path.exists(libdbpath):
os.makedirs(libdbpath)
cmd = 'cd {}"{}" && gtags {}{}{}{}--gtagslabel {} "{}"'.format(self._cd_option, path,
self._accept_dotfiles, self._skip_unreadable, self._skip_symlink,
'--gtagsconf "%s" ' % self._gtagsconf if self._gtagsconf else "",
self._gtagslabel, libdbpath)
subprocess.Popen(cmd, shell=True, env=env)
def _which(self, executable):
for p in os.environ["PATH"].split(";"):
if os.path.exists(os.path.join(p, executable)):
return p
return ""
def _evalVimVar(self):
"""
vim variables can not be accessed from a python thread,
so we should evaluate the value in advance.
"""
self._accept_dotfiles = "--accept-dotfiles " if lfEval("get(g:, 'Lf_GtagsAcceptDotfiles', '0')") == '1' else ""
self._skip_unreadable = "--skip-unreadable " if lfEval("get(g:, 'Lf_GtagsSkipUnreadable', '0')") == '1' else ""
self._skip_symlink = "--skip-symlink%s " % ('=' + lfEval("get(g:, 'Lf_GtagsSkipSymlink', '')")
if lfEval("get(g:, 'Lf_GtagsSkipSymlink', '')") != '' else "")
self._gtagsconf = lfEval("get(g:, 'Lf_Gtagsconf', '')")
self._gtagslabel = lfEval("get(g:, 'Lf_Gtagslabel', 'default')")
self._Lf_GtagsSource = int(lfEval("get(g:, 'Lf_GtagsSource', 0)"))
if self._Lf_GtagsSource not in [0, 1, 2]:
self._Lf_GtagsSource = 0
if self._Lf_GtagsSource != 1: # only using FileExplorer needs to evaluate the following variables
if self._Lf_GtagsSource == 2:
self._Lf_GtagsfilesCmd = lfEval("g:Lf_GtagsfilesCmd")
return
if lfEval("exists('g:Lf_ExternalCommand')") == '1':
            self._Lf_ExternalCommand = lfEval("g:Lf_ExternalCommand")
return
self._Lf_ExternalCommand = None
self._Lf_UseVersionControlTool = lfEval("g:Lf_UseVersionControlTool") == '1'
self._Lf_WildIgnore = lfEval("g:Lf_WildIgnore")
self._Lf_RecurseSubmodules = lfEval("get(g:, 'Lf_RecurseSubmodules', 0)") == '1'
if lfEval("exists('g:Lf_DefaultExternalTool')") == '1':
self._default_tool = {"rg": 0, "pt": 0, "ag": 0, "find": 0}
tool = lfEval("g:Lf_DefaultExternalTool")
if tool and lfEval("executable('%s')" % tool) == '0':
raise Exception("executable '%s' can not be found!" % tool)
self._default_tool[tool] = 1
else:
self._default_tool = {"rg": 1, "pt": 1, "ag": 1, "find": 1}
self._is_rg_executable = lfEval("executable('rg')") == '1'
self._Lf_ShowHidden = lfEval("g:Lf_ShowHidden") != '0'
self._Lf_FollowLinks = lfEval("g:Lf_FollowLinks") == '1'
self._is_pt_executable = lfEval("executable('pt')") == '1'
self._is_ag_executable = lfEval("executable('ag')") == '1'
self._is_find_executable = lfEval("executable('find')") == '1'
def _exists(self, path, dir):
"""
return True if `dir` exists in `path` or its ancestor path,
otherwise return False
"""
if os.name == 'nt':
# e.g. C:\\
root = os.path.splitdrive(os.path.abspath(path))[0] + os.sep
else:
root = '/'
while os.path.abspath(path) != root:
cur_dir = os.path.join(path, dir)
if os.path.exists(cur_dir) and os.path.isdir(cur_dir):
return True
path = os.path.join(path, "..")
cur_dir = os.path.join(path, dir)
if os.path.exists(cur_dir) and os.path.isdir(cur_dir):
return True
return False
def _buildCmd(self, dir, **kwargs):
"""
this function comes from FileExplorer
"""
# do not use external command if the encoding of `dir` is not ascii
if not isAscii(dir):
return None
        if self._Lf_ExternalCommand:
            return self._Lf_ExternalCommand % dir.join('""')
arguments_dict = kwargs.get("arguments", {})
if self._Lf_UseVersionControlTool:
if self._exists(dir, ".git"):
wildignore = self._Lf_WildIgnore
if ".git" in wildignore["dir"]:
wildignore["dir"].remove(".git")
if ".git" in wildignore["file"]:
wildignore["file"].remove(".git")
ignore = ""
for i in wildignore["dir"]:
ignore += ' -x "%s"' % i
for i in wildignore["file"]:
ignore += ' -x "%s"' % i
if "--no-ignore" in arguments_dict:
no_ignore = ""
else:
no_ignore = "--exclude-standard"
if self._Lf_RecurseSubmodules:
recurse_submodules = "--recurse-submodules"
else:
recurse_submodules = ""
cmd = 'git ls-files %s "%s" && git ls-files --others %s %s "%s"' % (recurse_submodules, dir, no_ignore, ignore, dir)
return cmd
elif self._exists(dir, ".hg"):
wildignore = self._Lf_WildIgnore
if ".hg" in wildignore["dir"]:
wildignore["dir"].remove(".hg")
if ".hg" in wildignore["file"]:
wildignore["file"].remove(".hg")
ignore = ""
for i in wildignore["dir"]:
ignore += ' -X "%s"' % self._expandGlob("dir", i)
for i in wildignore["file"]:
ignore += ' -X "%s"' % self._expandGlob("file", i)
cmd = 'hg files %s "%s"' % (ignore, dir)
return cmd
default_tool = self._default_tool
if default_tool["rg"] and self._is_rg_executable:
wildignore = self._Lf_WildIgnore
if os.name == 'nt': # https://github.com/BurntSushi/ripgrep/issues/500
color = ""
ignore = ""
for i in wildignore["dir"]:
if self._Lf_ShowHidden or not i.startswith('.'): # rg does not show hidden files by default
ignore += ' -g "!%s"' % i
for i in wildignore["file"]:
if self._Lf_ShowHidden or not i.startswith('.'):
ignore += ' -g "!%s"' % i
else:
color = "--color never"
ignore = ""
for i in wildignore["dir"]:
if self._Lf_ShowHidden or not i.startswith('.'):
ignore += " -g '!%s'" % i
for i in wildignore["file"]:
if self._Lf_ShowHidden or not i.startswith('.'):
ignore += " -g '!%s'" % i
if self._Lf_FollowLinks:
followlinks = "-L"
else:
followlinks = ""
if self._Lf_ShowHidden:
show_hidden = "--hidden"
else:
show_hidden = ""
if "--no-ignore" in arguments_dict:
no_ignore = "--no-ignore"
else:
no_ignore = ""
if dir == '.':
cur_dir = ''
else:
cur_dir = '"%s"' % dir
cmd = 'rg --no-messages --files %s %s %s %s %s %s' % (color, ignore, followlinks, show_hidden, no_ignore, cur_dir)
elif default_tool["pt"] and self._is_pt_executable and os.name != 'nt': # there is bug on Windows
wildignore = self._Lf_WildIgnore
ignore = ""
for i in wildignore["dir"]:
if self._Lf_ShowHidden or not i.startswith('.'): # pt does not show hidden files by default
ignore += " --ignore=%s" % i
for i in wildignore["file"]:
if self._Lf_ShowHidden or not i.startswith('.'):
ignore += " --ignore=%s" % i
if self._Lf_FollowLinks:
followlinks = "-f"
else:
followlinks = ""
if self._Lf_ShowHidden:
show_hidden = "--hidden"
else:
show_hidden = ""
if "--no-ignore" in arguments_dict:
no_ignore = "-U"
else:
no_ignore = ""
cmd = 'pt --nocolor %s %s %s %s -g="" "%s"' % (ignore, followlinks, show_hidden, no_ignore, dir)
elif default_tool["ag"] and self._is_ag_executable and os.name != 'nt': # https://github.com/vim/vim/issues/3236
wildignore = self._Lf_WildIgnore
ignore = ""
for i in wildignore["dir"]:
if self._Lf_ShowHidden or not i.startswith('.'): # ag does not show hidden files by default
ignore += ' --ignore "%s"' % i
for i in wildignore["file"]:
if self._Lf_ShowHidden or not i.startswith('.'):
ignore += ' --ignore "%s"' % i
if self._Lf_FollowLinks:
followlinks = "-f"
else:
followlinks = ""
if self._Lf_ShowHidden:
show_hidden = "--hidden"
else:
show_hidden = ""
if "--no-ignore" in arguments_dict:
no_ignore = "-U"
else:
no_ignore = ""
cmd = 'ag --nocolor --silent %s %s %s %s -g "" "%s"' % (ignore, followlinks, show_hidden, no_ignore, dir)
elif default_tool["find"] and self._is_find_executable and os.name != 'nt':
wildignore = self._Lf_WildIgnore
ignore_dir = ""
for d in wildignore["dir"]:
ignore_dir += '-type d -name "%s" -prune -o ' % d
ignore_file = ""
for f in wildignore["file"]:
ignore_file += '-type f -name "%s" -o ' % f
if self._Lf_FollowLinks:
followlinks = "-L"
else:
followlinks = ""
if os.name == 'nt':
redir_err = ""
else:
redir_err = " 2>/dev/null"
if self._Lf_ShowHidden:
show_hidden = ""
else:
show_hidden = '-name ".*" -prune -o'
            cmd = 'find %s "%s" -name "." -o %s %s %s -type f -print %s' % (followlinks,
                                                                            dir,
                                                                            ignore_dir,
                                                                            ignore_file,
                                                                            show_hidden,
                                                                            redir_err)
else:
cmd = None
return cmd
def _file_list_cmd(self, root):
if self._Lf_GtagsSource == 1:
cmd = self._buildCmd(root)
elif self._Lf_GtagsSource == 2:
if os.path.exists(os.path.join(root, ".git")) and os.path.isdir(os.path.join(root, ".git")):
cmd = self._Lf_GtagsfilesCmd[".git"]
elif os.path.exists(os.path.join(root, ".hg")) and os.path.isdir(os.path.join(root, ".hg")):
cmd = self._Lf_GtagsfilesCmd[".hg"]
else:
cmd = self._Lf_GtagsfilesCmd["default"]
else:
cmd = None
return cmd
def _executeCmd(self, root, dbpath):
if not os.path.exists(dbpath):
os.makedirs(dbpath)
cmd = self._file_list_cmd(root)
if cmd:
if os.name == 'nt':
cmd = 'cd {}"{}" && ( {} ) | gtags {}{}{}{}--gtagslabel {} -f- "{}"'.format(self._cd_option, root, cmd,
self._accept_dotfiles, self._skip_unreadable, self._skip_symlink,
'--gtagsconf "%s" ' % self._gtagsconf if self._gtagsconf else "",
self._gtagslabel, dbpath)
else:
cmd = 'cd {}"{}" && {{ {}; }} | gtags {}{}{}{}--gtagslabel {} -f- "{}"'.format(self._cd_option, root, cmd,
self._accept_dotfiles, self._skip_unreadable, self._skip_symlink,
'--gtagsconf "%s" ' % self._gtagsconf if self._gtagsconf else "",
self._gtagslabel, dbpath)
else:
cmd = 'cd {}"{}" && gtags {}{}{}{}--gtagslabel {} "{}"'.format(self._cd_option, root,
self._accept_dotfiles, self._skip_unreadable, self._skip_symlink,
'--gtagsconf "%s" ' % self._gtagsconf if self._gtagsconf else "",
self._gtagslabel, dbpath)
env = os.environ
env["GTAGSFORCECPP"] = ""
proc = subprocess.Popen(cmd, shell=True, universal_newlines=True, stderr=subprocess.PIPE, env=env)
_, error = proc.communicate()
def print_log(args):
print(args)
if error:
if self._has_nvim:
vim.async_call(print_log, cmd)
vim.async_call(print_log, error)
vim.async_call(print_log, "gtags error!")
else:
print(cmd)
print(error)
print("gtags error!")
else:
if self._has_nvim:
vim.async_call(print_log, "gtags generated successfully!")
else:
print("gtags generated successfully!")
def getStlCategory(self):
return 'Gtags'
def getStlCurDir(self):
return escQuote(lfEncode(os.getcwd()))
def cleanup(self):
for exe in self._executor:
exe.killProcess()
self._executor = []
def getPatternRegex(self):
return self._pattern_regex
def getResultFormat(self):
return self._result_format
def getLastResultFormat(self):
return self._last_result_format
#*****************************************************
# GtagsExplManager
#*****************************************************
class GtagsExplManager(Manager):
def __init__(self):
super(GtagsExplManager, self).__init__()
self._match_ids = []
self._match_path = False
def _getExplClass(self):
return GtagsExplorer
def _defineMaps(self):
lfCmd("call leaderf#Gtags#Maps()")
def _acceptSelection(self, *args, **kwargs):
if len(args) == 0:
return
line = args[0]
if self._getExplorer().getResultFormat() is None:
file, line_num = line.split('\t', 2)[:2]
elif self._getExplorer().getResultFormat() == "ctags":
file, line_num = line.split('\t', 2)[1:]
elif self._getExplorer().getResultFormat() == "ctags-x":
line_num, file = line.split(None, 3)[1:3]
else: # ctags-mod
file, line_num = line.split('\t', 2)[:2]
if not os.path.isabs(file):
file = os.path.join(self._getInstance().getCwd(), lfDecode(file))
file = os.path.normpath(lfEncode(file))
try:
if kwargs.get("mode", '') == 't':
lfCmd("tab drop %s | %s" % (escSpecial(file), line_num))
else:
lfCmd("hide edit +%s %s" % (line_num, escSpecial(file)))
lfCmd("norm! zz")
lfCmd("setlocal cursorline! | redraw | sleep 20m | setlocal cursorline!")
except vim.error as e:
lfPrintError(e)
def updateGtags(self, filename, single_update, auto=True):
self._getExplorer().updateGtags(filename, single_update, auto)
def setArguments(self, arguments):
self._arguments = arguments
self._match_path = "--match-path" in arguments
def _getDigest(self, line, mode):
"""
        specify which part of the line is to be processed and highlighted
Args:
mode: 0, return the full path
1, return the name only
2, return the directory name
"""
if self._getExplorer().getResultFormat() in [None, "ctags-mod"]:
if self._match_path:
return line
if mode == 2:
return line[:line.find('\t')]
else:
return line[line.find('\t', line.find('\t')) + 1:]
elif self._getExplorer().getResultFormat() == "ctags":
if mode == 2:
return line[line.find('\t')+1:]
else:
return line[:line.find('\t')]
elif self._getExplorer().getResultFormat() == "ctags-x":
if mode == 2:
return line[line.find(' ') + 1:]
else:
return line[:line.find(' ')]
else:
return line
def _getDigestStartPos(self, line, mode):
"""
return the start position of the digest returned by _getDigest()
Args:
            mode: 0, return the start position of full path
                  1, return the start position of name only
                  2, return the start position of directory name
"""
if self._getExplorer().getResultFormat() in [None, "ctags-mod"]:
if self._match_path or mode == 2:
return 0
return lfBytesLen(line[:line.find('\t', line.find('\t'))]) + 1
elif self._getExplorer().getResultFormat() == "ctags":
if mode == 2:
return lfBytesLen(line[:line.find('\t')]) + 1
else:
return 0
elif self._getExplorer().getResultFormat() == "ctags-x":
if mode == 2:
return lfBytesLen(line[:line.find(' ')]) + 1
else:
return 0
else:
return 0
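    # For reference (added commentary): the line layouts that _getDigest() and
    # _getDigestStartPos() above assume for each gtags result format are roughly:
    #   ctags-mod / None : <path>\t<line number>\t<line content>
    #   ctags            : <tag>\t<path>\t<line number>
    #   ctags-x          : <tag> <line number> <path> <line content>
    # The digest is the part of each line that fuzzy matching and highlighting
    # operate on for the requested mode.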
def _createHelp(self):
help = []
help.append('" <CR>/<double-click>/o : open file under cursor')
help.append('" x : open file under cursor in a horizontally split window')
help.append('" v : open file under cursor in a vertically split window')
help.append('" t : open file under cursor in a new tabpage')
help.append('" p : preview the result')
help.append('" d : delete the line under the cursor')
help.append('" i/<Tab> : switch to input mode')
help.append('" q : quit')
help.append('" <F1> : toggle this help')
help.append('" ---------------------------------------------------------')
return help
def _afterEnter(self):
super(GtagsExplManager, self)._afterEnter()
if self._getExplorer().getResultFormat() is None:
id = int(lfEval('''matchadd('Lf_hl_gtagsFileName', '^.\{-}\ze\t')'''))
self._match_ids.append(id)
id = int(lfEval('''matchadd('Lf_hl_gtagsLineNumber', '\t\zs\d\+\ze\t')'''))
self._match_ids.append(id)
elif self._getExplorer().getResultFormat() == "ctags":
id = int(lfEval('''matchadd('Lf_hl_gtagsFileName', '\t\zs.\{-}\ze\t')'''))
self._match_ids.append(id)
id = int(lfEval('''matchadd('Lf_hl_gtagsLineNumber', '\t\zs\d\+$')'''))
self._match_ids.append(id)
elif self._getExplorer().getResultFormat() == "ctags-x":
id = int(lfEval('''matchadd('Lf_hl_gtagsFileName', '^\S\+\s\+\d\+\s\+\zs\S\+')'''))
self._match_ids.append(id)
id = int(lfEval('''matchadd('Lf_hl_gtagsLineNumber', '^\S\+\s\+\zs\d\+')'''))
self._match_ids.append(id)
else: # ctags-mod
id = int(lfEval('''matchadd('Lf_hl_gtagsFileName', '^.\{-}\ze\t')'''))
self._match_ids.append(id)
id = int(lfEval('''matchadd('Lf_hl_gtagsLineNumber', '\t\zs\d\+\ze\t')'''))
self._match_ids.append(id)
try:
for i in self._getExplorer().getPatternRegex():
id = int(lfEval("matchadd('Lf_hl_gtagsHighlight', '%s', 9)" % escQuote(i)))
self._match_ids.append(id)
except vim.error:
pass
def _beforeExit(self):
super(GtagsExplManager, self)._beforeExit()
for i in self._match_ids:
lfCmd("silent! call matchdelete(%d)" % i)
self._match_ids = []
if self._timer_id is not None:
lfCmd("call timer_stop(%s)" % self._timer_id)
self._timer_id = None
def _previewResult(self, preview):
if not self._needPreview(preview):
return
line = self._getInstance().currentLine
orig_pos = self._getInstance().getOriginalPos()
cur_pos = (vim.current.tabpage, vim.current.window, vim.current.buffer)
saved_eventignore = vim.options['eventignore']
vim.options['eventignore'] = 'BufLeave,WinEnter,BufEnter'
try:
vim.current.tabpage, vim.current.window = orig_pos[:2]
self._acceptSelection(line)
finally:
vim.current.tabpage, vim.current.window, vim.current.buffer = cur_pos
vim.options['eventignore'] = saved_eventignore
def _bangEnter(self):
super(GtagsExplManager, self)._bangEnter()
if lfEval("exists('*timer_start')") == '0':
lfCmd("echohl Error | redraw | echo ' E117: Unknown function: timer_start' | echohl NONE")
return
if "--recall" not in self._arguments:
self._workInIdle(bang=True)
if self._read_finished < 2:
self._timer_id = lfEval("timer_start(1, 'leaderf#Gtags#TimerCallback', {'repeat': -1})")
else:
instance = self._getInstance()
if instance.isLastReverseOrder():
instance.window.cursor = (min(instance.cursorRow, len(instance.buffer)), 0)
else:
instance.window.cursor = (max(instance.cursorRow - instance.helpLength, 1), 0)
instance.window.options["cursorline"] = True
def deleteCurrentLine(self):
if vim.current.window.cursor[0] <= self._help_length:
return
lfCmd("setlocal modifiable")
line = vim.current.line
if len(self._content) > 0:
self._content.remove(line)
# `del vim.current.line` does not work in neovim
# https://github.com/neovim/neovim/issues/9361
del vim.current.buffer[vim.current.window.cursor[0] - 1]
lfCmd("setlocal nomodifiable")
def getArguments(self):
if self._getExplorer().getLastResultFormat() is not None and \
"--append" in self._arguments:
del self._arguments["--append"]
return self._arguments
def _supportsRefine(self):
return True
#*****************************************************
# gtagsExplManager is a singleton
#*****************************************************
gtagsExplManager = GtagsExplManager()
__all__ = ['gtagsExplManager']
|
coach.py
|
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
sys.path.append('.')
import copy
from configparser import ConfigParser, Error
from rl_coach.core_types import EnvironmentSteps
import os
from rl_coach import logger
import traceback
from rl_coach.logger import screen, failed_imports
import argparse
import atexit
import time
import json
from rl_coach.base_parameters import Frameworks, VisualizationParameters, TaskParameters, DistributedTaskParameters, \
RunType, DistributedCoachSynchronizationType
from multiprocessing import Process
from multiprocessing.managers import BaseManager
import subprocess
from glob import glob
from rl_coach.graph_managers.graph_manager import HumanPlayScheduleParameters, GraphManager
from rl_coach.utils import list_all_presets, short_dynamic_import, get_open_port, SharedMemoryScratchPad, get_base_dir
from rl_coach.graph_managers.basic_rl_graph_manager import BasicRLGraphManager
from rl_coach.environments.environment import SingleLevelSelection
from rl_coach.memories.backend.redis import RedisPubSubMemoryBackendParameters
from rl_coach.memories.backend.memory_impl import construct_memory_params
from rl_coach.data_stores.data_store import DataStoreParameters
from rl_coach.data_stores.s3_data_store import S3DataStoreParameters
from rl_coach.data_stores.nfs_data_store import NFSDataStoreParameters
from rl_coach.data_stores.data_store_impl import get_data_store, construct_data_store_params
from rl_coach.training_worker import training_worker
from rl_coach.rollout_worker import rollout_worker
if len(set(failed_imports)) > 0:
screen.warning("Warning: failed to import the following packages - {}".format(', '.join(set(failed_imports))))
def add_items_to_dict(target_dict, source_dict):
updated_task_parameters = copy.copy(source_dict)
updated_task_parameters.update(target_dict)
return updated_task_parameters
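# Note (added commentary): despite the name, entries already present in target_dict
# take precedence over source_dict, e.g.
#   add_items_to_dict({'a': 1}, {'a': 2, 'b': 3})  ->  {'a': 1, 'b': 3}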
def open_dashboard(experiment_path):
"""
open X11 based dashboard in a new process (nonblocking)
"""
dashboard_path = 'python {}/dashboard.py'.format(get_base_dir())
cmd = "{} --experiment_dir {}".format(dashboard_path, experiment_path)
screen.log_title("Opening dashboard - experiment path: {}".format(experiment_path))
# subprocess.Popen(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, shell=True, executable="/bin/bash")
subprocess.Popen(cmd, shell=True, executable="/bin/bash")
def start_graph(graph_manager: 'GraphManager', task_parameters: 'TaskParameters'):
"""
Runs the graph_manager using the configured task_parameters.
This stand-alone method is a convenience for multiprocessing.
"""
graph_manager.create_graph(task_parameters)
# let the adventure begin
if task_parameters.evaluate_only is not None:
steps_to_evaluate = task_parameters.evaluate_only if task_parameters.evaluate_only > 0 \
else sys.maxsize
graph_manager.evaluate(EnvironmentSteps(steps_to_evaluate))
else:
graph_manager.improve()
graph_manager.close()
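# Note (added commentary): keeping start_graph() at module level makes it picklable,
# so it can be passed directly as a multiprocessing.Process target; see
# start_multi_threaded() below, which calls
# Process(target=start_graph, args=(graph_manager, task_parameters)).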
def handle_distributed_coach_tasks(graph_manager, args, task_parameters):
ckpt_inside_container = "/checkpoint"
memory_backend_params = None
if args.memory_backend_params:
memory_backend_params = json.loads(args.memory_backend_params)
memory_backend_params['run_type'] = str(args.distributed_coach_run_type)
graph_manager.agent_params.memory.register_var('memory_backend_params', construct_memory_params(memory_backend_params))
data_store_params = None
if args.data_store_params:
data_store_params = construct_data_store_params(json.loads(args.data_store_params))
data_store_params.expt_dir = args.experiment_path
data_store_params.checkpoint_dir = ckpt_inside_container
graph_manager.data_store_params = data_store_params
if args.distributed_coach_run_type == RunType.TRAINER:
task_parameters.checkpoint_save_dir = ckpt_inside_container
training_worker(
graph_manager=graph_manager,
task_parameters=task_parameters,
is_multi_node_test=args.is_multi_node_test
)
if args.distributed_coach_run_type == RunType.ROLLOUT_WORKER:
task_parameters.checkpoint_restore_path = ckpt_inside_container
data_store = None
if args.data_store_params:
data_store = get_data_store(data_store_params)
rollout_worker(
graph_manager=graph_manager,
data_store=data_store,
num_workers=args.num_workers,
task_parameters=task_parameters
)
def handle_distributed_coach_orchestrator(args):
from rl_coach.orchestrators.kubernetes_orchestrator import KubernetesParameters, Kubernetes, \
RunTypeParameters
ckpt_inside_container = "/checkpoint"
arg_list = sys.argv[1:]
try:
i = arg_list.index('--distributed_coach_run_type')
arg_list.pop(i)
arg_list.pop(i)
except ValueError:
pass
trainer_command = ['python3', 'rl_coach/coach.py', '--distributed_coach_run_type', str(RunType.TRAINER)] + arg_list
rollout_command = ['python3', 'rl_coach/coach.py', '--distributed_coach_run_type', str(RunType.ROLLOUT_WORKER)] + arg_list
if '--experiment_name' not in rollout_command:
rollout_command = rollout_command + ['--experiment_name', args.experiment_name]
if '--experiment_name' not in trainer_command:
trainer_command = trainer_command + ['--experiment_name', args.experiment_name]
memory_backend_params = None
if args.memory_backend == "redispubsub":
memory_backend_params = RedisPubSubMemoryBackendParameters()
ds_params_instance = None
if args.data_store == "s3":
ds_params = DataStoreParameters("s3", "", "")
ds_params_instance = S3DataStoreParameters(ds_params=ds_params, end_point=args.s3_end_point, bucket_name=args.s3_bucket_name,
creds_file=args.s3_creds_file, checkpoint_dir=ckpt_inside_container, expt_dir=args.experiment_path)
elif args.data_store == "nfs":
ds_params = DataStoreParameters("nfs", "kubernetes", "")
ds_params_instance = NFSDataStoreParameters(ds_params)
worker_run_type_params = RunTypeParameters(args.image, rollout_command, run_type=str(RunType.ROLLOUT_WORKER), num_replicas=args.num_workers)
trainer_run_type_params = RunTypeParameters(args.image, trainer_command, run_type=str(RunType.TRAINER))
orchestration_params = KubernetesParameters([worker_run_type_params, trainer_run_type_params],
kubeconfig='~/.kube/config',
memory_backend_parameters=memory_backend_params,
data_store_params=ds_params_instance)
orchestrator = Kubernetes(orchestration_params)
if not orchestrator.setup():
print("Could not setup.")
return 1
if orchestrator.deploy_trainer():
print("Successfully deployed trainer.")
else:
print("Could not deploy trainer.")
return 1
if orchestrator.deploy_worker():
print("Successfully deployed rollout worker(s).")
else:
print("Could not deploy rollout worker(s).")
return 1
if args.dump_worker_logs:
screen.log_title("Dumping rollout worker logs in: {}".format(args.experiment_path))
orchestrator.worker_logs(path=args.experiment_path)
exit_code = 1
try:
exit_code = orchestrator.trainer_logs()
except KeyboardInterrupt:
pass
orchestrator.undeploy()
return exit_code
class CoachLauncher(object):
"""
This class is responsible for gathering all user-specified configuration options, parsing them,
instantiating a GraphManager and then starting that GraphManager with either improve() or evaluate().
This class is also responsible for launching multiple processes.
It is structured so that it can be sub-classed to provide alternate mechanisms to configure and launch
Coach jobs.
The key entry-point for this class is the .launch() method which is expected to be called from __main__
and handle absolutely everything for a job.
"""
def launch(self):
"""
Main entry point for the class, and the standard way to run coach from the command line.
Parses command-line arguments through argparse, instantiates a GraphManager and then runs it.
"""
parser = self.get_argument_parser()
args = self.get_config_args(parser)
graph_manager = self.get_graph_manager_from_args(args)
self.run_graph_manager(graph_manager, args)
def get_graph_manager_from_args(self, args: argparse.Namespace) -> 'GraphManager':
"""
Return the graph manager according to the command line arguments given by the user.
:param args: the arguments given by the user
:return: the graph manager, not bound to task_parameters yet.
"""
graph_manager = None
# if a preset was given we will load the graph manager for the preset
if args.preset is not None:
graph_manager = short_dynamic_import(args.preset, ignore_module_case=True)
# for human play we need to create a custom graph manager
if args.play:
from rl_coach.agents.human_agent import HumanAgentParameters
env_params = short_dynamic_import(args.environment_type, ignore_module_case=True)()
env_params.human_control = True
schedule_params = HumanPlayScheduleParameters()
graph_manager = BasicRLGraphManager(HumanAgentParameters(), env_params, schedule_params, VisualizationParameters())
# Set framework
# Note: Some graph managers (e.g. HAC preset) create multiple agents and the attribute is called agents_params
if hasattr(graph_manager, 'agent_params'):
for network_parameters in graph_manager.agent_params.network_wrappers.values():
network_parameters.framework = args.framework
elif hasattr(graph_manager, 'agents_params'):
for ap in graph_manager.agents_params:
for network_parameters in ap.network_wrappers.values():
network_parameters.framework = args.framework
if args.level:
if isinstance(graph_manager.env_params.level, SingleLevelSelection):
graph_manager.env_params.level.select(args.level)
else:
graph_manager.env_params.level = args.level
# set the seed for the environment
if args.seed is not None:
graph_manager.env_params.seed = args.seed
# visualization
graph_manager.visualization_parameters.dump_gifs = graph_manager.visualization_parameters.dump_gifs or args.dump_gifs
graph_manager.visualization_parameters.dump_mp4 = graph_manager.visualization_parameters.dump_mp4 or args.dump_mp4
graph_manager.visualization_parameters.render = args.render
graph_manager.visualization_parameters.tensorboard = args.tensorboard
graph_manager.visualization_parameters.print_networks_summary = args.print_networks_summary
# update the custom parameters
if args.custom_parameter is not None:
unstripped_key_value_pairs = [pair.split('=') for pair in args.custom_parameter.split(';')]
stripped_key_value_pairs = [tuple([pair[0].strip(), pair[1].strip()]) for pair in
unstripped_key_value_pairs if len(pair) == 2]
# load custom parameters into run_dict
for key, value in stripped_key_value_pairs:
exec("graph_manager.{}={}".format(key, value))
return graph_manager
def display_all_presets_and_exit(self):
# list available presets
screen.log_title("Available Presets:")
for preset in sorted(list_all_presets()):
print(preset)
sys.exit(0)
def expand_preset(self, preset):
"""
Replace a short preset name with the full python path, and verify that it can be imported.
"""
if preset.lower() in [p.lower() for p in list_all_presets()]:
preset = "{}.py:graph_manager".format(os.path.join(get_base_dir(), 'presets', preset))
else:
preset = "{}".format(preset)
# if a graph manager variable was not specified, try the default of :graph_manager
if len(preset.split(":")) == 1:
preset += ":graph_manager"
# verify that the preset exists
preset_path = preset.split(":")[0]
if not os.path.exists(preset_path):
screen.error("The given preset ({}) cannot be found.".format(preset))
# verify that the preset can be instantiated
try:
short_dynamic_import(preset, ignore_module_case=True)
except TypeError as e:
traceback.print_exc()
screen.error('Internal Error: ' + str(e) + "\n\nThe given preset ({}) cannot be instantiated."
.format(preset))
return preset
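    # Illustrative example (added commentary): a short name such as 'CartPole_DQN'
    # expands to '<base_dir>/presets/CartPole_DQN.py:graph_manager', while an
    # explicit 'path/to/my_preset.py:my_graph_manager' (hypothetical) is kept as
    # given, subject to the existence and import checks above.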
def get_config_args(self, parser: argparse.ArgumentParser) -> argparse.Namespace:
"""
Returns a Namespace object with all the user-specified configuration options needed to launch.
This implementation uses argparse to take arguments from the CLI, but this can be over-ridden by
another method that gets its configuration from elsewhere. An equivalent method however must
return an identically structured Namespace object, which conforms to the structure defined by
get_argument_parser.
This method parses the arguments that the user entered, does some basic validation, and
modification of user-specified values in short form to be more explicit.
:param parser: a parser object which implicitly defines the format of the Namespace that
is expected to be returned.
:return: the parsed arguments as a Namespace
"""
args = parser.parse_args()
if args.nocolor:
screen.set_use_colors(False)
# if no arg is given
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
# list available presets
if args.list:
self.display_all_presets_and_exit()
# Read args from config file for distributed Coach.
if args.distributed_coach and args.distributed_coach_run_type == RunType.ORCHESTRATOR:
coach_config = ConfigParser({
'image': '',
'memory_backend': 'redispubsub',
'data_store': 's3',
's3_end_point': 's3.amazonaws.com',
's3_bucket_name': '',
's3_creds_file': ''
})
try:
coach_config.read(args.distributed_coach_config_path)
args.image = coach_config.get('coach', 'image')
args.memory_backend = coach_config.get('coach', 'memory_backend')
args.data_store = coach_config.get('coach', 'data_store')
if args.data_store == 's3':
args.s3_end_point = coach_config.get('coach', 's3_end_point')
args.s3_bucket_name = coach_config.get('coach', 's3_bucket_name')
args.s3_creds_file = coach_config.get('coach', 's3_creds_file')
except Error as e:
screen.error("Error when reading distributed Coach config file: {}".format(e))
if args.image == '':
screen.error("Image cannot be empty.")
data_store_choices = ['s3', 'nfs']
if args.data_store not in data_store_choices:
screen.warning("{} data store is unsupported.".format(args.data_store))
screen.error("Supported data stores are {}.".format(data_store_choices))
memory_backend_choices = ['redispubsub']
if args.memory_backend not in memory_backend_choices:
screen.warning("{} memory backend is not supported.".format(args.memory_backend))
screen.error("Supported memory backends are {}.".format(memory_backend_choices))
if args.data_store == 's3':
if args.s3_bucket_name == '':
screen.error("S3 bucket name cannot be empty.")
if args.s3_creds_file == '':
args.s3_creds_file = None
if args.play and args.distributed_coach:
screen.error("Playing is not supported in distributed Coach.")
# replace a short preset name with the full path
if args.preset is not None:
args.preset = self.expand_preset(args.preset)
# validate the checkpoints args
if args.checkpoint_restore_dir is not None and not os.path.exists(args.checkpoint_restore_dir):
screen.error("The requested checkpoint folder to load from does not exist.")
# validate the checkpoints args
if args.checkpoint_restore_file is not None and not glob(args.checkpoint_restore_file + '*'):
screen.error("The requested checkpoint file to load from does not exist.")
# no preset was given. check if the user requested to play some environment on its own
if args.preset is None and args.play and not args.environment_type:
screen.error('When no preset is given for Coach to run, and the user requests human control over '
'the environment, the user is expected to input the desired environment_type and level.'
'\nAt least one of these parameters was not given.')
elif args.preset and args.play:
screen.error("Both the --preset and the --play flags were set. These flags can not be used together. "
"For human control, please use the --play flag together with the environment type flag (-et)")
elif args.preset is None and not args.play:
screen.error("Please choose a preset using the -p flag or use the --play flag together with choosing an "
"environment type (-et) in order to play the game.")
# get experiment name and path
args.experiment_name = logger.get_experiment_name(args.experiment_name)
args.experiment_path = logger.get_experiment_path(args.experiment_name)
if args.play and args.num_workers > 1:
screen.warning("Playing the game as a human is only available with a single worker. "
"The number of workers will be reduced to 1")
args.num_workers = 1
args.framework = Frameworks[args.framework.lower()]
# checkpoints
args.checkpoint_save_dir = os.path.join(args.experiment_path, 'checkpoint') if args.checkpoint_save_secs is not None else None
if args.export_onnx_graph and not args.checkpoint_save_secs:
screen.warning("Exporting ONNX graphs requires setting the --checkpoint_save_secs flag. "
"The --export_onnx_graph will have no effect.")
return args
def get_argument_parser(self) -> argparse.ArgumentParser:
"""
This returns an ArgumentParser object which defines the set of options that customers are expected to supply in order
to launch a coach job.
"""
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--preset',
help="(string) Name of a preset to run (class name from the 'presets' directory.)",
default=None,
type=str)
parser.add_argument('-l', '--list',
help="(flag) List all available presets",
action='store_true')
parser.add_argument('-e', '--experiment_name',
help="(string) Experiment name to be used to store the results.",
default='',
type=str)
parser.add_argument('-r', '--render',
help="(flag) Render environment",
action='store_true')
parser.add_argument('-f', '--framework',
help="(string) Neural network framework. Available values: tensorflow, mxnet",
default='tensorflow',
type=str)
parser.add_argument('-n', '--num_workers',
help="(int) Number of workers for multi-process based agents, e.g. A3C",
default=1,
type=int)
parser.add_argument('-c', '--use_cpu',
help="(flag) Use only the cpu for training. If a GPU is not available, this flag will have no "
"effect and the CPU will be used either way.",
action='store_true')
parser.add_argument('-ew', '--evaluation_worker',
help="(flag) If multiple workers are used, add an evaluation worker as well which will "
"evaluate asynchronously and independently during the training. NOTE: this worker will "
"ignore the evaluation settings in the preset's ScheduleParams.",
action='store_true')
parser.add_argument('--play',
help="(flag) Play as a human by controlling the game with the keyboard. "
"This option will save a replay buffer with the game play.",
action='store_true')
parser.add_argument('--evaluate',
help="(int) Run evaluation only, for at least the given number of steps (note that complete "
"episodes are evaluated). This is a convenient way to disable training in order "
"to evaluate an existing checkpoint. If value is 0, or no value is provided, "
"evaluation will run for an infinite number of steps.",
nargs='?',
const=0,
type=int)
parser.add_argument('-v', '--verbosity',
help="(flag) Sets the verbosity level of Coach print outs. Can be either low or high.",
default="low",
type=str)
parser.add_argument('-tfv', '--tf_verbosity',
help="(flag) TensorFlow verbosity level",
default=3,
type=int)
parser.add_argument('--nocolor',
help="(flag) Turn off color-codes in screen logging. Ascii text only",
action='store_true')
parser.add_argument('-s', '--checkpoint_save_secs',
help="(int) Time in seconds between saving checkpoints of the model.",
default=None,
type=int)
parser.add_argument('-crd', '--checkpoint_restore_dir',
help='(string) Path to a folder containing a checkpoint to restore the model from.',
type=str)
parser.add_argument('-crf', '--checkpoint_restore_file',
help='(string) Path to a checkpoint file to restore the model from.',
type=str)
parser.add_argument('-dg', '--dump_gifs',
help="(flag) Enable the gif saving functionality.",
action='store_true')
parser.add_argument('-dm', '--dump_mp4',
help="(flag) Enable the mp4 saving functionality.",
action='store_true')
parser.add_argument('-et', '--environment_type',
help="(string) Choose an environment type class to override on top of the selected preset.",
default=None,
type=str)
parser.add_argument('-lvl', '--level',
                        help="(string) Choose the level that will be played in the environment that was selected. "
                             "This value will override the level parameter in the environment class.",
default=None,
type=str)
parser.add_argument('-cp', '--custom_parameter',
help="(string) Semicolon separated parameters used to override specific parameters on top of"
" the selected preset (or on top of the command-line assembled one). "
"Whenever a parameter value is a string, it should be inputted as '\\\"string\\\"'. "
"For ex.: "
"\"visualization.render=False; num_training_iterations=500; optimizer='rmsprop'\"",
default=None,
type=str)
parser.add_argument('--print_networks_summary',
help="(flag) Print network summary to stdout",
action='store_true')
parser.add_argument('-tb', '--tensorboard',
help="(flag) When using the TensorFlow backend, enable TensorBoard log dumps. ",
action='store_true')
parser.add_argument('-ns', '--no_summary',
help="(flag) Prevent Coach from printing a summary and asking questions at the end of runs",
action='store_true')
parser.add_argument('-d', '--open_dashboard',
help="(flag) Open dashboard with the experiment when the run starts",
action='store_true')
parser.add_argument('--seed',
help="(int) A seed to use for running the experiment",
default=None,
type=int)
parser.add_argument('-onnx', '--export_onnx_graph',
                        help="(flag) Export the ONNX graph to the experiment directory. "
                             "This will have effect only if the --checkpoint_save_secs flag is used in order to store "
                             "checkpoints, since the weight checkpoints are needed for the ONNX graph. "
"Keep in mind that this can cause major overhead on the experiment. "
"Exporting ONNX graphs requires manually installing the tf2onnx package "
"(https://github.com/onnx/tensorflow-onnx).",
action='store_true')
parser.add_argument('-dc', '--distributed_coach',
help="(flag) Use distributed Coach.",
action='store_true')
parser.add_argument('-dcp', '--distributed_coach_config_path',
                        help="(string) Path to config file when using distributed rollout workers. "
                             "Only distributed Coach parameters should be provided through this config file. "
                             "The rest of the parameters are provided using the Coach command line options. "
                             "Used only with the --distributed_coach flag. "
                             "Ignored if the --distributed_coach flag is not used.",
type=str)
parser.add_argument('--memory_backend_params',
help=argparse.SUPPRESS,
type=str)
parser.add_argument('--data_store_params',
help=argparse.SUPPRESS,
type=str)
parser.add_argument('--distributed_coach_run_type',
help=argparse.SUPPRESS,
type=RunType,
default=RunType.ORCHESTRATOR,
choices=list(RunType))
parser.add_argument('-asc', '--apply_stop_condition',
                        help="(flag) If set, this will apply a stop condition on the run, defined by reaching a "
                             "target success rate as set by the environment or a custom success rate as defined "
                             "in the preset.",
action='store_true')
parser.add_argument('--dump_worker_logs',
help="(flag) Only used in distributed coach. If set, the worker logs are saved in the experiment dir",
action='store_true')
parser.add_argument('--is_multi_node_test',
help=argparse.SUPPRESS,
action='store_true')
return parser
def run_graph_manager(self, graph_manager: 'GraphManager', args: argparse.Namespace):
if args.distributed_coach and not graph_manager.agent_params.algorithm.distributed_coach_synchronization_type:
screen.error("{} algorithm is not supported using distributed Coach.".format(graph_manager.agent_params.algorithm))
if args.distributed_coach and args.checkpoint_save_secs and graph_manager.agent_params.algorithm.distributed_coach_synchronization_type == DistributedCoachSynchronizationType.SYNC:
screen.warning("The --checkpoint_save_secs or -s argument will be ignored as SYNC distributed coach sync type is used. Checkpoint will be saved every training iteration.")
if args.distributed_coach and not args.checkpoint_save_secs and graph_manager.agent_params.algorithm.distributed_coach_synchronization_type == DistributedCoachSynchronizationType.ASYNC:
screen.error("Distributed coach with ASYNC distributed coach sync type requires --checkpoint_save_secs or -s.")
# Intel optimized TF seems to run significantly faster when limiting to a single OMP thread.
# This will not affect GPU runs.
os.environ["OMP_NUM_THREADS"] = "1"
# turn TF debug prints off
if args.framework == Frameworks.tensorflow:
os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(args.tf_verbosity)
# turn off the summary at the end of the run if necessary
if not args.no_summary and not args.distributed_coach:
atexit.register(logger.summarize_experiment)
screen.change_terminal_title(args.experiment_name)
if args.checkpoint_restore_dir is not None and args.checkpoint_restore_file is not None:
            raise ValueError("Only one of the checkpoint_restore_dir and checkpoint_restore_file arguments can be used"
                             " simultaneously.")
checkpoint_restore_path = args.checkpoint_restore_dir if args.checkpoint_restore_dir \
else args.checkpoint_restore_file
task_parameters = TaskParameters(
framework_type=args.framework,
evaluate_only=args.evaluate,
experiment_path=args.experiment_path,
seed=args.seed,
use_cpu=args.use_cpu,
checkpoint_save_secs=args.checkpoint_save_secs,
checkpoint_restore_path=checkpoint_restore_path,
checkpoint_save_dir=args.checkpoint_save_dir,
export_onnx_graph=args.export_onnx_graph,
apply_stop_condition=args.apply_stop_condition
)
# open dashboard
if args.open_dashboard:
open_dashboard(args.experiment_path)
if args.distributed_coach and args.distributed_coach_run_type != RunType.ORCHESTRATOR:
handle_distributed_coach_tasks(graph_manager, args, task_parameters)
return
if args.distributed_coach and args.distributed_coach_run_type == RunType.ORCHESTRATOR:
exit(handle_distributed_coach_orchestrator(args))
# Single-threaded runs
if args.num_workers == 1:
self.start_single_threaded(task_parameters, graph_manager, args)
else:
self.start_multi_threaded(graph_manager, args)
@staticmethod
def start_single_threaded(task_parameters, graph_manager: 'GraphManager', args: argparse.Namespace):
# Start the training or evaluation
start_graph(graph_manager=graph_manager, task_parameters=task_parameters)
@staticmethod
def start_multi_threaded(graph_manager: 'GraphManager', args: argparse.Namespace):
total_tasks = args.num_workers
if args.evaluation_worker:
total_tasks += 1
ps_hosts = "localhost:{}".format(get_open_port())
worker_hosts = ",".join(["localhost:{}".format(get_open_port()) for i in range(total_tasks)])
# Shared memory
class CommManager(BaseManager):
pass
CommManager.register('SharedMemoryScratchPad', SharedMemoryScratchPad, exposed=['add', 'get', 'internal_call'])
comm_manager = CommManager()
comm_manager.start()
shared_memory_scratchpad = comm_manager.SharedMemoryScratchPad()
if args.checkpoint_restore_file:
raise ValueError("Multi-Process runs only support restoring checkpoints from a directory, "
"and not from a file. ")
def start_distributed_task(job_type, task_index, evaluation_worker=False,
shared_memory_scratchpad=shared_memory_scratchpad):
task_parameters = DistributedTaskParameters(
framework_type=args.framework,
parameters_server_hosts=ps_hosts,
worker_hosts=worker_hosts,
job_type=job_type,
task_index=task_index,
evaluate_only=0 if evaluation_worker else None, # 0 value for evaluation worker as it should run infinitely
use_cpu=args.use_cpu,
num_tasks=total_tasks, # training tasks + 1 evaluation task
num_training_tasks=args.num_workers,
experiment_path=args.experiment_path,
shared_memory_scratchpad=shared_memory_scratchpad,
seed=args.seed+task_index if args.seed is not None else None, # each worker gets a different seed
checkpoint_save_secs=args.checkpoint_save_secs,
checkpoint_restore_path=args.checkpoint_restore_dir, # MonitoredTrainingSession only supports a dir
checkpoint_save_dir=args.checkpoint_save_dir,
export_onnx_graph=args.export_onnx_graph,
apply_stop_condition=args.apply_stop_condition
)
# we assume that only the evaluation workers are rendering
graph_manager.visualization_parameters.render = args.render and evaluation_worker
p = Process(target=start_graph, args=(graph_manager, task_parameters))
# p.daemon = True
p.start()
return p
# parameter server
parameter_server = start_distributed_task("ps", 0)
# training workers
# wait a bit before spawning the non-chief workers to make sure the session has already been created
workers = []
workers.append(start_distributed_task("worker", 0))
time.sleep(2)
for task_index in range(1, args.num_workers):
workers.append(start_distributed_task("worker", task_index))
# evaluation worker
if args.evaluation_worker or args.render:
evaluation_worker = start_distributed_task("worker", args.num_workers, evaluation_worker=True)
# wait for all workers
[w.join() for w in workers]
if args.evaluation_worker:
evaluation_worker.terminate()
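# --- Illustrative sketch only (never called by Coach): how multiprocessing's
# BaseManager shares a single object between processes, mirroring the
# SharedMemoryScratchPad pattern used in start_multi_threaded above.
# The _ExampleScratchPad class and its methods are hypothetical.
class _ExampleScratchPad(object):
    def __init__(self):
        self._store = {}

    def add(self, key, value):
        self._store[key] = value

    def get(self, key):
        return self._store.get(key)


class _ExampleCommManager(BaseManager):
    pass


# Only the methods listed in `exposed` are reachable through the proxy objects.
_ExampleCommManager.register('ScratchPad', _ExampleScratchPad, exposed=['add', 'get'])


def _example_worker(shared_pad):
    shared_pad.add('answer', 42)  # every process talks to the same hosted object


def _shared_scratchpad_example():
    manager = _ExampleCommManager()
    manager.start()
    pad = manager.ScratchPad()  # a proxy to an instance living in the manager process
    p = Process(target=_example_worker, args=(pad,))
    p.start()
    p.join()
    return pad.get('answer')  # -> 42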
def main():
launcher = CoachLauncher()
launcher.launch()
if __name__ == "__main__":
main()
|
conftest.py
|
import pathlib
from multiprocessing import Process
import docker
import pytest
from testcontainers.compose import DockerCompose
from .pact_provider import run_server
@pytest.fixture(scope="module")
def server():
proc = Process(target=run_server, args=(), daemon=True)
proc.start()
yield proc
# Cleanup after test
proc.kill()
def pytest_addoption(parser):
parser.addoption(
"--publish-pact", type=str, action="store", help="Upload generated pact file to pact broker with version"
)
parser.addoption("--run-broker", type=bool, action="store", help="Whether to run broker in this test or not.")
@pytest.fixture(scope="session", autouse=True)
def publish_existing_pact(broker):
"""Publish the contents of the pacts folder to the Pact Broker.
In normal usage, a Consumer would publish Pacts to the Pact Broker after
running tests - this fixture would NOT be needed.
.
Because the broker is being used standalone here, it will not contain the
required Pacts, so we must first spin up the pact-cli and publish them.
In the Pact Broker logs, this corresponds to the following entry:
PactBroker::Pacts::Service -- Creating new pact publication with params \
{:consumer_name=>"UserServiceClient", :provider_name=>"UserService", \
:revision_number=>nil, :consumer_version_number=>"1", :pact_version_sha=>nil, \
:consumer_name_in_pact=>"UserServiceClient", :provider_name_in_pact=>"UserService"}
"""
source = str(pathlib.Path.cwd().joinpath("..", "pacts").resolve())
pacts = [f"{source}:/pacts"]
envs = {
"PACT_BROKER_BASE_URL": "http://broker_app:9292",
"PACT_BROKER_USERNAME": "pactbroker",
"PACT_BROKER_PASSWORD": "pactbroker",
}
client = docker.from_env()
print("Publishing existing Pact")
client.containers.run(
remove=True,
network="broker_default",
volumes=pacts,
image="pactfoundation/pact-cli:latest",
environment=envs,
command="publish /pacts --consumer-app-version 1",
)
print("Finished publishing")
# This fixture is to simulate a managed Pact Broker or Pactflow account.
# For almost all purposes outside this example, you will want to use a real
# broker. See https://github.com/pact-foundation/pact_broker for further details.
@pytest.fixture(scope="session", autouse=True)
def broker(request):
version = request.config.getoption("--publish-pact")
publish = True if version else False
# If the results are not going to be published to the broker, there is
# nothing further to do anyway
if not publish:
yield
return
run_broker = request.config.getoption("--run-broker")
if run_broker:
# Start up the broker using docker-compose
print("Starting broker")
with DockerCompose("../broker", compose_file_name=["docker-compose.yml"], pull=True) as compose:
stdout, stderr = compose.get_logs()
if stderr:
print("Errors\\n:{}".format(stderr))
print("{}".format(stdout))
print("Started broker")
yield
print("Stopping broker")
print("Broker stopped")
else:
# Assuming there is a broker available already, docker-compose has been
# used manually as the --run-broker option has not been provided
yield
return
|
conftest.py
|
import builtins
from multiprocessing import Process
import sys
import time
import os
import shutil
import tempfile
import pytest
import torch
import syft
from syft import TorchHook
from syft.generic.frameworks.hook import hook_args
from syft.workers.websocket_client import WebsocketClientWorker
from syft.workers.websocket_server import WebsocketServerWorker
def pytest_configure(config):
config.addinivalue_line(
"markers", "translation: mark test to run only as part of the translation test suite"
)
def pytest_sessionstart(session):
session.failed_tests = set()
def pytest_runtest_makereport(item, call): # pragma: no cover
if call.excinfo is not None and item.originalname:
item.session.failed_tests.add(item.originalname)
def pytest_runtest_setup(item): # pragma: no cover
if item.originalname in item.session.failed_tests:
pytest.skip(f"previous test failed ({item.name})")
def _start_proc(participant, dataset: str = None, **kwargs): # pragma: no cover
"""Helper function for spinning up a websocket participant."""
def target():
server = participant(**kwargs)
if dataset is not None:
data, key = dataset
server.add_dataset(data, key=key)
server.start()
p = Process(target=target)
p.start()
return p
def instantiate_websocket_client_worker(max_tries=5, sleep_time=0.1, **kwargs): # pragma: no cover
"""Helper function to instantiate the websocket client.
If a connection is refused, we wait a bit (`sleep_time` seconds) and try again.
After `max_tries` failed tries, a ConnectionRefusedError is raised.
"""
retry_counter = 0
connection_open = False
while not connection_open:
try:
remote_proxy = WebsocketClientWorker(**kwargs)
connection_open = True
except ConnectionRefusedError as e:
if retry_counter < max_tries:
retry_counter += 1
time.sleep(sleep_time)
else:
raise e
return remote_proxy
@pytest.fixture()
def start_proc(): # pragma: no cover
return _start_proc
@pytest.fixture()
def start_remote_worker(): # pragma: no cover
"""Helper function for starting a websocket worker."""
def _start_remote_worker(
id, hook, dataset: str = None, host="0.0.0.0", port=8768, max_tries=5, sleep_time=0.01
):
kwargs = {"id": id, "host": host, "port": port, "hook": hook}
server = _start_proc(WebsocketServerWorker, dataset=dataset, **kwargs)
remote_proxy = instantiate_websocket_client_worker(
max_tries=max_tries, sleep_time=sleep_time, **kwargs
)
return server, remote_proxy
return _start_remote_worker
@pytest.fixture()
def start_remote_server_worker_only(): # pragma: no cover
"""Helper function for starting a websocket worker."""
def _start_remote_worker(
id, hook, dataset: str = None, host="localhost", port=8768, max_tries=5, sleep_time=0.01
):
kwargs = {"id": id, "host": host, "port": port, "hook": hook}
server = _start_proc(WebsocketServerWorker, dataset=dataset, **kwargs)
return server
return _start_remote_worker
# This fixture is only used by the notebook tests, which run separately from the
# test coverage checker in CI and are thus excluded from the coverage checks.
@pytest.yield_fixture(scope="function")
def isolated_filesystem(): # pragma: no cover
"""A context manager that creates a temporary folder and changes
the current working directory to it for isolated filesystem tests.
"""
cwd = os.getcwd()
t = tempfile.mkdtemp()
shutil.copytree("examples/tutorials/", t + "/examples")
# Path(t + "/data/").mkdir(parents=True, exist_ok=True)
shutil.copytree("examples/data/", t + "/data/")
os.chdir(t + "/examples")
try:
yield t
finally:
os.chdir(cwd)
shutil.rmtree(t)
@pytest.fixture(scope="session", autouse=True)
def hook():
hook = TorchHook(torch)
return hook
@pytest.fixture(scope="function", autouse=True)
def workers(hook):
# To run a plan locally the local worker can't be a client worker,
# since it needs to register objects
# LaRiffle edit: doing this increases the reference count on pointers and
# breaks the auto garbage collection for pointer of pointers, see #2150
# hook.local_worker.is_client_worker = False
# Reset the hook and the local worker
syft.local_worker.clear_objects()
hook_args.hook_method_args_functions = {}
hook_args.hook_method_response_functions = {}
hook_args.register_response_functions = {}
hook_args.get_tensor_type_functions = {}
# Define 4 virtual workers
alice = syft.VirtualWorker(id="alice", hook=hook, is_client_worker=False)
bob = syft.VirtualWorker(id="bob", hook=hook, is_client_worker=False)
charlie = syft.VirtualWorker(id="charlie", hook=hook, is_client_worker=False)
james = syft.VirtualWorker(id="james", hook=hook, is_client_worker=False)
workers = {
"me": hook.local_worker,
"alice": alice,
"bob": bob,
"charlie": charlie,
"james": james,
}
yield workers
alice.remove_worker_from_local_worker_registry()
bob.remove_worker_from_local_worker_registry()
charlie.remove_worker_from_local_worker_registry()
james.remove_worker_from_local_worker_registry()
@pytest.fixture
def hide_module():
import_orig = builtins.__import__
# When we check for imports in dependency_check, we don't actually attempt
# to import each package, so popping a module from sys.modules and mocking
# the import statement is not sufficient to simulate the dependency check
# for when the dependency is absent. The way we check for dependencies
# (importlib.util.find_spec) uses module Finders in the sys.meta_path when
# checking for module specs, so we need to mock the find_spec method of the
# Finder that will discover the module we want to hide. That Finder happens
# to be in position three of the meta path.
find_spec_orig = sys.meta_path[3].find_spec
def mocked_import(name, globals, locals, fromlist, level):
if name in ["tensorflow", "tf_encrypted", "torch"]:
raise ImportError()
return import_orig(name, globals, locals, fromlist, level)
def mocked_find_spec(self, fullname, target=None):
if self in ["tensorflow", "tf_encrypted"]:
return None
return find_spec_orig(self, fullname, target)
builtins.__import__ = mocked_import
sys.meta_path[3].find_spec = mocked_find_spec
yield
builtins.__import__ = import_orig
sys.meta_path[3].find_spec = find_spec_orig
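# --- Illustrative sketch only (not used by the fixtures above): an alternative
# way to hide modules that does not rely on the Finder sitting at a fixed
# position in sys.meta_path. Inserting a finder at position 0 that raises for
# the hidden names blocks the import statement itself (only effective if the
# module is not already in sys.modules); note that, unlike the fixture above,
# it makes importlib.util.find_spec raise rather than return None.
class _HidingFinder:
    def __init__(self, hidden_names):
        self.hidden_names = set(hidden_names)

    def find_spec(self, fullname, path=None, target=None):
        if fullname.split(".")[0] in self.hidden_names:
            raise ModuleNotFoundError(f"{fullname} is hidden for this test")
        return None  # defer to the remaining finders


def _hide_modules_example():
    finder = _HidingFinder({"tensorflow", "tf_encrypted"})
    sys.meta_path.insert(0, finder)
    try:
        try:
            import tensorflow  # noqa: F401
        except ImportError:
            print("tensorflow is hidden")
    finally:
        sys.meta_path.remove(finder)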
|
scanner.py
|
import ipaddress
import os
import socket
import struct
import sys
import threading
import time
SUBNET = '192.168.1.0/24'
MESSAGE = 'PYTHONRULES!'
class IP:
def __init__(self, buff=None):
header = struct.unpack('<BBHHHBBH4s4s', buff)
self.ver = header[0] >> 4
self.ihl = header[0] & 0xF
self.tos = header[1]
self.len = header[2]
self.id = header[3]
self.offset = header[4]
self.ttl = header[5]
self.protocol_num = header[6]
self.sum = header[7]
self.src = header[8]
self.dst = header[9]
# human readable IP addresses
self.src_address = ipaddress.ip_address(self.src)
self.dst_address = ipaddress.ip_address(self.dst)
# map protocol constants to their names
self.protocol_map = {1: "ICMP", 6: "TCP", 17: "UDP"}
try:
self.protocol = self.protocol_map[self.protocol_num]
except Exception as e:
print('%s No protocol for %s' % (e, self.protocol_num))
self.protocol = str(self.protocol_num)
class ICMP:
def __init__(self, buff):
header = struct.unpack('<BBHHH', buff)
self.type = header[0]
self.code = header[1]
self.sum = header[2]
self.id = header[3]
self.seq = header[4]
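# --- Illustrative sketch only (not used by the scanner): decoding a hand-built
# IPv4 header with the IP class above. It packs with the same '<' byte order the
# class unpacks with, so the values round-trip. The addresses, id and TTL are
# arbitrary example values, and the checksum is left at zero since IP() does not
# verify it.
def _decode_example():
    raw = struct.pack(
        '<BBHHHBBH4s4s',
        (4 << 4) | 5,                       # version=4, ihl=5 (a 20-byte header)
        0,                                  # type of service
        20,                                 # total length
        0x1234,                             # identification
        0,                                  # flags / fragment offset
        64,                                 # TTL
        1,                                  # protocol number 1 -> ICMP
        0,                                  # checksum (not validated by IP())
        socket.inet_aton('192.168.1.10'),   # source address
        socket.inet_aton('192.168.1.203'),  # destination address
    )
    header = IP(raw)
    print(header.protocol, header.src_address, '->', header.dst_address)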
def udp_sender():
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sender:
for ip in ipaddress.ip_network(SUBNET).hosts():
time.sleep(1)
print('+', end='')
sender.sendto(bytes(MESSAGE, 'utf8'), (str(ip), 65212))
class Scanner:
def __init__(self, host):
self.host = host
if os.name == 'nt':
socket_protocol = socket.IPPROTO_IP
else:
socket_protocol = socket.IPPROTO_ICMP
self.socket = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket_protocol)
self.socket.bind((host, 0))
self.socket.setsockopt(socket.IPPROTO_IP, socket.IP_HDRINCL, 1)
print('hitting promiscuous mode...')
if os.name == 'nt':
self.socket.ioctl(socket.SIO_RCVALL, socket.RCVALL_ON)
def sniff(self):
hosts_up = set([f'{str(self.host)} *'])
try:
while True:
print('.',end='')
raw_buffer = self.socket.recvfrom(65535)[0]
ip_header = IP(raw_buffer[0:20])
if ip_header.protocol == "ICMP":
offset = ip_header.ihl * 4
buf = raw_buffer[offset:offset + 8]
icmp_header = ICMP(buf)
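# Type 3 / code 3 is an ICMP "Destination Unreachable: Port Unreachable" reply,
# which is what a live host sends back when our UDP probe hits a closed port.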
if icmp_header.code == 3 and icmp_header.type == 3:
if ipaddress.ip_address(ip_header.src_address) in ipaddress.IPv4Network(SUBNET):
if raw_buffer[len(raw_buffer) - len(MESSAGE): ] == bytes(MESSAGE, 'utf8'):
hosts_up.add(str(ip_header.src_address))
print(f'Host Up: {str(ip_header.src_address)}')
# handle CTRL-C
except KeyboardInterrupt:
if os.name == 'nt':
self.socket.ioctl(socket.SIO_RCVALL, socket.RCVALL_OFF)
print('\nUser interrupted.')
if hosts_up:
print(f'\n\nSummary: Hosts up on {SUBNET}')
for host in sorted(hosts_up):
print(f'{host}')
print('')
sys.exit()
if __name__ == '__main__':
if len(sys.argv) == 2:
host = sys.argv[1]
else:
host = '192.168.1.203'
s = Scanner(host)
time.sleep(10)
t = threading.Thread(target=udp_sender)
t.start()
s.sniff()
|
test_lib.py
|
#!/usr/bin/env python
"""A library for tests."""
from __future__ import division
import codecs
import datetime
import email
import functools
import logging
import os
import pdb
import platform
import shutil
import socket
import sys
import tempfile
import threading
import time
import unittest
from builtins import range # pylint: disable=redefined-builtin
from future.utils import iteritems
import mock
import pkg_resources
import unittest
from grr_response_client import comms
from grr_response_core import config
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_core.lib.rdfvalues import client_network as rdf_client_network
from grr_response_core.lib.rdfvalues import crypto as rdf_crypto
from grr_response_server import access_control
from grr_response_server import aff4
from grr_response_server import artifact
from grr_response_server import client_index
from grr_response_server import data_store
from grr_response_server import email_alerts
from grr_response_server.aff4_objects import aff4_grr
from grr_response_server.aff4_objects import filestore
from grr_response_server.aff4_objects import users
from grr_response_server.flows.general import audit
from grr_response_server.hunts import results as hunts_results
from grr_response_server.rdfvalues import objects as rdf_objects
from grr.test_lib import testing_startup
FIXED_TIME = rdfvalue.RDFDatetime.Now() - rdfvalue.Duration("8d")
TEST_CLIENT_ID = rdf_client.ClientURN("C.1000000000000000")
class GRRBaseTest(unittest.TestCase):
"""This is the base class for all GRR tests."""
use_relational_reads = False
def __init__(self, methodName=None): # pylint: disable=g-bad-name
"""Hack around unittest's stupid constructor.
We sometimes need to instantiate the test suite without running any tests -
e.g. to start initialization or setUp() functions. The unittest constructor
requires a valid method name to be provided.
Args:
methodName: The test method to run.
"""
super(GRRBaseTest, self).__init__(methodName=methodName or "__init__")
self.base_path = config.CONFIG["Test.data_dir"]
test_user = u"test"
users.GRRUser.SYSTEM_USERS.add(test_user)
self.token = access_control.ACLToken(
username=test_user, reason="Running tests")
_set_up_lock = threading.RLock()
_set_up_done = False
@classmethod
def setUpClass(cls):
super(GRRBaseTest, cls).setUpClass()
with GRRBaseTest._set_up_lock:
if not GRRBaseTest._set_up_done:
testing_startup.TestInit()
GRRBaseTest._set_up_done = True
def setUp(self):
super(GRRBaseTest, self).setUp()
self.temp_dir = TempDirPath()
config.CONFIG.SetWriteBack(os.path.join(self.temp_dir, "writeback.yaml"))
logging.info("Starting test: %s.%s", self.__class__.__name__,
self._testMethodName)
self.last_start_time = time.time()
data_store.DB.ClearTestDB()
# Each datastore is wrapped with DatabaseValidationWrapper, so we have
# to access the delegate directly (assuming it's an InMemoryDB
# implementation).
data_store.REL_DB.delegate.ClearTestDB()
aff4.FACTORY.Flush()
# Create a Foreman and Filestores, they are used in many tests.
aff4_grr.GRRAFF4Init().Run()
filestore.FileStoreInit().Run()
hunts_results.ResultQueueInitHook().Run()
email_alerts.EmailAlerterInit().RunOnce()
audit.AuditEventListener.created_logs.clear()
# Stub out the email function
self.emails_sent = []
def SendEmailStub(to_user, from_user, subject, message, **unused_kwargs):
self.emails_sent.append((to_user, from_user, subject, message))
self.mail_stubber = utils.MultiStubber(
(email_alerts.EMAIL_ALERTER, "SendEmail", SendEmailStub),
(email.utils, "make_msgid", lambda: "<message id stub>"))
self.mail_stubber.Start()
# We don't want to send actual email in our tests
self.smtp_patcher = mock.patch("smtplib.SMTP")
self.mock_smtp = self.smtp_patcher.start()
def DisabledSet(*unused_args, **unused_kw):
raise NotImplementedError(
"Usage of Set() is disabled, please use a configoverrider in tests.")
self.config_set_disable = utils.Stubber(config.CONFIG, "Set", DisabledSet)
self.config_set_disable.Start()
if self.use_relational_reads:
self.relational_read_stubber = utils.Stubber(
data_store, "RelationalDBReadEnabled", lambda: True)
self.relational_read_stubber.Start()
def tearDown(self):
super(GRRBaseTest, self).tearDown()
self.config_set_disable.Stop()
self.smtp_patcher.stop()
self.mail_stubber.Stop()
if self.use_relational_reads:
self.relational_read_stubber.Stop()
logging.info("Completed test: %s.%s (%.4fs)", self.__class__.__name__,
self._testMethodName,
time.time() - self.last_start_time)
# This may fail on filesystems which do not support unicode filenames.
try:
shutil.rmtree(self.temp_dir, True)
except UnicodeError:
pass
def _AssertRDFValuesEqual(self, x, y):
x_has_lsf = hasattr(x, "ListSetFields")
y_has_lsf = hasattr(y, "ListSetFields")
if x_has_lsf != y_has_lsf:
raise AssertionError("%s != %s" % (x, y))
if not x_has_lsf:
if isinstance(x, float):
self.assertAlmostEqual(x, y)
else:
self.assertEqual(x, y)
return
processed = set()
for desc, value in x.ListSetFields():
processed.add(desc.name)
self._AssertRDFValuesEqual(value, y.Get(desc.name))
for desc, value in y.ListSetFields():
if desc.name not in processed:
self._AssertRDFValuesEqual(value, x.Get(desc.name))
def assertRDFValuesEqual(self, x, y):
"""Check that two RDFStructs are equal."""
self._AssertRDFValuesEqual(x, y)
def _SetupClientImpl(self,
client_nr,
index=None,
arch="x86_64",
fqdn=None,
install_time=None,
last_boot_time=None,
kernel="4.0.0",
os_version="buster/sid",
ping=None,
system="Linux",
memory_size=None,
add_cert=True,
fleetspeak_enabled=False):
client_id_urn = rdf_client.ClientURN("C.1%015x" % client_nr)
with aff4.FACTORY.Create(
client_id_urn, aff4_grr.VFSGRRClient, mode="rw",
token=self.token) as fd:
if add_cert:
cert = self.ClientCertFromPrivateKey(
config.CONFIG["Client.private_key"])
fd.Set(fd.Schema.CERT, cert)
fd.Set(fd.Schema.CLIENT_INFO, self._TestClientInfo())
fd.Set(fd.Schema.PING, ping or rdfvalue.RDFDatetime.Now())
if fqdn is not None:
fd.Set(fd.Schema.HOSTNAME(fqdn.split(".", 1)[0]))
fd.Set(fd.Schema.FQDN(fqdn))
else:
fd.Set(fd.Schema.HOSTNAME("Host-%x" % client_nr))
fd.Set(fd.Schema.FQDN("Host-%x.example.com" % client_nr))
fd.Set(
fd.Schema.MAC_ADDRESS(
"aabbccddee%02x\nbbccddeeff%02x" % (client_nr, client_nr)))
fd.Set(
fd.Schema.HOST_IPS(
"192.168.0.%d\n2001:abcd::%x" % (client_nr, client_nr)))
if system:
fd.Set(fd.Schema.SYSTEM(system))
if os_version:
fd.Set(fd.Schema.OS_VERSION(os_version))
if arch:
fd.Set(fd.Schema.ARCH(arch))
if kernel:
fd.Set(fd.Schema.KERNEL(kernel))
if memory_size:
fd.Set(fd.Schema.MEMORY_SIZE(memory_size))
if last_boot_time:
fd.Set(fd.Schema.LAST_BOOT_TIME(last_boot_time))
if install_time:
fd.Set(fd.Schema.INSTALL_DATE(install_time))
if fleetspeak_enabled:
fd.Set(fd.Schema.FLEETSPEAK_ENABLED, rdfvalue.RDFBool(True))
kb = rdf_client.KnowledgeBase()
kb.fqdn = fqdn or "Host-%x.example.com" % client_nr
kb.users = [
rdf_client.User(username="user1"),
rdf_client.User(username="user2"),
]
artifact.SetCoreGRRKnowledgeBaseValues(kb, fd)
fd.Set(fd.Schema.KNOWLEDGE_BASE, kb)
fd.Set(fd.Schema.INTERFACES(self._TestInterfaces(client_nr)))
hardware_info = fd.Schema.HARDWARE_INFO()
hardware_info.system_manufacturer = ("System-Manufacturer-%x" % client_nr)
hardware_info.bios_version = ("Bios-Version-%x" % client_nr)
fd.Set(fd.Schema.HARDWARE_INFO, hardware_info)
fd.Flush()
index.AddClient(fd)
return client_id_urn
def SetupClient(self,
client_nr,
arch="x86_64",
fqdn=None,
last_boot_time=None,
install_time=None,
kernel="4.0.0",
os_version="buster/sid",
ping=None,
system="Linux",
memory_size=None,
add_cert=True,
fleetspeak_enabled=False):
"""Prepares a test client mock to be used.
Args:
client_nr: int The GRR ID to be used. 0xABCD maps to C.100000000000abcd
in canonical representation.
arch: string
fqdn: string
last_boot_time: RDFDatetime
install_time: RDFDatetime
kernel: string
os_version: string
ping: RDFDatetime
system: string
memory_size: bytes
add_cert: boolean
fleetspeak_enabled: boolean
Returns:
rdf_client.ClientURN
"""
# Make it possible to use SetupClient for both REL_DB and legacy tests.
self.SetupTestClientObject(
client_nr,
add_cert=add_cert,
arch=arch,
fqdn=fqdn,
install_time=install_time,
last_boot_time=last_boot_time,
kernel=kernel,
memory_size=memory_size,
os_version=os_version,
ping=ping or rdfvalue.RDFDatetime.Now(),
system=system,
fleetspeak_enabled=fleetspeak_enabled)
with client_index.CreateClientIndex(token=self.token) as index:
client_id_urn = self._SetupClientImpl(
client_nr,
index=index,
arch=arch,
fqdn=fqdn,
install_time=install_time,
last_boot_time=last_boot_time,
kernel=kernel,
os_version=os_version,
ping=ping,
system=system,
memory_size=memory_size,
add_cert=add_cert,
fleetspeak_enabled=fleetspeak_enabled)
return client_id_urn
def SetupClients(self, nr_clients, *args, **kwargs):
"""Prepares nr_clients test client mocks to be used."""
return self.SetupClientsWithIndices(range(nr_clients), *args, **kwargs)
def SetupClientsWithIndices(self, indices, *args, **kwargs):
"""Sets up mock clients, one for each numerical index in 'indices'."""
return [self.SetupClient(i, *args, **kwargs) for i in indices]
def _TestClientInfo(self):
return rdf_client.ClientInformation(
client_name="GRR Monitor",
client_version=config.CONFIG["Source.version_numeric"],
build_time="1980-01-01",
labels=["label1", "label2"])
def _TestInterfaces(self, client_nr):
ip1 = rdf_client_network.NetworkAddress()
ip1.human_readable_address = "192.168.0.%d" % client_nr
ip2 = rdf_client_network.NetworkAddress()
ip2.human_readable_address = "2001:abcd::%x" % client_nr
mac1 = rdf_client_network.MacAddress()
mac1.human_readable_address = "aabbccddee%02x" % client_nr
mac2 = rdf_client_network.MacAddress()
mac2.human_readable_address = "bbccddeeff%02x" % client_nr
return [
rdf_client_network.Interface(addresses=[ip1, ip2]),
rdf_client_network.Interface(mac_address=mac1),
rdf_client_network.Interface(mac_address=mac2),
]
def SetupTestClientObjects(self,
client_count,
add_cert=True,
arch="x86_64",
fqdn=None,
install_time=None,
last_boot_time=None,
kernel="4.0.0",
memory_size=None,
os_version="buster/sid",
ping=None,
system="Linux",
labels=None,
fleetspeak_enabled=False):
res = {}
for client_nr in range(client_count):
client = self.SetupTestClientObject(
client_nr,
add_cert=add_cert,
arch=arch,
fqdn=fqdn,
install_time=install_time,
last_boot_time=last_boot_time,
kernel=kernel,
memory_size=memory_size,
os_version=os_version,
ping=ping,
system=system,
labels=labels,
fleetspeak_enabled=fleetspeak_enabled)
res[client.client_id] = client
return res
def SetupTestClientObject(self,
client_nr,
add_cert=True,
arch="x86_64",
fqdn=None,
install_time=None,
last_boot_time=None,
kernel="4.0.0",
memory_size=None,
os_version="buster/sid",
ping=None,
system="Linux",
labels=None,
fleetspeak_enabled=False):
"""Prepares a test client object."""
client_id = u"C.1%015x" % client_nr
client = rdf_objects.ClientSnapshot(client_id=client_id)
client.startup_info.client_info = self._TestClientInfo()
if last_boot_time is not None:
client.startup_info.boot_time = last_boot_time
client.knowledge_base.fqdn = fqdn or "Host-%x.example.com" % client_nr
client.knowledge_base.os = system
client.knowledge_base.users = [
rdf_client.User(username=u"user1"),
rdf_client.User(username=u"user2"),
]
client.os_version = os_version
client.arch = arch
client.kernel = kernel
client.interfaces = self._TestInterfaces(client_nr)
client.install_time = install_time
client.hardware_info = rdf_client.HardwareInfo(
system_manufacturer="System-Manufacturer-%x" % client_nr,
bios_version="Bios-Version-%x" % client_nr)
if memory_size is not None:
client.memory_size = memory_size
ping = ping or rdfvalue.RDFDatetime.Now()
if add_cert:
cert = self.ClientCertFromPrivateKey(config.CONFIG["Client.private_key"])
else:
cert = None
data_store.REL_DB.WriteClientMetadata(
client_id,
last_ping=ping,
certificate=cert,
fleetspeak_enabled=fleetspeak_enabled)
data_store.REL_DB.WriteClientSnapshot(client)
client_index.ClientIndex().AddClient(client)
if labels:
data_store.REL_DB.AddClientLabels(client_id, u"GRR", labels)
client_index.ClientIndex().AddClientLabels(
client_id, data_store.REL_DB.ReadClientLabels(client_id))
return client
def AddClientLabel(self, client_id, owner, name):
if data_store.RelationalDBReadEnabled():
if hasattr(client_id, "Basename"):
client_id = client_id.Basename()
data_store.REL_DB.AddClientLabels(client_id, owner, [name])
client_index.ClientIndex().AddClientLabels(client_id, [name])
else:
with aff4.FACTORY.Open(
client_id, mode="rw", token=self.token) as client_obj:
client_obj.AddLabel(name, owner=owner)
with client_index.CreateClientIndex(token=self.token) as index:
index.AddClient(client_obj)
def ClientCertFromPrivateKey(self, private_key):
communicator = comms.ClientCommunicator(private_key=private_key)
csr = communicator.GetCSR()
return rdf_crypto.RDFX509Cert.ClientCertFromCSR(csr)
def GenerateToken(self, username, reason):
return access_control.ACLToken(username=username, reason=reason)
class ConfigOverrider(object):
"""A context to temporarily change config options."""
def __init__(self, overrides):
self._overrides = overrides
self._saved_values = {}
def __enter__(self):
self.Start()
def Start(self):
for k, v in iteritems(self._overrides):
self._saved_values[k] = config.CONFIG.GetRaw(k)
try:
config.CONFIG.SetRaw.old_target(k, v)
except AttributeError:
config.CONFIG.SetRaw(k, v)
def __exit__(self, unused_type, unused_value, unused_traceback):
self.Stop()
def Stop(self):
for k, v in iteritems(self._saved_values):
try:
config.CONFIG.SetRaw.old_target(k, v)
except AttributeError:
config.CONFIG.SetRaw(k, v)
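# --- Illustrative sketch only (not used by any test): temporarily overriding a
# config option. "Client.poll_max" is just an assumed example key; any option
# defined in the active configuration can be overridden the same way.
def _config_overrider_example():
  with ConfigOverrider({"Client.poll_max": 1}):
    pass  # code running here sees the overridden raw value
  # The previous raw value is restored once the context exits.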
class PreserveConfig(object):
def __enter__(self):
self.Start()
def Start(self):
self.old_config = config.CONFIG
config.CONFIG = self.old_config.MakeNewConfig()
config.CONFIG.initialized = self.old_config.initialized
config.CONFIG.SetWriteBack(self.old_config.writeback.filename)
config.CONFIG.raw_data = self.old_config.raw_data.copy()
config.CONFIG.writeback_data = self.old_config.writeback_data.copy()
def __exit__(self, unused_type, unused_value, unused_traceback):
self.Stop()
def Stop(self):
config.CONFIG = self.old_config
class FakeTime(object):
"""A context manager for faking time."""
def __init__(self, fake_time, increment=0):
if isinstance(fake_time, rdfvalue.RDFDatetime):
self.time = fake_time.AsMicrosecondsSinceEpoch() / 1e6
else:
self.time = fake_time
self.increment = increment
def __enter__(self):
self.old_time = time.time
def Time():
self.time += self.increment
return self.time
time.time = Time
self.old_strftime = time.strftime
def Strftime(form, t=time.localtime(Time())):
return self.old_strftime(form, t)
time.strftime = Strftime
return self
def __exit__(self, unused_type, unused_value, unused_traceback):
time.time = self.old_time
time.strftime = self.old_strftime
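# --- Illustrative sketch only (not used by any test): inside the context
# time.time() returns the fake timestamp instead of the real clock.
def _fake_time_example():
  with FakeTime(1000):
    assert time.time() == 1000
    assert time.time() == 1000  # no increment configured, so the clock is frozen
  # The real time.time and time.strftime are restored on exit.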
# TODO(hanuszczak): `FakeTime` and `FakeTimeline` serve a similar purpose,
# although `FakeTimeline` (arguably) allows to write more sophisticated tests.
# Therefore, it should be possible to rewrite existing test code to use
# `FakeTimeline` instead of `FakeTime`. Once done, `FakeTime` should be removed.
# TODO(hanuszczak): Write proper documentation.
class FakeTimeline(object):
"""A context manager for testing time-aware code.
This utility class overrides the `time.sleep` and `time.time` functions so that
code that uses them can be tested. It is assumed that the code under test runs
on some thread. Using the `Run` method one can simulate running this thread for
a certain amount of time without actually spending that time waiting.
While internally the simulation executes the code on a separate thread, it can
be thought of as if the code were executed synchronously on the current thread.
However, the time flow is "immediate" and `time.sleep` calls do not really
block.
For example, it is possible to instantly simulate running a thread for half an
hour (assuming that most of that time would be spent sleeping).
In order to reliably test the flow of time-aware code, it is assumed that only
the `time.sleep` function causes time to pass. In other words, every
non-`sleep` line of code is assumed to execute instantly. In particular, if
there is an infinite loop without any `time.sleep` calls, running the
simulation for any number of seconds will block indefinitely. This is not a big
issue since this class is intended to be used only for testing purposes.
"""
class _WorkerThreadExit(BaseException):
pass
def __init__(self, thread, now=None):
"""Initializes the timeline.
Args:
thread: A thread to perform controlled execution on.
now: An `RDFDatetime` object representing starting point of the timeline.
If no value is provided, current time is used.
Raises:
TypeError: If `thread` is not an instance of `Thread` or if `now` is not
an instance of `RDFDatetime`.
"""
if not isinstance(thread, threading.Thread):
raise TypeError("`thread` is not an instance of `threading.Thread`")
if now is not None and not isinstance(now, rdfvalue.RDFDatetime):
raise TypeError("`now` is not an instance of `rdfvalue.RDFDatetime`")
self._thread = thread
self._owner_thread_turn = threading.Event()
self._worker_thread_turn = threading.Event()
# Fake, "current" number of seconds since epoch.
self._time = (now or rdfvalue.RDFDatetime.Now()).AsSecondsSinceEpoch()
# Number of seconds that the worker thread can sleep.
self._budget = 0
self._worker_thread = None
self._worker_thread_done = False
self._worker_thread_exception = None
def Run(self, duration):
"""Simulated running the underlying thread for the specified duration.
Args:
duration: A `Duration` object describing for how long simulate the thread.
Raises:
TypeError: If `duration` is not an instance of `rdfvalue.Duration`.
AssertionError: If this method is called without automatic context.
"""
if not isinstance(duration, rdfvalue.Duration):
raise TypeError("`duration` is not an instance of `rdfvalue.Duration")
if self._worker_thread is None:
raise AssertionError("Worker thread hasn't been started (method was "
"probably called without context initialization)")
if self._worker_thread_done:
return
self._budget += duration.seconds
self._original_time = time.time
self._original_sleep = time.sleep
with utils.Stubber(time, "time", self._Time),\
utils.Stubber(time, "sleep", self._Sleep):
self._owner_thread_turn.clear()
self._worker_thread_turn.set()
self._owner_thread_turn.wait()
if self._worker_thread_exception is not None:
# TODO(hanuszczak): Investigate why this linter warning is triggered.
raise self._worker_thread_exception # pylint: disable=raising-bad-type
def __enter__(self):
if self._worker_thread is not None:
raise AssertionError("Worker thread has been already started, context "
"cannot be reused.")
def Worker():
self._worker_thread_turn.wait()
try:
if self._worker_thread_done:
raise FakeTimeline._WorkerThreadExit
self._thread.run()
except FakeTimeline._WorkerThreadExit:
pass
except Exception as exception: # pylint: disable=broad-except
self._worker_thread_exception = exception
self._worker_thread_done = True
self._owner_thread_turn.set()
self._worker_thread = threading.Thread(target=Worker)
self._worker_thread.start()
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
del exc_type, exc_value, exc_traceback # Unused.
self._worker_thread_done = True
self._worker_thread_turn.set()
def _Sleep(self, seconds):
if threading.current_thread() is not self._worker_thread:
return self._original_sleep(seconds)
self._time += seconds
self._budget -= seconds
while self._budget < 0:
self._worker_thread_turn.clear()
self._owner_thread_turn.set()
self._worker_thread_turn.wait()
if self._worker_thread_done:
raise FakeTimeline._WorkerThreadExit()
def _Time(self):
if threading.current_thread() is not self._worker_thread:
return self._original_time()
return self._time
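# --- Illustrative sketch only (not used by any test): simulating ten minutes of
# a thread that mostly sleeps, without actually waiting. The Sleepy target and
# the durations below are arbitrary example values.
def _fake_timeline_example():
  def Sleepy():
    for _ in range(10):
      time.sleep(60)  # one simulated minute of "work" per iteration

  with FakeTimeline(threading.Thread(target=Sleepy)) as timeline:
    # Each Run call advances the simulated clock; no real time is spent sleeping.
    timeline.Run(rdfvalue.Duration("300s"))
    timeline.Run(rdfvalue.Duration("300s"))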
class FakeDateTimeUTC(object):
"""A context manager for faking time when using datetime.utcnow."""
def __init__(self, fake_time, increment=0):
self.time = fake_time
self.increment = increment
def __enter__(self):
self.old_datetime = datetime.datetime
class FakeDateTime(object):
def __init__(self, time_val, increment, orig_datetime):
self.time = time_val
self.increment = increment
self.orig_datetime = orig_datetime
def __call__(self, *args, **kw):
return self.orig_datetime(*args, **kw)
def __getattribute__(self, name):
try:
return object.__getattribute__(self, name)
except AttributeError:
return getattr(self.orig_datetime, name)
def utcnow(self): # pylint: disable=invalid-name
self.time += self.increment
return self.orig_datetime.utcfromtimestamp(self.time)
datetime.datetime = FakeDateTime(self.time, self.increment,
self.old_datetime)
def __exit__(self, unused_type, unused_value, unused_traceback):
datetime.datetime = self.old_datetime
class Instrument(object):
"""A helper to instrument a function call.
Stores a copy of all function call args locally for later inspection.
"""
def __init__(self, module, target_name):
self.old_target = getattr(module, target_name)
def Wrapper(*args, **kwargs):
self.args.append(args)
self.kwargs.append(kwargs)
self.call_count += 1
return self.old_target(*args, **kwargs)
self.stubber = utils.Stubber(module, target_name, Wrapper)
self.args = []
self.kwargs = []
self.call_count = 0
def __enter__(self):
self.stubber.__enter__()
return self
def __exit__(self, t, value, tb):
return self.stubber.__exit__(t, value, tb)
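# --- Illustrative sketch only (not used by any test): recording the calls made
# to time.sleep while the Instrument context is active.
def _instrument_example():
  with Instrument(time, "sleep") as instrument:
    time.sleep(0)
  assert instrument.call_count == 1
  assert instrument.args == [(0,)]
  assert instrument.kwargs == [{}]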
def RequiresPackage(package_name):
"""Skip this test if required package isn't present.
Note this will only work in opensource testing where we actually have
packages.
Args:
package_name: string
Returns:
Decorator function
"""
def Decorator(test_function):
@functools.wraps(test_function)
def Wrapper(*args, **kwargs):
try:
pkg_resources.get_distribution(package_name)
except pkg_resources.DistributionNotFound:
raise unittest.SkipTest(
"Skipping, package %s not installed" % package_name)
return test_function(*args, **kwargs)
return Wrapper
return Decorator
class RemotePDB(pdb.Pdb):
"""A Remote debugger facility.
Place breakpoints in the code using:
test_lib.RemotePDB().set_trace()
Once the debugger is attached, all remote breakpoints will use the same
connection.
"""
handle = None
prompt = "RemotePDB>"
def __init__(self):
# Use a global socket for remote debugging.
if RemotePDB.handle is None:
self.ListenForConnection()
pdb.Pdb.__init__(
self, stdin=self.handle, stdout=codecs.getwriter("utf8")(self.handle))
def ListenForConnection(self):
"""Listens and accepts a single connection."""
logging.warn("Remote debugger waiting for connection on %s",
config.CONFIG["Test.remote_pdb_port"])
RemotePDB.old_stdout = sys.stdout
RemotePDB.old_stdin = sys.stdin
RemotePDB.skt = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
RemotePDB.skt.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
RemotePDB.skt.bind(("127.0.0.1", config.CONFIG["Test.remote_pdb_port"]))
RemotePDB.skt.listen(1)
(clientsocket, address) = RemotePDB.skt.accept()
RemotePDB.handle = clientsocket.makefile("rw", 1)
logging.warn("Received a connection from %s", address)
def _TempRootPath():
try:
root = os.environ.get("TEST_TMPDIR") or config.CONFIG["Test.tmpdir"]
except RuntimeError:
return None
if platform.system() != "Windows":
return root
else:
return None
# TODO(hanuszczak): Consider moving this to some utility module.
def TempDirPath(suffix="", prefix="tmp"):
"""Creates a temporary directory based on the environment configuration.
The directory will be placed in the folder specified by the `TEST_TMPDIR`
environment variable if available, falling back to `Test.tmpdir` of the current
configuration otherwise.
Args:
suffix: A suffix to end the directory name with.
prefix: A prefix to begin the directory name with.
Returns:
An absolute path to the created directory.
"""
return tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=_TempRootPath())
# TODO(hanuszczak): Consider moving this to some utility module.
def TempFilePath(suffix="", prefix="tmp", dir=None): # pylint: disable=redefined-builtin
"""Creates a temporary file based on the environment configuration.
If no directory is specified, the file will be placed in the folder specified
by the `TEST_TMPDIR` environment variable if available, falling back to
`Test.tmpdir` of the current configuration otherwise.
If a directory is specified, it must be inside the default test temporary
directory.
Args:
suffix: A suffix to end the file name with.
prefix: A prefix to begin the file name with.
dir: A directory to place the file in.
Returns:
An absolute path to the created file.
Raises:
ValueError: If the specified directory is not part of the default test
temporary directory.
"""
root = _TempRootPath()
if not dir:
dir = root
elif root and not os.path.commonprefix([dir, root]):
raise ValueError("path '%s' must start with '%s'" % (dir, root))
_, path = tempfile.mkstemp(suffix=suffix, prefix=prefix, dir=dir)
return path
class AutoTempDirPath(object):
"""Creates a temporary directory based on the environment configuration.
The directory will be placed in the folder specified by the `TEST_TMPDIR`
environment variable if available, falling back to `Test.tmpdir` of the current
configuration otherwise.
This object is a context manager and the directory is automatically removed
when it goes out of scope.
Args:
suffix: A suffix to end the directory name with.
prefix: A prefix to begin the directory name with.
remove_non_empty: If set to `True` the directory removal will succeed even
if it is not empty.
Returns:
An absolute path to the created directory.
"""
def __init__(self, suffix="", prefix="tmp", remove_non_empty=False):
self.suffix = suffix
self.prefix = prefix
self.remove_non_empty = remove_non_empty
def __enter__(self):
self.path = TempDirPath(suffix=self.suffix, prefix=self.prefix)
return self.path
def __exit__(self, exc_type, exc_value, traceback):
del exc_type # Unused.
del exc_value # Unused.
del traceback # Unused.
if self.remove_non_empty:
shutil.rmtree(self.path)
else:
os.rmdir(self.path)
class AutoTempFilePath(object):
"""Creates a temporary file based on the environment configuration.
If no directory is specified, the file will be placed in the folder specified
by the `TEST_TMPDIR` environment variable if available, falling back to
`Test.tmpdir` of the current configuration otherwise.
If a directory is specified, it must be inside the default test temporary
directory.
This object is a context manager and the associated file is automatically
removed when it goes out of scope.
Args:
suffix: A suffix to end the file name with.
prefix: A prefix to begin the file name with.
dir: A directory to place the file in.
Returns:
An absolute path to the created file.
Raises:
ValueError: If the specified directory is not part of the default test
temporary directory.
"""
def __init__(self, suffix="", prefix="tmp", dir=None): # pylint: disable=redefined-builtin
self.suffix = suffix
self.prefix = prefix
self.dir = dir
def __enter__(self):
self.path = TempFilePath(
suffix=self.suffix, prefix=self.prefix, dir=self.dir)
return self.path
def __exit__(self, exc_type, exc_value, traceback):
del exc_type # Unused.
del exc_value # Unused.
del traceback # Unused.
os.remove(self.path)
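# --- Illustrative sketch only (not used by any test): both helpers clean up
# after themselves when their contexts exit.
def _auto_temp_example():
  with AutoTempDirPath(remove_non_empty=True) as temp_dirpath:
    with AutoTempFilePath(dir=temp_dirpath, suffix=".txt") as temp_filepath:
      with open(temp_filepath, "w") as fd:
        fd.write("scratch data")
    # The file is gone here; the directory is removed right after.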
def main(argv=None):
del argv # Unused.
unittest.main()
|
train.py
|
"""
Written by Matteo Dunnhofer - 2017
Model training on ImageNet
"""
import sys
import os.path
import time
from models import alexnet
import tensorflow as tf
import train_util as tu
import numpy as np
import threading
def train(
epochs,
batch_size,
learning_rate,
dropout,
momentum,
lmbda,
resume,
imagenet_path,
display_step,
test_step,
ckpt_path,
summary_path):
""" Procedure to train the model on ImageNet ILSVRC 2012 training set
Args:
resume: boolean, True to resume training from a checkpoint, False to train from scratch
imagenet_path: path to the ILSVRC2012 ImageNet folder containing train images,
validation images, annotations and the metadata file
display_step: how often (in steps) to print the current training accuracy
test_step: how often (in steps) to run a validation batch and print the validation accuracy
ckpt_path: path where to save the model's TensorFlow checkpoint (or from where to resume)
summary_path: path where to save logs for TensorBoard
"""
train_img_path = os.path.join(imagenet_path, 'ILSVRC2012_img_train')
ts_size = tu.imagenet_size(train_img_path)
num_batches = int(float(ts_size) / batch_size)
wnid_labels, _ = tu.load_imagenet_meta(os.path.join(imagenet_path, 'data/meta.mat'))
x = tf.placeholder(tf.float32, [None, 224, 224, 3])
y = tf.placeholder(tf.float32, [None, 1000])
lr = tf.placeholder(tf.float32)
keep_prob = tf.placeholder(tf.float32)
# queue of examples being filled on the cpu
with tf.device('/cpu:0'):
q = tf.FIFOQueue(batch_size * 3, [tf.float32, tf.float32], shapes=[[224, 224, 3], [1000]])
enqueue_op = q.enqueue_many([x, y])
x_b, y_b = q.dequeue_many(batch_size)
pred, _ = alexnet.classifier(x_b, keep_prob)
# cross-entropy and weight decay
with tf.name_scope('cross_entropy'):
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y_b, name='cross-entropy'))
with tf.name_scope('l2_loss'):
l2_loss = tf.reduce_sum(lmbda * tf.stack([tf.nn.l2_loss(v) for v in tf.get_collection('weights')]))
tf.summary.scalar('l2_loss', l2_loss)
with tf.name_scope('loss'):
loss = cross_entropy + l2_loss
tf.summary.scalar('loss', loss)
# accuracy
with tf.name_scope('accuracy'):
correct = tf.equal(tf.argmax(pred, 1), tf.argmax(y_b, 1))
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
tf.summary.scalar('accuracy', accuracy)
global_step = tf.Variable(0, trainable=False)
epoch = tf.div(global_step, num_batches)
# momentum optimizer
with tf.name_scope('optimizer'):
optimizer = tf.train.MomentumOptimizer(learning_rate=lr, momentum=momentum).minimize(loss, global_step=global_step)
# merge summaries to write them to file
merged = tf.summary.merge_all()
# checkpoint saver
saver = tf.train.Saver()
coord = tf.train.Coordinator()
#init = tf.initialize_all_variables()
init = tf.global_variables_initializer()
with tf.Session(config=tf.ConfigProto()) as sess:
if resume:
saver.restore(sess, os.path.join(ckpt_path, 'alexnet-cnn.ckpt'))
else:
sess.run(init)
# enqueuing batches procedure
def enqueue_batches():
while not coord.should_stop():
im, l = tu.read_batch(batch_size, train_img_path, wnid_labels)
sess.run(enqueue_op, feed_dict={x: im,y: l})
# creating and starting parallel threads to fill the queue
num_threads = 3
for i in range(num_threads):
t = threading.Thread(target=enqueue_batches)
t.daemon = True
t.start()
# operation to write logs for tensorboard visualization
train_writer = tf.summary.FileWriter(os.path.join(summary_path, 'train'), sess.graph)
start_time = time.time()
for e in range(sess.run(epoch), epochs):
for i in range(num_batches):
_, step = sess.run([optimizer, global_step], feed_dict={lr: learning_rate, keep_prob: dropout})
#train_writer.add_summary(summary, step)
# decaying learning rate
if step == 170000 or step == 350000:
learning_rate /= 10
# display current training information
if step % display_step == 0:
c, a = sess.run([loss, accuracy], feed_dict={lr: learning_rate, keep_prob: 1.0})
print ('Epoch: {:03d} Step/Batch: {:09d} --- Loss: {:.7f} Training accuracy: {:.4f}'.format(e, step, c, a))
# make test and evaluate validation accuracy
if step % test_step == 0:
val_im, val_cls = tu.read_validation_batch(batch_size, os.path.join(imagenet_path, 'ILSVRC2012_img_val'), os.path.join(imagenet_path, 'data/ILSVRC2012_validation_ground_truth.txt'))
v_a = sess.run(accuracy, feed_dict={x_b: val_im, y_b: val_cls, lr: learning_rate, keep_prob: 1.0})
# intermediate time
int_time = time.time()
print ('Elapsed time: {}'.format(tu.format_time(int_time - start_time)))
print ('Validation accuracy: {:.04f}'.format(v_a))
# save weights to file
save_path = saver.save(sess, os.path.join(ckpt_path, 'alexnet-cnn.ckpt'))
print('Variables saved in file: %s' % save_path)
end_time = time.time()
print ('Elapsed time: {}'.format(tu.format_time(end_time - start_time)))
save_path = saver.save(sess, os.path.join(ckpt_path, 'alexnet-cnn.ckpt'))
print('Variables saved in file: %s' % save_path)
coord.request_stop()
if __name__ == '__main__':
DROPOUT = 0.5
MOMENTUM = 0.9
LAMBDA = 5e-04 # for weight decay
LEARNING_RATE = 1e-03
EPOCHS = 90
BATCH_SIZE = 128
CKPT_PATH = 'ckpt-alexnet'
if not os.path.exists(CKPT_PATH):
os.makedirs(CKPT_PATH)
SUMMARY = 'summary'
if not os.path.exists(SUMMARY):
os.makedirs(SUMMARY)
IMAGENET_PATH = 'ILSVRC2012'
DISPLAY_STEP = 10
TEST_STEP = 500
if len(sys.argv) > 1 and sys.argv[1] == '-resume':
resume = True
elif len(sys.argv) > 1 and sys.argv[1] == '-scratch':
resume = False
else:
sys.exit('Usage: train.py [-resume | -scratch]')
train(
EPOCHS,
BATCH_SIZE,
LEARNING_RATE,
DROPOUT,
MOMENTUM,
LAMBDA,
resume,
IMAGENET_PATH,
DISPLAY_STEP,
TEST_STEP,
CKPT_PATH,
SUMMARY)
|
multiprocessingexample.py
|
import multiprocessing as mp
import os
import random
def squared(x, queue):
module_name = __name__
parent_id = os.getppid()
process_id = os.getpid()
result = x * x
print(f'module={module_name} : parent={parent_id} : process={process_id} : result={result}')
queue.put(result)
if __name__ == '__main__':
ctx = mp.get_context('spawn')
queue = ctx.Queue()
processes = [ctx.Process(target=squared, args=(random.randint(0, 100), queue,)) for _ in range(10)]
for process in processes:
process.start()
for process in processes:
process.join()
while queue.qsize() != 0:
print(queue.get())
|
test_c10d_nccl.py
|
# Owner(s): ["oncall: distributed"]
import copy
import math
import os
import random
import signal
import sys
import tempfile
import threading
import time
from contextlib import contextmanager
from datetime import timedelta
from itertools import product
from unittest import mock
import torch
import torch.distributed as c10d
if not c10d.is_available():
print("c10d not available, skipping tests", file=sys.stderr)
sys.exit(0)
import test_c10d_common
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from test_c10d_common import gpus_for_rank, DoubleGpuNet, ConvNet, ModuleForDdpCommHook
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
init_multigpu_helper,
requires_nccl,
requires_gloo,
requires_nccl_version,
skip_if_lt_x_gpu,
get_timeout,
skip_if_rocm,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
retry_on_connect_failures,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
sandcastle_skip,
sandcastle_skip_if,
)
if TEST_WITH_DEV_DBG_ASAN:
print(
"Skip ASAN as torch + multiprocessing spawn have known issues", file=sys.stderr
)
sys.exit(0)
# bfloat16 is only supported by CUDA 11+
BFLOAT16_AVAILABLE = (
torch.cuda.is_available()
and torch.version.cuda is not None
and int(torch.version.cuda.split('.')[0]) >= 11)
class RendezvousEnvTest(TestCase):
@retry_on_connect_failures
@requires_nccl()
@sandcastle_skip_if(
torch.cuda.device_count() == 0, "No GPUs available, skipping test"
)
def test_common_errors(self):
vars = {
"WORLD_SIZE": "1",
"RANK": "0",
"MASTER_ADDR": "127.0.0.1",
"MASTER_PORT": str(common.find_free_port()),
}
class Env(object):
def __init__(self, vars):
self.env_patcher = mock.patch.dict(os.environ, vars, clear=True)
def __enter__(self):
self.env_patcher.start()
def __exit__(self, type, value, traceback):
self.env_patcher.stop()
def without(d, key):
d = d.copy()
d.pop(key)
return d
def withouts(d, keys):
d = d.copy()
for key in keys:
d.pop(key)
return d
with Env(without(vars, "WORLD_SIZE")):
self.assertEqual(None, os.environ.get("WORLD_SIZE"))
with self.assertRaisesRegex(ValueError, "WORLD_SIZE expected"):
gen = c10d.rendezvous("env://")
next(gen)
c10d.init_process_group(backend="nccl", world_size=1)
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(without(vars, "RANK")):
self.assertEqual(None, os.environ.get("RANK"))
with self.assertRaisesRegex(ValueError, "RANK expected"):
gen = c10d.rendezvous("env://")
next(gen)
c10d.init_process_group(backend="nccl", rank=0)
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(withouts(vars, ["RANK", "WORLD_SIZE"])):
self.assertEqual(None, os.environ.get("RANK"))
self.assertEqual(None, os.environ.get("WORLD_SIZE"))
c10d.init_process_group(backend="nccl", rank=0, world_size=1)
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(vars):
c10d.init_process_group(backend="nccl")
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(without(vars, "MASTER_ADDR")):
self.assertEqual(None, os.environ.get("MASTER_ADDR"))
with self.assertRaisesRegex(ValueError, "MASTER_ADDR expected"):
gen = c10d.rendezvous("env://")
next(gen)
with Env(without(vars, "MASTER_PORT")):
self.assertEqual(None, os.environ.get("MASTER_PORT"))
with self.assertRaisesRegex(ValueError, "MASTER_PORT expected"):
gen = c10d.rendezvous("env://")
next(gen)
with Env(without(vars, "WORLD_SIZE")):
self.assertEqual(None, os.environ.get("WORLD_SIZE"))
gen = c10d.rendezvous("env://?world_size={}".format(1))
_, _, size = next(gen)
self.assertEqual(size, 1)
with Env(without(vars, "RANK")):
self.assertEqual(None, os.environ.get("RANK"))
gen = c10d.rendezvous("env://?rank={}".format(0))
_, rank, _ = next(gen)
self.assertEqual(rank, 0)
with Env(withouts(vars, ["RANK", "WORLD_SIZE"])):
self.assertEqual(None, os.environ.get("RANK"))
self.assertEqual(None, os.environ.get("WORLD_SIZE"))
gen = c10d.rendezvous("env://?rank={}&world_size={}".format(0, 1))
_, rank, size = next(gen)
self.assertEqual(rank, 0)
self.assertEqual(size, 1)
class TimeoutTest(test_c10d_common.AbstractTimeoutTest, TestCase):
@requires_nccl()
@retry_on_connect_failures
@sandcastle_skip_if(
torch.cuda.device_count() == 0, "No GPUs available, skipping test"
)
def test_default_store_timeout_nccl(self):
self._test_default_store_timeout("nccl")
class ProcessGroupNCCLNoGPUTest(TestCase):
MAIN_PROCESS_RANK = 0
def setUp(self):
self.rank = self.MAIN_PROCESS_RANK
self.world_size = 1
self.file = tempfile.NamedTemporaryFile(delete=False)
def tearDown(self):
pass
@requires_nccl()
@sandcastle_skip_if(
torch.cuda.device_count() > 0, "GPUs are available, skipping test"
)
def test_init_no_gpus(self):
store = c10d.FileStore(self.file.name, self.world_size)
with self.assertRaisesRegex(
RuntimeError, "ProcessGroupNCCL is only supported with GPUs, no GPUs found!"
):
c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
class ProcessGroupNCCLTest(MultiProcessTestCase):
def _create_process_group_nccl(self, store, opts):
# create nccl processgroup with opts
c10d.init_process_group(
"nccl",
world_size=self.world_size,
rank=self.rank,
store=store,
pg_options=opts)
pg = c10d.distributed_c10d._get_default_group()
return pg
def opts(self, high_priority_stream=False):
opts = c10d.ProcessGroupNCCL.Options()
opts.is_high_priority_stream = high_priority_stream
return opts
def setUp(self):
super(ProcessGroupNCCLTest, self).setUp()
# NCCL_BLOCKING_WAIT overrides NCCL_ASYNC_ERROR_HANDLING hence tests
# that use NCCL_BLOCKING_WAIT will test it as expected.
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1"
# self.num_gpus = torch.cuda.device_count()
self._spawn_processes()
def tearDown(self):
super(ProcessGroupNCCLTest, self).tearDown()
try:
os.remove(self.file_name)
except OSError:
pass
@property
def world_size(self):
return 2
@property
def rank_to_GPU(self):
# return rank to GPU map
return init_multigpu_helper(self.world_size, "nccl")
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_empty_tensors(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_nccl(store, self.opts())
local_device_idx = self.rank_to_GPU[self.rank][0]
xs = [torch.FloatTensor([]).cuda(local_device_idx)]
pg.broadcast(xs).wait()
self.assertEqual(0, xs[0].numel())
pg.allreduce(xs).wait()
self.assertEqual(0, xs[0].numel())
pg.reduce(xs).wait()
self.assertEqual(0, xs[0].numel())
ys = [[torch.FloatTensor([]).cuda(local_device_idx) for _ in range(self.world_size)]]
pg.allgather(ys, xs).wait()
for y in ys[0]:
self.assertEqual(0, y.numel())
ys = [torch.FloatTensor([]).cuda(local_device_idx)]
xs = [[torch.FloatTensor([]).cuda(local_device_idx) for _ in range(self.world_size)]]
pg.reduce_scatter(ys, xs).wait()
self.assertEqual(0, ys[0].numel())
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_broadcast_ops(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_nccl(store, self.opts())
def broadcast(xs, rootRank, rootTensor):
opts = c10d.BroadcastOptions()
opts.rootRank = rootRank
opts.rootTensor = rootTensor
work = pg.broadcast(xs, opts)
work.wait()
return work.result()
# Every rank is root once
for i in range(self.world_size):
# Run with 1 input tensor
x = torch.tensor([self.rank]).cuda(self.rank_to_GPU[self.rank][0])
output = broadcast([x], i, 0)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(torch.tensor([i]), output[0])
expected_tensor = torch.empty([i + 1, i + 1]).fill_(i + 1)
xs = [torch.empty([i + 1, i + 1]).fill_(-1).cuda(device=device_idx) for device_idx in self.rank_to_GPU[self.rank]]
# Test with multiple input tensors (multiple GPUs in one rank)
for j in range(len(xs)):
if self.rank == i:
xs[j] = expected_tensor.cuda(device=self.rank_to_GPU[self.rank][j])
broadcast(xs, i, j)
for tensor in xs:
self.assertEqual(tensor, expected_tensor)
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_allreduce_ops(self):
store = c10d.FileStore(self.file_name, self.world_size)
device_count = torch.cuda.device_count()
pg = self._create_process_group_nccl(store, self.opts())
local_device_id = self.rank_to_GPU[self.rank][0]
def allreduce(tensors, op):
opts = c10d.AllreduceOptions()
opts.reduceOp = op
work = pg.allreduce(tensors, opts)
work.wait()
# Sum
tensors = [torch.tensor([self.rank + 1]).cuda(local_device_id)]
allreduce(tensors, c10d.ReduceOp.SUM)
ndev = float(self.world_size)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(
torch.tensor([ndev * (ndev + 1) / 2]),
tensors[0],
)
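        # e.g. with world_size=2 the ranks contribute 1 and 2, so every rank
        # should end up with SUM = 3 = ndev * (ndev + 1) / 2.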
# Avg (only available for NCCL 2.10+)
if torch.cuda.nccl.version() >= (2, 10, 0):
tensors = [torch.tensor([self.rank + 1.]).cuda(local_device_id)]
allreduce(tensors, c10d.ReduceOp.AVG)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(
torch.tensor([ndev * (ndev + 1.) / (2. * ndev)]),
tensors[0],
)
# Product
tensors = [torch.tensor([self.rank + 1]).cuda(local_device_id)]
allreduce(tensors, c10d.ReduceOp.PRODUCT)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(
torch.tensor([float(math.factorial(self.world_size))]), tensors[0]
)
# Min
tensors = [torch.tensor([self.rank + 1]).cuda(local_device_id)]
allreduce(tensors, c10d.ReduceOp.MIN)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(torch.tensor([1.0]), tensors[0])
# Max
tensors = [torch.tensor([self.rank + 1]).cuda(local_device_id)]
allreduce(tensors, c10d.ReduceOp.MAX)
self.assertEqual(torch.tensor([self.world_size]), tensors[0])
for op in (c10d.ReduceOp.BAND, c10d.ReduceOp.BOR, c10d.ReduceOp.BXOR):
with self.assertRaisesRegex(
RuntimeError, "Cannot use " + str(op) + " with NCCL"
):
allreduce(tensors, op)
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_reduce_ops(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_nccl(store, self.opts())
local_device_id = self.rank_to_GPU[self.rank][0]
def reduce(xs, rootRank, rootTensor, op=None):
opts = c10d.ReduceOptions()
opts.rootRank = rootRank
opts.rootTensor = rootTensor
if op:
opts.reduceOp = op
work = pg.reduce(xs, opts)
work.wait()
# for every root tensor
for rt in range(self.world_size):
tensors = [torch.tensor([self.rank + 1]).cuda(local_device_id)]
reduce(tensors, rt, 0)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
if self.rank == rt:
self.assertEqualIgnoreType(
torch.tensor([float(self.world_size * (self.world_size + 1) / 2)]),
tensors[0],
)
else:
self.assertEqualIgnoreType(
torch.tensor([self.rank + 1]),
tensors[0],
)
for op in (c10d.ReduceOp.BAND, c10d.ReduceOp.BOR, c10d.ReduceOp.BXOR):
with self.assertRaisesRegex(
RuntimeError, "Cannot use " + str(op) + " with NCCL"
):
reduce(tensors, self.rank, rt, op)
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_allgather_ops(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_nccl(store, self.opts())
local_device_ids = self.rank_to_GPU[self.rank]
def allgather(output_ts, input_ts):
work = pg.allgather(output_ts, input_ts)
return work.wait()
tensors = [torch.empty(2, 2).fill_(2).cuda(device=i) for i in local_device_ids]
output_tensors = []
expected_output = []
output_per_gpu = ([torch.empty(2, 2).fill_(-1)] * len(local_device_ids) * self.world_size)
expected_per_gpu = ([torch.empty(2, 2).fill_(2)] * len(local_device_ids) * self.world_size)
for gpu in local_device_ids:
output_tensors.append([t.cuda(device=gpu) for t in output_per_gpu])
expected_output.append([t.cuda(device=gpu) for t in expected_per_gpu])
result = allgather(output_tensors, tensors)
# Verification
self.assertEqual(output_tensors, expected_output)
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_allgather_base_ops(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_nccl(store, self.opts())
local_device_id = self.rank_to_GPU[self.rank][0]
def allgather_base(output_t, input_t):
work = pg._allgather_base(output_t, input_t)
work.wait()
# allgather_base is agnostic to the number of GPUs.
# Each rank contributes one tensor regardless of GPU count.
tensor = torch.tensor([self.rank]).cuda(local_device_id)
output_t = torch.empty((self.world_size), dtype=tensor.dtype).cuda(local_device_id)
allgather_base(output_t, tensor)
# Verification
self.assertEqual(torch.arange(self.world_size), output_t)
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_allgather_base_basics(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_nccl(store, self.opts())
local_device_id = self.rank_to_GPU[self.rank][0]
def allgather_base(output_t, input_t):
work = pg._allgather_base(output_t, input_t)
work.wait()
# anticipate an error
with self.assertRaisesRegex(
RuntimeError,
"output tensor size must be equal to world_size times input tensor size",
):
tensor = torch.tensor([self.rank]).cuda(local_device_id)
output_t = torch.empty((self.world_size + 1), dtype=tensor.dtype).cuda(
local_device_id
)
# fails the check because output_t is not correctly sized
allgather_base(output_t, tensor)
# anticipate an error
with self.assertRaisesRegex(
RuntimeError, "output tensor must have the same type as input tensor"
):
tensor = torch.tensor([self.rank], dtype=torch.float).cuda(local_device_id)
output_t = torch.empty((self.world_size + 1), dtype=torch.long).cuda(
local_device_id
)
# fails the check because the dtype is different
allgather_base(output_t, tensor)
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_gather_ops(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_nccl(store, self.opts())
local_device_ids = self.rank_to_GPU[self.rank]
num_gpus = len(local_device_ids)
def gather(output_t, input_t, rootRank):
opts = c10d.GatherOptions()
opts.rootRank = rootRank
if rootRank == self.rank:
work = pg.gather(output_t, input_t, opts)
else:
work = pg.gather([], input_t, opts)
work.wait()
# init input
tensors = []
for device_id in local_device_ids:
tensors.append(torch.tensor([self.rank]).cuda(device_id))
# init output
output_ts = []
for idx in range(num_gpus):
gpu_idx = local_device_ids[idx]
output_ts.append([])
for rank in range(self.world_size):
output_ts[idx].append(torch.tensor([-1]).cuda(gpu_idx))
expected = [[torch.tensor([rank]) for rank in range(self.world_size)]]
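        # Each rank sends tensor([rank]), so at the root each gathered slot should
        # hold tensor([r]) coming from rank r; the expected value above assumes one
        # input tensor per rank.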
for rank in range(self.world_size):
gather(output_ts, tensors, rank)
if rank == self.rank:
self.assertEqual(expected, output_ts)
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_gather_stress(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_nccl(store, self.opts())
local_device_ids = self.rank_to_GPU[self.rank]
num_gpus = len(local_device_ids)
def gather(output_t, input_t, rootRank):
opts = c10d.GatherOptions()
opts.rootRank = rootRank
if rootRank == self.rank:
work = pg.gather(output_t, input_t, opts)
else:
work = pg.gather([], input_t, opts)
work.wait()
stress_length = 1000
# init input
tensors = []
for i in range(stress_length):
tensors.append([])
for device_id in local_device_ids:
tensors[i].append(torch.tensor([self.rank]).cuda(device_id))
# init output
output_ts = []
for i in range(stress_length):
output_ts.append([[] for _ in range(num_gpus)])
for idx, ls in enumerate(output_ts[i]):
gpu_idx = local_device_ids[idx]
for _ in range(self.world_size):
ls.append(torch.tensor([-1]).cuda(gpu_idx))
expected = [[torch.tensor([rank]) for rank in range(self.world_size)]]
for i in range(stress_length):
for rank in range(self.world_size):
gather(output_ts[i], tensors[i], rank)
# Verification
if rank == self.rank:
self.assertEqual(output_ts[i], expected)
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_gather_checks(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_nccl(store, self.opts())
local_device_ids = self.rank_to_GPU[self.rank]
num_gpus = len(local_device_ids)
# init input
tensors = []
for device_id in local_device_ids:
tensors.append(torch.tensor([self.rank]).cuda(device_id))
# init output
output_ts = []
for idx in range(num_gpus):
gpu_idx = local_device_ids[idx]
output_ts.append([])
for rank in range(self.world_size):
output_ts[idx].append(torch.tensor([-1]).cuda(gpu_idx))
with self.assertRaisesRegex(RuntimeError, "invalid root rank"):
opts = c10d.GatherOptions()
opts.rootRank = -1
pg.gather(output_ts, tensors, opts)
with self.assertRaisesRegex(TypeError, "incompatible function arguments"):
pg.gather(output_ts, tensors, 0)
with self.assertRaisesRegex(RuntimeError, "invalid root rank"):
opts = c10d.GatherOptions()
opts.rootRank = self.world_size
pg.gather(output_ts, tensors, opts)
with self.assertRaisesRegex(
RuntimeError, "Tensor list must be nonempty"
):
opts = c10d.GatherOptions()
opts.rootRank = 0
pg.gather(output_ts, [], opts)
with self.assertRaisesRegex(
RuntimeError, "Tensors must be on distinct GPU devices"
):
# init input
tensors2 = []
for device_id in local_device_ids:
tensors2.append(torch.tensor([self.rank]).cuda(device_id))
tensors2.append(torch.tensor([self.rank]).cuda(device_id))
opts = c10d.GatherOptions()
opts.rootRank = 0
pg.gather(output_ts, tensors2, opts)
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_scatter_ops(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_nccl(store, self.opts())
local_device_ids = self.rank_to_GPU[self.rank]
num_gpus = len(local_device_ids)
def scatter(output_t, input_t, rootRank):
opts = c10d.ScatterOptions()
opts.rootRank = rootRank
if rootRank == self.rank:
work = pg.scatter(output_t, input_t, opts)
else:
work = pg.scatter(output_t, [], opts)
work.wait()
# init output
tensors = []
for device_id in local_device_ids:
tensors.append(torch.tensor([-1]).cuda(device_id))
# init input
scatter_list = []
for idx in range(num_gpus):
gpu_idx = local_device_ids[idx]
scatter_list.append([])
for rank in range(self.world_size):
scatter_list[idx].append(torch.tensor([rank]).cuda(gpu_idx))
# test each rank to scatter
expected = [torch.tensor([self.rank])]
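        # The root scatters [tensor([0]), ..., tensor([world_size - 1])], so every
        # rank should receive exactly its own rank value.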
for rank in range(self.world_size):
scatter(tensors, scatter_list, rank)
self.assertEqual(expected, tensors)
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_scatter_stress(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_nccl(store, self.opts())
local_device_ids = self.rank_to_GPU[self.rank]
num_gpus = len(local_device_ids)
def scatter(output_t, input_t, rootRank):
opts = c10d.ScatterOptions()
opts.rootRank = rootRank
if rootRank == self.rank:
work = pg.scatter(output_t, input_t, opts)
else:
work = pg.scatter(output_t, [], opts)
work.wait()
stress_length = 1000
# init output
tensors = []
for i in range(stress_length):
tensors.append([])
for device_id in local_device_ids:
tensors[i].append(torch.tensor([-1]).cuda(device_id))
# init input
scatter_list = []
for i in range(stress_length):
scatter_list.append([[] for _ in range(num_gpus)])
for idx, ls in enumerate(scatter_list[i]):
gpu_idx = local_device_ids[idx]
for rank in range(self.world_size):
ls.append(torch.tensor([rank]).cuda(gpu_idx))
# test each rank to scatter
expected = [torch.tensor([self.rank])]
for i in range(stress_length):
for rank in range(self.world_size):
scatter(tensors[i], scatter_list[i], rank)
# Verification
self.assertEqual(tensors[i], expected)
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_scatter_checks(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_nccl(store, self.opts())
local_device_ids = self.rank_to_GPU[self.rank]
num_gpus = len(local_device_ids)
# init output
tensors = []
for device_id in local_device_ids:
tensors.append(torch.tensor([-1]).cuda(device_id))
# init input
scatter_list = []
for idx in range(num_gpus):
gpu_idx = local_device_ids[idx]
scatter_list.append([])
for rank in range(self.world_size):
scatter_list[idx].append(torch.tensor([rank]).cuda(gpu_idx))
with self.assertRaisesRegex(RuntimeError, "invalid root rank"):
opts = c10d.ScatterOptions()
opts.rootRank = -1
pg.scatter(tensors, scatter_list, opts)
with self.assertRaisesRegex(TypeError, "incompatible function arguments"):
pg.scatter(tensors, scatter_list, 0)
with self.assertRaisesRegex(RuntimeError, "invalid root rank"):
opts = c10d.ScatterOptions()
opts.rootRank = self.world_size
pg.scatter(tensors, scatter_list, opts)
with self.assertRaisesRegex(
RuntimeError, "Tensor list must be nonempty"
):
opts = c10d.ScatterOptions()
opts.rootRank = 0
pg.scatter([], scatter_list, opts)
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_reduce_scatter_base_basics(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_nccl(store, self.opts())
local_device_id = self.rank_to_GPU[self.rank][0]
def reduce_scatter_base(output_t, input_t):
work = pg._reduce_scatter_base(output_t, input_t)
work.wait()
# anticipate an error
with self.assertRaisesRegex(
RuntimeError,
"input tensor must be the same size as output size times world size",
):
input_t = torch.tensor([self.rank]).cuda(local_device_id)
output_t = torch.empty((self.world_size + 1), dtype=input_t.dtype).cuda(
local_device_id
)
# fails the check because output_t is not correctly sized
reduce_scatter_base(output_t, input_t)
# anticipate an error
with self.assertRaisesRegex(
RuntimeError, "input tensor must be the same type as the outut tensor."
):
tensor = torch.tensor([self.rank], dtype=torch.float).cuda(local_device_id)
output_t = torch.empty((self.world_size + 1), dtype=torch.long).cuda(
local_device_id
)
# fails the check because the dtype is different
reduce_scatter_base(output_t, tensor)
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_reduce_scatter_ops(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_nccl(store, self.opts())
local_device_ids = self.rank_to_GPU[self.rank]
num_gpus = len(local_device_ids)
def reduce_scatter(outputs, input_lists, op):
opts = c10d.ReduceScatterOptions()
opts.reduceOp = op
work = pg.reduce_scatter(outputs, input_lists, opts)
work.wait()
output = [torch.tensor([0]).cuda(i) for i in local_device_ids]
# GPU/rank
# 0 [1], [2], [3], [4]
# 1 [2], [3], [4], [5]
# 2 [3], [4], [5], [6]
# 3 [4], [5], [6], [7]
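        # Worked example for world_size=2 with one GPU per rank: rank 0 contributes
        # [1], [2] and rank 1 contributes [2], [3]; a SUM reduce_scatter leaves
        # rank 0 with [3] and rank 1 with [5], i.e.
        # (1 + world_size) * world_size / 2 + world_size * rank.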
# Sum
tensor_lists = []
input_per_gpu = []
for i in range(self.world_size):
input_per_gpu.append(torch.tensor([self.rank + i + 1]))
for gpu in local_device_ids:
tensor_lists.append([t.cuda(device=gpu) for t in input_per_gpu])
reduce_scatter(output, tensor_lists, c10d.ReduceOp.SUM)
for i in range(num_gpus):
expected = torch.tensor(
[
float((1 + self.world_size) * self.world_size / 2)
+ self.world_size * self.rank
])
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(expected, output[i])
# Min
reduce_scatter(output, tensor_lists, c10d.ReduceOp.MIN)
for i in range(num_gpus):
expected = torch.tensor([self.rank + 1 + i])
self.assertEqual(expected, output[i])
# Max
reduce_scatter(output, tensor_lists, c10d.ReduceOp.MAX)
for i in range(num_gpus):
expected = torch.tensor(
[self.rank + self.world_size + i]
)
self.assertEqual(expected, output[i])
# Product
reduce_scatter(output, tensor_lists, c10d.ReduceOp.PRODUCT)
# The math package doesn't have math.perm until Python 3.8, so
# we implement a naive version here.
def perm(n, k):
prod_val = n
for val in range(n - k + 1, n):
prod_val *= val
return prod_val
for i in range(num_gpus):
prod_val = perm(self.rank + self.world_size, self.world_size)
expected = torch.tensor([prod_val])
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(expected, output[i])
# Test the overridden-signature scenario, i.e., when the input is
# a list and the output is just a single tensor.
# Sum
output_tensor = torch.empty_like(input_per_gpu[0][0]).cuda(self.rank)
input_list = [tensor[0].cuda(self.rank) for tensor in input_per_gpu]
pg.reduce_scatter(output_tensor, input_list, c10d.ReduceOp.SUM).wait()
expected = torch.tensor(
float((1 + self.world_size) * self.world_size / 2) + self.world_size * self.rank
)
self.assertEqualIgnoreType(expected, output_tensor)
# Min
pg.reduce_scatter(output_tensor, input_list, c10d.ReduceOp.MIN).wait()
expected = torch.tensor(self.rank + 1)
self.assertEqualIgnoreType(expected, output_tensor)
# Max
pg.reduce_scatter(output_tensor, input_list, c10d.ReduceOp.MAX).wait()
expected = torch.tensor(self.rank + self.world_size)
self.assertEqualIgnoreType(expected, output_tensor)
# Product
pg.reduce_scatter(output_tensor, input_list, c10d.ReduceOp.PRODUCT).wait()
prod_val = self.rank + 1
for k in range(1, self.world_size):
prod_val = prod_val * (self.rank + 1 + k)
expected = torch.tensor(prod_val)
self.assertEqualIgnoreType(expected, output_tensor)
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_reduce_scatter_base_ops(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_nccl(store, self.opts())
local_device_id = self.rank_to_GPU[self.rank][0]
def reduce_scatter_base(output_t, input_t):
work = pg._reduce_scatter_base(output_t, input_t)
work.wait()
# reduce_scatter_base is agnostic to the number of GPUs.
# Each rank contributes one tensor regardless of GPU count.
output_t = torch.empty([1]).cuda(local_device_id)
tensor = torch.arange(self.world_size, dtype=output_t.dtype).cuda(local_device_id)
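        # Every rank supplies the identical arange(world_size) input, so the SUM
        # reduce-scatter should leave rank r holding world_size * r
        # (e.g. world_size=2: rank 0 -> 0, rank 1 -> 2).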
reduce_scatter_base(output_t, tensor)
# Verification
self.assertEqual(output_t[0], self.rank * self.world_size)
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_barrier(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_nccl(store, self.opts())
local_device_ids = self.rank_to_GPU[self.rank]
def allreduce(tensors):
opts = c10d.AllreduceOptions()
work = pg.allreduce(tensors, opts)
return work
# Make the collectives operate on
# 1, 2, 3, ..., len(local_device_ids) GPUs
tensors_list = [[] for _ in range(len(local_device_ids))]
for i in range(1, len(local_device_ids) + 1):
for j in range(i):
tensors_list[i - 1].append(torch.tensor([j + 1]).cuda(local_device_ids[j]))
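        # Every rank contributes the same value j + 1 in slot j, so each SUM
        # allreduce is expected to produce (j + 1) * world_size, which is checked
        # after the barrier.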
works = []
for tensors in tensors_list:
work = allreduce(tensors)
works.append(work)
# Barrier will ensure that all previous work is completed
pg.barrier().wait()
for i in range(1, len(local_device_ids) + 1):
for j in range(i):
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(
torch.tensor([(j + 1) * self.world_size]), tensors_list[i - 1][j]
)
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_send_recv(self):
store = c10d.FileStore(self.file_name, self.world_size)
self._create_process_group_nccl(store, self.opts())
device = self.rank_to_GPU[self.rank][0]
# Generate the same random tensor
torch.manual_seed(0)
send_tensor = torch.rand(10, 10, device=device)
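        # Both ranks seed identically, so rank 1's locally generated send_tensor
        # equals the one rank 0 sends and serves as the reference for the received data.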
if self.rank == 0:
dist.send(send_tensor, 1)
if self.rank == 1:
recv_tensor = torch.rand(10, 10, device=device)
dist.recv(recv_tensor, 0)
self.assertEqual(send_tensor, recv_tensor)
# Test with non-contiguous tensors.
send_tensor_view = send_tensor.t()
if self.rank == 0:
with self.assertRaisesRegex(RuntimeError, 'Tensors must be contiguous'):
dist.send(send_tensor_view, 1)
class DistributedDataParallelTest(
test_c10d_common.CommonDistributedDataParallelTest, MultiProcessTestCase
):
def setUp(self):
super(DistributedDataParallelTest, self).setUp()
# NCCL_BLOCKING_WAIT overrides NCCL_ASYNC_ERROR_HANDLING, hence tests
# that use NCCL_BLOCKING_WAIT will test it as expected.
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1"
self._spawn_processes()
def _get_process_group(self):
store = self._get_store()
return c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def _test_nccl_backend(
self, devices, device_ids, multi_device=False, gradient_as_bucket_view=False
):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
self._test_ddp_with_process_group(
process_group, devices, device_ids, multi_device, gradient_as_bucket_view
)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_propagate_error_reason(self):
# Need to use NCCL_BLOCKING_WAIT and not ASYNC_ERROR_HANDLING,
# otherwise the process will be taken down and we can't check for errors.
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "0"
os.environ["NCCL_BLOCKING_WAIT"] = "1"
# TODO: a smaller timeout can fail since ProcessGroupNCCL does a health check
# in its constructor. Look into reducing this test's runtime.
store = c10d.FileStore(self.file_name, self.world_size)
# provide sufficient timeout to initialize NCCL comm.
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size, timeout=timedelta(seconds=15))
pg_gloo = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
pg.barrier().wait(timedelta(seconds=5))
# Simulate stuckness in rank 0.
if self.rank == 0:
pg_gloo.barrier().wait()
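        # The gloo barrier above only completes once the nonzero ranks reach the
        # matching barrier at the end of this test, so rank 0 never joins the NCCL
        # allreduce below and the other ranks time out as intended.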
inp = torch.ones(1).cuda(self.rank)
if self.rank != 0:
# Time out due to rank 0 not calling into allreduce.
with self.assertRaises(RuntimeError):
pg.allreduce([inp]).wait(timedelta(seconds=5))
# Now when a nonzero rank attempts to use the communicator, the original failure reason should be logged.
try:
pg.allreduce([torch.ones(2).cuda(self.rank)]).wait()
except RuntimeError as e:
self.assertTrue("timed out in call to wait()" in str(e))
self.assertTrue("TensorShape=[1]" in str(e))
else:
self.fail("Expected error to be raised!")
# Unblock rank 0
pg_gloo.barrier().wait()
# TODO: We can also test that if rank 0 attempts to use the communicator,
# then we should error out with the info that it was aborted due to
# timeout on another rank. Although this would only be the case after
# the watchdog has run on the rank, and there is no reliable way
# to confirm it has run.
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_multi_device_ids_not_allowed(self):
int_devices = list(range(torch.cuda.device_count()))
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
with self.assertRaisesRegex(
ValueError, "device_ids can only be None or contain a single element."
):
self._test_nccl_backend(devices, int_devices)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_single_device_module_device_ids_None(self):
self._test_nccl_backend(None, None)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_single_device_module_empty_device_ids(self):
# This tests the backward compatibility of accepting an empty list as `device_ids`,
# although we no longer document this in favor of the default value of `None`,
# which is consistent with multi-device modules and CPU modules.
self._test_nccl_backend(None, [])
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_backend_multi_device_module_device_ids_None(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:2]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, None, multi_device=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_1gpu_module_device_ids_integer_list(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:1]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, int_devices)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_1gpu_module_device_ids_torch_device_list(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:1]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, devices)
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_backend_2gpu_module(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:2]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, None, multi_device=True)
@requires_nccl()
@skip_if_lt_x_gpu(8)
def test_nccl_backend_4gpu_module(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:4]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, None, multi_device=True)
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_ddp_multi_device_module_config(self):
gpus = gpus_for_rank(self.world_size)[self.rank]
self.assertTrue(len(gpus) >= 2, "expecting at least 2 gpus per process")
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
gpus = gpus[:2]
model = DoubleGpuNet(gpus)
with self.assertRaisesRegex(
ValueError,
"DistributedDataParallel device_ids and output_device arguments only work with "
"single-device/multiple-device GPU modules or CPU modules",
):
ddp_model = DistributedDataParallel(
model, output_device=gpus[1], process_group=process_group
)
with self.assertRaisesRegex(
ValueError, "device_ids can only be None or contain a single element."
):
ddp_model = DistributedDataParallel(
model, device_ids=gpus, process_group=process_group
)
with self.assertRaisesRegex(
ValueError, "input module must be on the same type of devices"
):
model.fc1 = model.fc1.cpu()
ddp_model = DistributedDataParallel(model, process_group=process_group)
model = model.cpu()
with self.assertRaisesRegex(
ValueError, "device_ids can only be None or contain a single element."
):
ddp_model = DistributedDataParallel(
model, device_ids=gpus, process_group=process_group
)
def _test_fp16(self, gradient_as_bucket_view=False):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
gpus = gpus_for_rank(self.world_size)[self.rank]
model = nn.Linear(1, 1, bias=False).cuda(gpus[0]).half()
nn.init.constant_(model.weight, 1)
ddp_model = DistributedDataParallel(
model,
device_ids=[gpus[0]],
process_group=process_group,
bucket_cap_mb=0.001,
gradient_as_bucket_view=gradient_as_bucket_view,
)
# Input 2**15, so that the gradients will overflow with a
# world_size of 2, unless we normalize the gradient by the
# world_size before the reduction
input = torch.tensor([[2 ** 15]]).cuda(gpus[0]).half()
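        # fp16 tops out around 65504, so an unnormalized SUM of 2 * 2**15 = 65536
        # would overflow to inf, while the averaged gradient stays at 2**15 and
        # remains finite.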
# Step model
ddp_model.train()
output = ddp_model(input)
loss = output.sum()
loss.backward()
self.assertFalse(any(torch.isinf(p.grad).any() for p in ddp_model.parameters()))
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_fp16(self):
self._test_fp16()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_fp16_grad_is_view(self):
self._test_fp16(gradient_as_bucket_view=True)
def _test_arbitrary_forward_return_value(self, gradient_as_bucket_view=False):
"""
Note: this test can be sped up by only running it on a CPU module
once DistributedDataParallel supports CPU modules.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
class ForwardReturnValueModule(nn.Module):
def __init__(self):
super(ForwardReturnValueModule, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.fc3 = nn.Linear(4, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x, fn):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
# The first softmax does NOT include fc3 in its autograd graph
# whereas the second softmax DOES. If we pass only the first
# tensor we see in the output to the reducer, it marks the
# gradient for fc3 as ready (because it doesn't show up). If
# downstream uses of this return value choose to differentiate
# against the second output tensor, it would still receive a
# gradient and a callback for this tensor, resulting in a crash.
return fn(
F.softmax(x, dim=1),
F.softmax(self.fc3(x), dim=1),
)
device_id = gpus_for_rank(self.world_size)[self.rank][0]
model = DistributedDataParallel(
ForwardReturnValueModule().float().to(device_id),
device_ids=[device_id],
process_group=process_group,
gradient_as_bucket_view=gradient_as_bucket_view,
)
batch_size = 4
criterion = nn.CrossEntropyLoss()
input = torch.rand([batch_size, 2], dtype=torch.float)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
device_id
)
# Always run "backward" to ensure the reducer is called by autograd.
# If we don't correctly capture the output tensors from the return value,
# the reducer won't see a hook for the unused parameter and will throw an error.
# The correct capture is what we're testing in this function.
def test(box, unbox):
output = model(input, fn=box)
loss = criterion(unbox(output), target)
loss.backward()
# Test with identity return value
test(
box=lambda x, y: (x, y),
unbox=lambda obj: obj[1],
)
# Test with list return value
test(
box=lambda x, y: ["foo", x, "bar", y],
unbox=lambda obj: obj[3],
)
# Test with tuple return value
test(
box=lambda x, y: ("foo", x, "bar", y),
unbox=lambda obj: obj[3],
)
# Test with dict return value
test(
box=lambda x, y: {"foo": "bar", "a": x, "b": y},
unbox=lambda obj: obj["b"],
)
# Test with list with dict return value
test(
box=lambda x, y: ["foo", "bar", {"a": x, "b": y}],
unbox=lambda obj: obj[2]["b"],
)
# Test with dict with list return value
test(
box=lambda x, y: {"foo": "bar", "list": [0, x, 1, y]},
unbox=lambda obj: obj["list"][3],
)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_arbitrary_forward_return_value(self):
self._test_arbitrary_forward_return_value()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_arbitrary_forward_return_value_grad_is_view(self):
self._test_arbitrary_forward_return_value(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_with_lazy_parameters(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
with self.assertRaisesRegex(
RuntimeError, "Modules with uninitialized parameters"
):
DistributedDataParallel(
torch.nn.LazyLinear(10), process_group=process_group
)
def _test_find_unused_parameters_kwarg(self, gradient_as_bucket_view=False):
"""
Note: this test can be sped up by only running it on a CPU module
once DistributedDataParallel supports CPU modules.
"""
torch.cuda.set_device(self.rank)
dist.init_process_group(
backend="nccl",
world_size=self.world_size,
rank=self.rank,
init_method=f"file://{self.file_name}",
)
process_group = c10d.distributed_c10d._get_default_group()
class FindUnusedParametersModule(nn.Module):
def __init__(self):
super(FindUnusedParametersModule, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.fc3 = nn.Linear(4, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
# Return the fc3 module so that the caller can invoke it
# outside of the forward function. While this is bad practice,
# we can use it to trigger a reducer error.
return (F.softmax(x, dim=1), self.fc3)
device_id = gpus_for_rank(self.world_size)[self.rank][0]
batch_size = 4
criterion = nn.CrossEntropyLoss()
input = torch.rand([batch_size, 2], dtype=torch.float)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
device_id
)
ddp_model = None
def test_find_unused_parameters(
find_unused_parameters, test_default=False, gradient_as_bucket_view=False
):
if test_default:
model = DistributedDataParallel(
FindUnusedParametersModule().float().to(device_id),
device_ids=[device_id],
process_group=process_group,
gradient_as_bucket_view=gradient_as_bucket_view,
)
else:
model = DistributedDataParallel(
FindUnusedParametersModule().float().to(device_id),
device_ids=[device_id],
process_group=process_group,
find_unused_parameters=find_unused_parameters,
gradient_as_bucket_view=gradient_as_bucket_view,
)
nonlocal ddp_model
ddp_model = model
output, fc3 = model(input)
output = fc3(output)
loss = criterion(output, target)
loss.backward()
# First test that finding unused params under these conditions triggers
# an error when `backward` is called (because fc3 is an unused
# parameter and will therefore be marked ready twice).
try:
test_find_unused_parameters(
True, gradient_as_bucket_view=gradient_as_bucket_view
)
except Exception as ex:
self.assertTrue(
str(ex).startswith(
"Expected to mark a variable ready only once.",
)
)
unused_index = 2
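            # Parameters are registered in the order fc1.weight, fc2.weight,
            # fc3.weight (bias=False everywhere), so the unused fc3.weight sits
            # at index 2.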
unused_index_str = f"Parameter at index {unused_index}"
model = ddp_model.module
for module_name, module in model.named_modules():
if module == model.fc3:
for parameter_name, _ in module.named_parameters(recurse=False):
unused_fqn = f"{module_name}.{parameter_name}"
# Only one such parameter in model.fc3, since bias=False
break
if dist.get_debug_level() != dist.DebugLevel.OFF:
unused_index_str += f" with name {unused_fqn}"
self.assertTrue(unused_index_str in str(ex))
else:
self.fail("Expected exception")
dist.barrier(process_group)
# Then test that the default behavior can be overridden by setting
# `find_unused_parameters=False`.
try:
test_find_unused_parameters(
False, gradient_as_bucket_view=gradient_as_bucket_view
)
except Exception as ex:
self.fail("Unexpected exception: %s" % ex)
# Test find_unused_parameters defaults to False
try:
test_find_unused_parameters(
True, test_default=True, gradient_as_bucket_view=gradient_as_bucket_view
)
except Exception as ex:
self.fail("Unexpected exception: %s" % ex)
# TODO: Combine the following tests once https://github.com/pytorch/pytorch/issues/55967
# is resolved.
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["DETAIL"])
def test_find_unused_parameters_kwarg_debug_detail(self):
self._test_find_unused_parameters_kwarg()
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["INFO"])
def test_find_unused_parameters_kwarg_debug_info(self):
self._test_find_unused_parameters_kwarg()
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["OFF"])
def test_find_unused_parameters_kwarg_debug_off(self):
self._test_find_unused_parameters_kwarg()
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["DETAIL"])
def test_find_unused_parameters_kwarg_grad_is_view_debug_detail(self):
self._test_find_unused_parameters_kwarg(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["INFO"])
def test_find_unused_parameters_kwarg_grad_is_view_debug_info(self):
self._test_find_unused_parameters_kwarg(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["OFF"])
def test_find_unused_parameters_kwarg_grad_is_view_debug_off(self):
self._test_find_unused_parameters_kwarg(gradient_as_bucket_view=True)
def _test_multiple_outputs_multiple_backward(self, gradient_as_bucket_view=False):
"""
Note: this test can be sped up by only running it on a CPU module
once DistributedDataParallel supports CPU modules.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
class MultipleOutputModule(nn.Module):
def __init__(self):
super(MultipleOutputModule, self).__init__()
def define_module():
return nn.Sequential(
nn.Linear(2, 10, bias=False),
nn.ReLU(),
nn.Linear(10, 4, bias=False),
nn.ReLU(),
)
self.module0 = define_module()
self.module1 = define_module()
def forward(self, x):
return (
F.softmax(self.module0(x), dim=1),
F.softmax(self.module1(x), dim=1),
)
device_id = gpus_for_rank(self.world_size)[self.rank][0]
model = DistributedDataParallel(
MultipleOutputModule().float().to(device_id),
device_ids=[device_id],
process_group=process_group,
gradient_as_bucket_view=gradient_as_bucket_view,
)
batch_size = 4
criterion = nn.CrossEntropyLoss()
input = torch.rand([batch_size, 2], dtype=torch.float)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
device_id
)
# Compute loss and gradients for both outputs
output1, output2 = model(input)
loss1 = criterion(output1, target)
loss1.backward()
loss2 = criterion(output2, target)
loss2.backward()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_multiple_outputs_multiple_backward(self):
self._test_multiple_outputs_multiple_backward()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_multiple_outputs_multiple_backward_grad_is_view(self):
self._test_multiple_outputs_multiple_backward(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_no_grad(self):
"""
Note: this test can be sped up by only running it on a CPU module
once DistributedDataParallel supports CPU modules.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
class NoGradModule(nn.Module):
def __init__(self):
super(NoGradModule, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
return F.softmax(x, dim=1)
device_id = gpus_for_rank(self.world_size)[self.rank][0]
model = DistributedDataParallel(
NoGradModule().float().to(device_id),
device_ids=[device_id],
process_group=process_group,
)
batch_size = 4
input = torch.rand([batch_size, 2], dtype=torch.float)
def check_no_grads():
for p in model.parameters():
self.assertTrue(p.requires_grad)
self.assertIsNone(p.grad)
# After initialization, no parameter has its gradient set.
check_no_grads()
# Run `forward` function with torch.no_grad()
with torch.no_grad():
output = model(input)
self.assertTrue(isinstance(output, torch.Tensor))
# No parameter should have its gradient set.
check_no_grads()
def _test_accumulate_gradients_module(self, gradient_as_bucket_view=False):
# This is NOT the recommended way to implement accumulating grads, but
# we would like to make sure DDP does not interfere with the underlying
# module.
int_devices = gpus_for_rank(self.world_size)[self.rank][:1]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
global_batch_size = self.world_size
model, ddp_model, input, target = self._prepare_single_device_module(
process_group, devices, devices, global_batch_size, gradient_as_bucket_view
)
def step_model(model, input, target):
model.train()
output = model(input)
loss = F.mse_loss(output, target.to(output.device))
loss.backward()
# ensure gradient accumulation works with no_grad
with torch.no_grad():
ddp_model.train()
ddp_model.module(input)
# Check two model parameters over 4 iterations.
# Use 4 iterations because we alternate between reducing and
# not reducing and want to make sure we switch both ways.
for iteration in range(4):
step_model(model, input, target)
if iteration % 2 == 0:
# Skip gradients sync without calling prepare_for_backward
step_model(
ddp_model.module,
input[self.rank : (self.rank + 1)],
target[self.rank : (self.rank + 1)],
)
for i, j in zip(model.parameters(), ddp_model.parameters()):
self.assertNotEqual(i.grad, j.grad)
else:
step_model(
ddp_model,
input[self.rank : (self.rank + 1)],
target[self.rank : (self.rank + 1)],
)
for i, j in zip(model.parameters(), ddp_model.parameters()):
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(i.grad, j.grad, rtol=1.3e-06, atol=5e-5)
# Shuffle the input so that DDP input is different
torch.manual_seed(1337 + iteration)
input = input[torch.randperm(global_batch_size)]
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_accumulate_gradients_module(self):
self._test_accumulate_gradients_module()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_accumulate_gradients_module_with_grad_is_view(self):
self._test_accumulate_gradients_module(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_failure_recovery(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
# Need to create a separate file for the recovered FileStore, because
# the original one will be deleted when the first FileStore is destructed.
recovery_filename = self.file_name + "_recovery"
if self.rank == 0:
# the file will be deleted by the recovered FileStore
open(recovery_filename, "w").close()
# not necessary to run barrier here, as DDP will synchronize
class TestModel(nn.Module):
def __init__(self):
super(TestModel, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
return F.softmax(x, dim=1)
device_id = gpus_for_rank(self.world_size)[self.rank][0]
model = TestModel().float().to(device_id)
ddp = DistributedDataParallel(
model,
device_ids=[device_id],
process_group=process_group,
)
batch_size = 4
criterion = nn.CrossEntropyLoss()
input = torch.rand([batch_size, 2], dtype=torch.float)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
device_id
)
for _ in range(6):
output = ddp(input)
loss = criterion(output, target)
loss.backward()
del ddp
del process_group
del store # this will delete self.file_name
store = c10d.FileStore(recovery_filename, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
ddp = DistributedDataParallel(
model,
device_ids=[device_id],
process_group=process_group,
)
input = torch.rand([batch_size, 2], dtype=torch.float)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
device_id
)
for _ in range(6):
output = ddp(input)
loss = criterion(output, target)
loss.backward()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_pass_default_pg(self):
dist.init_process_group(
"nccl",
init_method=f"file://{self.file_name}",
world_size=self.world_size,
rank=self.rank,
)
default_pg = c10d.distributed_c10d._get_default_group()
dist.destroy_process_group(default_pg)
self.assertFalse(dist.is_initialized())
def _test_grad_layout(self, replica_devices, layer_devs, local_batch_size):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
global_batch_size = local_batch_size * self.world_size
# Carry out some trials with small buckets and some with big buckets.
bucketsizes = (0.000001, 25)
# Tuples of lists. Each list describes per-layer characteristics for one trial.
layer_formats = (
[torch.contiguous_format] * 4,
[torch.channels_last] * 2 + [torch.contiguous_format] * 2,
[torch.channels_last] * 4,
)
layer_dtypes = (
[torch.float] * 4,
[torch.float] * 2 + [torch.half] * 2,
[torch.half] * 4,
)
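        # 3 layer-format configs x 3 dtype configs x 2 bucket sizes = 18 trials per call.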
input_dev = layer_devs[0] if isinstance(layer_devs, list) else layer_devs
target_dev = layer_devs[-1] if isinstance(layer_devs, list) else layer_devs
input = torch.randn(
(global_batch_size, 8, 8, 8), device=input_dev, dtype=torch.float
)
target = torch.randn(
(global_batch_size, 8, 4, 4), device=target_dev, dtype=torch.float
)
local_batch_start = self.rank * local_batch_size
local_batch_end = (self.rank + 1) * local_batch_size
# Reducer.cpp sneakily creates one "initial bucket" that ignores the "bucket_cap_mb"
# argument. The following makes sure the initial bucket also complies.
@contextmanager
def first_bucket_size(ddp_bucket_mb):
old_DEFAULT_FIRST_BUCKET_BYTES = dist._DEFAULT_FIRST_BUCKET_BYTES
dist._DEFAULT_FIRST_BUCKET_BYTES = int(ddp_bucket_mb * 1.0e6)
try:
yield
finally:
dist._DEFAULT_FIRST_BUCKET_BYTES = old_DEFAULT_FIRST_BUCKET_BYTES
with torch.backends.cudnn.flags(
enabled=True, deterministic=True, benchmark=False
):
for formats, dtypes, bucketsize in product(
layer_formats, layer_dtypes, bucketsizes
):
with first_bucket_size(bucketsize):
model_msg = (
"rank = {} formats = {} dtypes = {} bucketsize = {} ".format(
self.rank, formats, dtypes, bucketsize
)
)
try:
m = ConvNet(layer_devs, formats, dtypes)
m_ddp = DistributedDataParallel(
copy.deepcopy(m),
device_ids=replica_devices,
process_group=process_group,
bucket_cap_mb=bucketsize,
)
opt = torch.optim.SGD(m.parameters(), lr=0.1)
opt_ddp = torch.optim.SGD(m_ddp.parameters(), lr=0.1)
has_half = any(p.dtype is torch.half for p in m.parameters())
tol = 1.0e-3 if has_half else 1.0e-5
except BaseException:
# Prints case-specific debugging info to narrow down failing case.
print(
"Caught exception during model creation for " + model_msg,
flush=True,
)
raise
# 3 iters: First iter creates grads, second iter retests after rebucketing,
# third iter tries zeroed grads.
for it in range(3):
iter_msg = "iter = {} ".format(it) + model_msg
named_msg = iter_msg
try:
F.mse_loss(m(input).float(), target).backward()
F.mse_loss(
m_ddp(input[local_batch_start:local_batch_end]).float(),
target[local_batch_start:local_batch_end],
).backward()
for i, ((layer_name, m_child), m_ddp_child) in enumerate(
zip(m.named_children(), m_ddp.module.children())
):
named_msg = layer_name + ".weight" + " " + iter_msg
self.assertTrue(
m_child.weight.grad.is_contiguous(
memory_format=formats[i]
),
named_msg,
)
self.assertTrue(
m_ddp_child.weight.grad.is_contiguous(
memory_format=formats[i]
),
named_msg,
)
for j, ((param_name, p), p_ddp) in enumerate(
zip(
m_child.named_parameters(),
m_ddp_child.parameters(),
)
):
named_msg = (
layer_name + "." + param_name + " " + iter_msg
)
self.assertEqual(
p.grad, p_ddp.grad, rtol=tol, atol=tol
)
opt.step()
opt_ddp.step()
if it == 0:
for p, p_ddp in zip(m.parameters(), m_ddp.parameters()):
p.grad = None
p_ddp.grad = None
else:
m.zero_grad()
m_ddp.zero_grad()
except BaseException:
# Makes sure we still get info if an error occurred somewhere other than the asserts.
print(
"Caught exception during iterations at " + named_msg,
flush=True,
)
raise
@requires_nccl()
@skip_if_lt_x_gpu(2)
@skip_if_rocm
def test_grad_layout_1devicemodule_1replicaperprocess(self):
dev0 = torch.device("cuda:" + str(gpus_for_rank(self.world_size)[self.rank][0]))
# Tells DDP to use just one device.
replica_devices = [dev0]
# Tells _test_grad_layout to construct ConvNet with all layers on this process's first assigned device.
layer_devs = dev0
local_batch_size = 8
self._test_grad_layout(replica_devices, layer_devs, local_batch_size)
@requires_nccl()
@skip_if_lt_x_gpu(4)
@skip_if_rocm
def test_grad_layout_2devicemodule(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:2]
dev0 = torch.device("cuda:" + str(int_devices[0]))
dev1 = torch.device("cuda:" + str(int_devices[1]))
# DDP's default behavior for a multi-device module is "don't replicate."
replica_devices = None
# Tells _test_grad_layout to construct this process's ConvNet on 2 devices, with 2 layers on each device.
layer_devs = [dev0] * 2 + [dev1] * 2
local_batch_size = 8
self._test_grad_layout(replica_devices, layer_devs, local_batch_size)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_param_layout_mismatch_error(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
dev0 = torch.device("cuda:" + str(gpus_for_rank(self.world_size)[self.rank][0]))
layer_devs = dev0
layer_formats = (
[torch.contiguous_format] * 4
if self.rank == 0
else [torch.channels_last] * 4
)
layer_dtypes = [torch.float] * 4
m = ConvNet(layer_devs, layer_formats, layer_dtypes)
if self.rank == 0:
m_ddp = DistributedDataParallel(
m, device_ids=[dev0], process_group=process_group
)
else:
with self.assertRaisesRegex(
RuntimeError,
".* appears not to match strides of the same param in process 0",
):
m_ddp = DistributedDataParallel(
m, device_ids=[dev0], process_group=process_group
)
def _gpu_model_with_ddp_comm_hook(
self,
process_group,
hook=None,
gradient_as_bucket_view=False,
state=None,
static_graph=False,
):
device_id = gpus_for_rank(self.world_size)[self.rank][0]
gpu_model = DistributedDataParallel(
ModuleForDdpCommHook().to(device_id),
device_ids=[device_id],
process_group=process_group,
gradient_as_bucket_view=gradient_as_bucket_view,
static_graph=static_graph,
)
# Register a DDP communication hook if any.
if hook is not None:
gpu_model.register_comm_hook(state, hook)
return gpu_model
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_future_passing_gpu_nccl(self):
"""
This unit test verifies whether the Future object is passed properly using the NCCL backend.
The hook callback function creates a Future object and sets a value on it.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
# Get GPU model with simple_hook registered.
gpu_model = self._gpu_model_with_ddp_comm_hook(process_group, self._simple_hook)
# check whether the grads are equal to what simple_hook's then callback returns.
# without the comm_hook, result would be 0.25 * torch.ones(2, 2).
self._run_and_verify_hook(gpu_model, 8, 2 * torch.ones(2, 2))
def _test_ddp_comm_hook_allreduce_hook_nccl(
self, gradient_as_bucket_view=False, static_graph=False
):
"""
This unit test verifies whether a DDP communication hook that just calls
allreduce gives the same result as when no hook is registered.
Without the then callback, the future_value in reducer is no longer
a PyObject, and this unit test verifies future_value is properly checked.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def allreduce_hook(
state: object, bucket: dist.GradBucket
) -> torch.futures.Future[torch.Tensor]:
tensors = [bucket.buffer() / self.world_size]
return (
process_group.allreduce(tensors)
.get_future()
.then(lambda fut: fut.value()[0])
)
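        # Dividing the bucket by world_size before the SUM allreduce reproduces DDP's
        # default gradient averaging, which is why the hooked model is expected to
        # match the hook-free baseline below.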
# Get GPU model with allreduce_hook registered.
gpu_model = self._gpu_model_with_ddp_comm_hook(
process_group, allreduce_hook, gradient_as_bucket_view, static_graph
)
# check whether the grads are equal to what DDP without hook would return.
self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
def _test_default_ddp_comm_hooks_nccl(self, gradient_as_bucket_view=False):
"""
This unit test verifies whether default Python DDP communication hooks ALLREDUCE, FP16_COMPRESS
and BF16_COMPRESS can give the same result as when no hook is registered.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
# For these default DDP comm hooks, the only state is process group.
state = process_group
hook_options = [default.allreduce_hook, default.fp16_compress_hook]
if (
not TEST_WITH_ROCM
and BFLOAT16_AVAILABLE
and c10d.is_nccl_available()
and torch.cuda.nccl.version() >= (2, 10)
):
hook_options.append(default.bf16_compress_hook)
for hook in hook_options:
# Get GPU model with the hook registered.
# The first arg 'process_group' is used for initializing the test environment,
# so it cannot be replaced by 'state', although they have the same value.
gpu_model = self._gpu_model_with_ddp_comm_hook(
process_group, hook, gradient_as_bucket_view, state
)
# check whether the grads are equal to what DDP without hook would return.
self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
def _test_fp16_compress_wrapper(self, gradient_as_bucket_view=False):
"""
This unit test verifies whether wrapping the ALLREDUCE and POWER_SGD hooks with
the FP16_WRAPPER can give the same result as when there is no hook registered.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
powerSGD_state = powerSGD.PowerSGDState(process_group=process_group)
hook_args = [
(powerSGD.powerSGD_hook, powerSGD_state),
(default.allreduce_hook, process_group),
]
for hook, state in hook_args:
gpu_model = self._gpu_model_with_ddp_comm_hook(
process_group,
default.fp16_compress_wrapper(hook),
gradient_as_bucket_view,
state,
)
# check whether the grads are equal to what DDP without hook would return.
self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
def _test_bf16_compress_wrapper(self, gradient_as_bucket_view=False):
"""
This unit test verifies whether wrapping the ALLREDUCE and POWER_SGD hooks with
the BF16_WRAPPER can give the same result as when there is no hook registered.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
powerSGD_state = powerSGD.PowerSGDState(process_group=process_group)
hook_args = [
(powerSGD.powerSGD_hook, powerSGD_state),
(default.allreduce_hook, process_group),
]
for hook, state in hook_args:
gpu_model = self._gpu_model_with_ddp_comm_hook(
process_group,
default.bf16_compress_wrapper(hook),
gradient_as_bucket_view,
state,
)
# check whether the grads are equal to what DDP without hook would return.
self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
def _test_powerSGD_ddp_comm_hook_nccl(self, gradient_as_bucket_view=False):
"""
This unit test verifies whether Python DDP communication hook POWER_SGD
can give the same result as when no hook is registered.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
# Get GPU model with the hook registered.
# Test the hook with different algorithmic configs.
for use_error_feedback, warm_start, batch_tensors_with_same_shape in product(
[True, False], [True, False], [True, False],
):
state = powerSGD.PowerSGDState(
process_group=process_group,
matrix_approximation_rank=1,
use_error_feedback=use_error_feedback,
warm_start=warm_start,
batch_tensors_with_same_shape=batch_tensors_with_same_shape,
)
for hook in [powerSGD.powerSGD_hook, powerSGD.batched_powerSGD_hook]:
gpu_model = self._gpu_model_with_ddp_comm_hook(
process_group, hook, gradient_as_bucket_view, state
)
# check whether the grads are equal to what DDP without hook would return.
self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
def _test_builtin_ddp_comm_hooks_nccl(self, gradient_as_bucket_view=False):
"""
This unit test verifies whether built-in C++ DDP communication hooks ALLREDUCE and FP16_COMPRESS
can give the same result as when no hook is registered.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
for comm_hook_type in [
dist.BuiltinCommHookType.ALLREDUCE,
dist.BuiltinCommHookType.FP16_COMPRESS,
]:
# Get GPU model with the built-in communication hook.
gpu_model = self._gpu_model_with_builtin_ddp_comm_hook(
process_group, comm_hook_type, gradient_as_bucket_view
)
# check whether the grads are equal to what DDP without hook would return.
self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_allreduce_hook_nccl(self):
self._test_ddp_comm_hook_allreduce_hook_nccl()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_default_ddp_comm_hooks_nccl(self):
self._test_default_ddp_comm_hooks_nccl()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_fp16_compress_wrapper_nccl(self):
self._test_fp16_compress_wrapper()
@requires_nccl()
@requires_nccl_version((2, 10), "Need NCCL 2.10+ for BF16_COMPRESS")
@sandcastle_skip_if(
not BFLOAT16_AVAILABLE,
"BFloat16 is only supported by CUDA 11+",
)
@skip_if_lt_x_gpu(2)
@skip_if_rocm
def test_bf16_compress_wrapper_nccl(self):
self._test_bf16_compress_wrapper()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_builtin_ddp_comm_hooks_nccl(self):
self._test_builtin_ddp_comm_hooks_nccl()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_powerSGD_ddp_comm_hook_nccl(self):
self._test_powerSGD_ddp_comm_hook_nccl()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_allreduce_hook_nccl_grad_is_view(self):
self._test_ddp_comm_hook_allreduce_hook_nccl(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_allreduce_hook_nccl_static_graph(self):
self._test_ddp_comm_hook_allreduce_hook_nccl(static_graph=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_default_ddp_comm_hooks_nccl_is_view(self):
self._test_default_ddp_comm_hooks_nccl(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_fp16_compress_wrapper_is_view(self):
self._test_fp16_compress_wrapper(gradient_as_bucket_view=True)
@requires_nccl()
@requires_nccl_version((2, 10), "Need NCCL 2.10+ for BF16_COMPRESS")
@sandcastle_skip_if(
not BFLOAT16_AVAILABLE,
"BFloat16 is only supported by CUDA 11+",
)
@skip_if_lt_x_gpu(2)
@skip_if_rocm
def test_bf16_compress_wrapper_is_view(self):
self._test_bf16_compress_wrapper(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_builtin_ddp_comm_hooks_nccl_grad_is_view(self):
self._test_builtin_ddp_comm_hooks_nccl(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_powerSGD_ddp_comm_hook_nccl_grad_is_view(self):
self._test_powerSGD_ddp_comm_hook_nccl(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_allreduce_with_then_hook_nccl(self):
"""
This unit test verifies whether a DDP communication hook that calls allreduce and then
multiplies the result by ten and divides by two gives the expected result.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def allreduce_with_then_hook(
state: object, bucket: dist.GradBucket
) -> torch.futures.Future[torch.Tensor]:
tensors = [bucket.buffer() / self.world_size]
fut = process_group.allreduce(tensors).get_future()
def mult(fut):
# Multiply the result by 10.
return 10 * fut.value()[0]
def div(fut):
# Divide the result by 2.
return 0.5 * fut.value()
return fut.then(mult).then(div)
# Get GPU model with allreduce_with_then_hook registered.
gpu_model = self._gpu_model_with_ddp_comm_hook(
process_group, allreduce_with_then_hook
)
        # check whether the grads are equal to what allreduce returns multiplied by 5.
# without the comm_hook, result would be still 0.25 * torch.ones(2, 2).
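        # i.e. 0.25 * torch.ones(2, 2) * 10 * 0.5 = 1.25 * torch.ones(2, 2).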
self._run_and_verify_hook(gpu_model, 8, 1.25 * torch.ones(2, 2))
class AcceptsParam(torch.nn.Module):
def __init__(self, p, factor):
super().__init__()
self.a = p
self.f = factor
def forward(self, input):
return input + self.a * self.f
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_weight_sharing(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
size = 2048 * 2048
dev = self.rank
world = self.world_size
p = torch.nn.Parameter(torch.randn(size, requires_grad=True))
for try_set_to_none, use_bucket_view in product((False, True), (False, True)):
m = torch.nn.Sequential(
self.AcceptsParam(p, dev + 1), self.AcceptsParam(p, dev + 1)
).cuda(dev)
m = torch.nn.parallel.DistributedDataParallel(
m,
bucket_cap_mb=1,
gradient_as_bucket_view=use_bucket_view,
device_ids=[dev],
process_group=process_group,
)
for i in range(3):
m.zero_grad(set_to_none=try_set_to_none)
m(1).sum().backward()
# Each param value is multiplied by "rank + 1" twice in forward, so the grad
# values produced by a particular rank should be 2. * (rank + 1).
# Summing these over ranks and dividing by world size gives the expected result:
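                # For example, with world_size = 2 the per-rank grads are 2 and 4,
                # and the allreduce average is (2 + 4) / 2 = 3 = 2 * (2 * 3 / 2) / 2.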
analytic = torch.full_like(
p, 2.0 * (world * (world + 1.0) / 2.0) / world, device=dev
)
for name, p in m.named_parameters():
self.assertEqual(
p.grad,
analytic,
"mismatch at "
+ name
+ ".grad for "
+ "set_to_none = {}, use_bucket_view = {}".format(
try_set_to_none, use_bucket_view
),
)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_channels_last_contig(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
device = torch.device(f"cuda:{self.rank}")
tensor = torch.ones((2, 16, 768, 1152), dtype=torch.float32, device=device).to(memory_format=torch.channels_last)
process_group.broadcast([tensor]).wait()
class NcclErrorHandlingTest(MultiProcessTestCase):
def setUp(self):
super(NcclErrorHandlingTest, self).setUp()
# Need to skip return code checking for these tests since the child
# processes don't exit cleanly.
self.skip_return_code_checks = [
self.test_nccl_errors_blocking_abort.__wrapped__,
self.test_nccl_errors_blocking_sigkill.__wrapped__,
self.test_nccl_errors_blocking_sigterm.__wrapped__,
self.test_nccl_errors_blocking_nonzero_exit.__wrapped__,
]
# NCCL_BLOCKING_WAIT overrides NCCL_ASYNC_ERROR_HANDLING hence tests
# that use NCCL_BLOCKING_WAIT will test it as expected.
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1"
self._spawn_processes()
def tearDown(self):
super(NcclErrorHandlingTest, self).tearDown()
try:
os.remove(self.file_name)
except OSError:
pass
@property
def op_timeout_sec(self):
return 1
@property
def world_size(self):
return 3
@property
def blocking_wait_error_msg(self):
return "Caught collective operation timeout"
def _run_all_reduce(self, pg):
pg.allreduce(torch.rand(10).cuda(self.rank))
@requires_nccl()
@requires_nccl_version((2, 4, 0), "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
@skip_if_rocm
@sandcastle_skip("Test does not pass when run locally")
def test_nccl_errors_nonblocking(self):
# Note: we unset and restore NCCL_ASYNC_ERROR_HANDLING for this test
# since test_c10d_common runs with async error handling by default, but this
# tests behavior when it is not enabled.
prev_nccl_async_error_handling = os.environ.get(
"NCCL_ASYNC_ERROR_HANDLING", None
)
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "0"
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
process_group.allreduce(torch.rand(10).cuda(self.rank))
if self.rank == 0:
# This allreduce does not block Python thread as allreduce enqueues
# the cuda operation, and then wait only blocks the current cuda
# stream.
work = process_group.allreduce(torch.rand(10).cuda(self.rank))
work.wait()
# Now the work scheduled next should hang forever since the previous
# allreduce will never complete.
t = threading.Thread(target=self._run_all_reduce, args=(process_group,))
t.daemon = True
t.start()
t.join(int(get_timeout(self.id()) / 5))
self.assertTrue(t.is_alive())
if prev_nccl_async_error_handling is not None:
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = prev_nccl_async_error_handling
def _test_nccl_errors_blocking(self, func):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(
store,
self.rank,
self.world_size,
timeout=timedelta(seconds=10),
)
process_group.allreduce(torch.rand(10).cuda(self.rank))
if self.rank == 0:
work = process_group.allreduce(torch.rand(10).cuda(self.rank))
with self.assertRaisesRegex(RuntimeError, self.blocking_wait_error_msg):
# Operation would time out in blocking mode.
work.wait(timeout=timedelta(seconds=self.op_timeout_sec))
# Run some GPU operations to make sure cuda has not gotten stuck.
# It was observed cuda could get stuck if NCCL communicators were
# not properly aborted before throwing RuntimeError.
a = torch.rand(10).cuda(self.rank)
elif self.rank == 1:
# Clean up structures (ex: files for FileStore before going down)
del process_group
func()
else:
# Wait for timeout
time.sleep(2 * self.op_timeout_sec)
# Now verify communicators on this rank have been aborted by the watchdog thread.
self._wait_for_comm_abort(process_group)
@with_nccl_blocking_wait
@requires_nccl()
@requires_nccl_version((2, 4, 0), "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
@skip_if_rocm
def test_nccl_errors_blocking_clean_exit(self):
self._test_nccl_errors_blocking(lambda: sys.exit(0))
@with_nccl_blocking_wait
@requires_nccl()
@requires_nccl_version((2, 4, 0), "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
@skip_if_rocm
def test_nccl_errors_blocking_nonzero_exit(self):
self._test_nccl_errors_blocking(lambda: sys.exit(1))
@with_nccl_blocking_wait
@requires_nccl()
@requires_nccl_version((2, 4, 0), "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
@skip_if_rocm
@sandcastle_skip(
"Frequently times out see https://github.com/pytorch/pytorch/issues/58920"
)
def test_nccl_errors_blocking_abort(self):
self._test_nccl_errors_blocking(lambda: os.abort())
@with_nccl_blocking_wait
@requires_nccl()
@requires_nccl_version((2, 4, 0), "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
@skip_if_rocm
def test_nccl_errors_blocking_sigkill(self):
self._test_nccl_errors_blocking(lambda: os.kill(os.getpid(), signal.SIGKILL))
@with_nccl_blocking_wait
@requires_nccl()
@requires_nccl_version((2, 4, 0), "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
@skip_if_rocm
def test_nccl_errors_blocking_sigterm(self):
self._test_nccl_errors_blocking(lambda: os.kill(os.getpid(), signal.SIGTERM))
@with_nccl_blocking_wait
@requires_nccl()
@requires_nccl_version((2, 4, 0), "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
def test_nccl_blocking_wait_with_barrier(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(
store,
self.rank,
self.world_size,
timeout=timedelta(seconds=10),
)
process_group.barrier().wait()
if self.rank == 0:
with self.assertRaisesRegex(RuntimeError, self.blocking_wait_error_msg):
# This should timeout
process_group.barrier().wait(timeout=timedelta(seconds=self.op_timeout_sec))
def _run_invalid_nccl_blocking_wait_env(self, val):
os.environ["NCCL_BLOCKING_WAIT"] = val
store = c10d.FileStore(self.file_name, self.world_size)
with self.assertRaises(RuntimeError):
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
@requires_nccl()
@skip_if_lt_x_gpu(3)
def test_invalid_nccl_blocking_wait_env(self):
self._run_invalid_nccl_blocking_wait_env("abc")
self._run_invalid_nccl_blocking_wait_env("-1")
self._run_invalid_nccl_blocking_wait_env("2147483647")
self._run_invalid_nccl_blocking_wait_env("4294967295")
def _check_valid_comm_exception(self, e):
exception_str = str(e)
valid_exceptions = [
"NCCL communicator was aborted",
"NCCL communicator encountered error",
"Caught collective operation timeout"
]
return any(exc in exception_str for exc in valid_exceptions)
def _wait_for_comm_abort(self, process_group, timeout=None):
"""
Waits for the watchdog thread to abort communicators for the process group.
"""
while True:
try:
if not timeout:
process_group.allreduce(torch.rand(10).cuda(self.rank)).wait()
else:
assert isinstance(timeout, timedelta)
process_group.allreduce(torch.rand(10).cuda(self.rank)).wait(timeout=timeout)
except Exception as e:
if self._check_valid_comm_exception(e):
return
else:
raise e
time.sleep(1)
@with_nccl_blocking_wait
@requires_nccl()
@requires_gloo()
@skip_if_lt_x_gpu(3)
def test_nccl_timeout(self):
store = c10d.FileStore(self.file_name, self.world_size)
# Initialize process_group.
process_group = c10d.ProcessGroupNCCL(
store, self.rank, self.world_size, timeout=timedelta(seconds=10)
)
# Control gloo pg used as go-ahead signal/barrier
# to coordinate btwn ranks.
pg_gloo = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
failed_collective_timeout = timedelta(milliseconds=100)
process_group.allreduce(torch.rand(10).cuda(self.rank)).wait(timeout=timedelta(seconds=5))
if self.rank == 0:
            # This should time out quickly, since the wait below uses a 100 ms timeout.
# Watchdog may abort timed out work resulting in NCCL error instead of operation timed out.
with self.assertRaisesRegex(RuntimeError, self.blocking_wait_error_msg):
process_group.allreduce(torch.rand(10).cuda(self.rank)).wait(timeout=failed_collective_timeout)
# Now do a barrier to tell other rank to go ahead.
pg_gloo.barrier().wait()
else:
# Wait on rank 0 to fail.
try:
pg_gloo.barrier().wait()
except Exception as e:
raise ValueError(f"Rank {self.rank} barrier timed out waiting for rank 0 with error: {str(e)}")
# Now verify communicators on this rank have
# been aborted by watchdog.
self._wait_for_comm_abort(process_group, failed_collective_timeout)
class CommTest(test_c10d_common.AbstractCommTest, MultiProcessTestCase):
def setUp(self):
super(CommTest, self).setUp()
# NCCL_BLOCKING_WAIT overrides NCCL_ASYNC_ERROR_HANDLING hence tests
# that use NCCL_BLOCKING_WAIT will test it as expected.
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1"
self._spawn_processes()
def tearDown(self):
super(CommTest, self).tearDown()
try:
os.remove(self.file_name)
except OSError:
pass
def _test_broadcast_coalesced(self, process_group, device, root_rank):
half = torch.float16
# No support for float16 for CPU tensors
if device == torch.device("cpu"):
half = torch.float32
target = torch.arange(60, dtype=half, device=device).chunk(5)
target += torch.arange(60, dtype=torch.float32, device=device).chunk(5)
target += torch.arange(60, dtype=half, device=device).chunk(5)
target += torch.arange(60, dtype=torch.float64, device=device).chunk(5)
target += torch.arange(60, dtype=half, device=device).chunk(5)
target += torch.arange(60, dtype=torch.float32, device=device).chunk(5)
        # The tensors to pass to broadcast are identical to the target
# only on the process that is the root of the broadcast.
if self.rank == root_rank:
tensors = list(tensor.clone() for tensor in target)
else:
tensors = list(torch.zeros_like(tensor) for tensor in target)
if self.rank != root_rank:
self.assertNotEqual(tensors, target)
c10d._broadcast_coalesced(
process_group, tensors, buffer_size=256, src=root_rank
)
if self.rank != root_rank:
self.assertEqual(tensors, target)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_broadcast_coalesced_nccl(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
device = torch.device("cuda:%d" % self.rank)
ranks = [0, 1]
for root_rank in ranks:
self._test_broadcast_coalesced(process_group, device, root_rank)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_all_reduce_coalesced_nccl(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
device = torch.device("cuda:%d" % self.rank)
tensors = [torch.full((60 + i,), self.rank + 1 + i, device=device, dtype=torch.float) for i in range(5)]
torch.distributed.all_reduce_coalesced(tensors, group=process_group)
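        # Each rank fills tensor i with (rank + 1 + i), so the all-reduced sum over ranks is
        # world_size * i + world_size * (world_size + 1) / 2, which is the value asserted below.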
for i, t in enumerate(tensors):
self.assertEqual(t, torch.full_like(t, self.world_size * (i + (self.world_size + 1.) / 2.)))
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_sequence_num_set_default_pg_nccl(self):
torch.cuda.set_device(self.rank)
self._test_sequence_num_set_default_pg(backend="nccl")
@skip_if_lt_x_gpu(2)
@requires_nccl()
def test_sequence_num_incremented_nccl_default(self):
self._test_sequence_num_incremented_default_group("nccl")
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_sequence_num_incremented_nccl_subgroup(self):
if self.world_size < 4:
return sandcastle_skip("Test requires world_size of at least 4")
self._test_sequence_num_incremented_subgroup("nccl")
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_sequence_num_set_nccl_new_group(self):
torch.cuda.set_device(self.rank)
self._test_sequence_num_set_new_group(backend="nccl")
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_pass_nccl_options_high_priority_stream(self):
pg_opts = c10d.ProcessGroupNCCL.Options()
pg_opts.is_high_priority_stream = True
store = c10d.FileStore(self.file_name, self.world_size)
# Test init_process_group accepts options
dist.init_process_group(
"nccl",
world_size=self.world_size,
rank=self.rank,
store=store,
pg_options=pg_opts,
)
# Test with new_group
pg = c10d.new_group([0, 1], pg_options=pg_opts)
        # test whether the process group was constructed with a high priority stream
self.assertTrue(pg.options.is_high_priority_stream)
# test the process group works as expected
t = torch.tensor([self.rank + 1] * 10).cuda(self.rank)
pg.allreduce(t).wait()
expected_tensor = torch.tensor([3] * 10).cuda(self.rank)
self.assertEqual(expected_tensor, t)
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_barrier(self):
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="nccl", rank=self.rank, world_size=self.world_size, store=store
)
t = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
c10d.all_reduce(t)
expected_tensor = torch.tensor([3] * 10).cuda(2 * self.rank)
self.assertEqual(expected_tensor, t)
# Test with new_group
pg = c10d.new_group([0, 1])
t = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
pg.allreduce(t).wait()
self.assertEqual(expected_tensor, t)
pg = c10d.new_group([0])
if self.rank == 0:
t = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
expected_tensor = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
pg.allreduce(t).wait()
self.assertEqual(expected_tensor, t)
pg = c10d.new_group([1])
if self.rank == 1:
t = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
expected_tensor = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
pg.allreduce(t).wait()
self.assertEqual(expected_tensor, t)
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_barrier_timeout(self):
os.environ["ENABLE_NCCL_HEALTH_CHECK"] = "1"
store = c10d.FileStore(self.file_name, self.world_size)
if self.rank == 0:
with self.assertRaisesRegex(
RuntimeError, "Health check failure"
):
c10d.init_process_group(
backend="nccl",
rank=self.rank,
world_size=self.world_size,
store=store,
timeout=timedelta(seconds=10),
)
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_barrier_timeout_new_group(self):
os.environ["ENABLE_NCCL_HEALTH_CHECK"] = "1"
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="nccl",
rank=self.rank,
world_size=self.world_size,
store=store,
timeout=timedelta(seconds=10),
)
if self.rank == 0:
with self.assertRaisesRegex(
RuntimeError, "Health check failure"
):
c10d.new_group([0, 1], timeout=timedelta(seconds=1))
with self.assertRaisesRegex(
RuntimeError, "Timed out initializing process group"
):
c10d.new_group([0], timeout=timedelta(seconds=1))
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_barrier_timeout_new_group_non_member(self):
os.environ["ENABLE_NCCL_HEALTH_CHECK"] = "1"
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="nccl",
rank=self.rank,
world_size=self.world_size,
store=store,
timeout=timedelta(seconds=10),
)
if self.rank == 1:
with self.assertRaisesRegex(
RuntimeError, "Health check failure"
):
c10d.new_group([0, 1], timeout=timedelta(seconds=1))
with self.assertRaisesRegex(
RuntimeError, "Timed out initializing process group"
):
c10d.new_group([0], timeout=timedelta(seconds=1))
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_barrier_device_ids(self):
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="nccl", rank=self.rank, world_size=self.world_size, store=store
)
c10d.barrier(device_ids=[self.rank])
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_barrier_device_ids_function_argument(self):
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="nccl", rank=self.rank, world_size=self.world_size, store=store
)
with self.assertRaisesRegex(RuntimeError, "Invalid function argument"):
c10d.barrier(device_ids=self.rank)
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["DETAIL"])
def test_nccl_warn_not_in_group_debug_detail(self):
self._test_warn_not_in_group(backend="nccl")
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["INFO"])
def test_nccl_warn_not_in_group_debug_info(self):
self._test_warn_not_in_group(backend="nccl")
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["OFF"])
def test_nccl_warn_not_in_group_debug_off(self):
self._test_warn_not_in_group(backend="nccl")
if __name__ == "__main__":
assert (
not torch.cuda._initialized
), "test_distributed must not have initialized CUDA context on main process"
run_tests()
|
HPoolPPMineWidget.py
|
from PyQt5.QtWidgets import QWidget, QMessageBox
from PyQt5.Qt import pyqtSignal, QTimerEvent
from ui.HPoolPPMineWidget import Ui_HPoolPPMineWidget
from PyQt5.QtCore import Qt
from config import save_config, get_config
from utils import size_to_str
from datetime import datetime, timedelta
import os
from core import BASE_DIR
from subprocess import Popen, PIPE, STDOUT, CREATE_NO_WINDOW
import socket
import threading
import platform
import time
from core.disk import disk_operation
from utils import is_auto_launch, setup_auto_launch
import psutil
class HPoolPPMineWidget(QWidget, Ui_HPoolPPMineWidget):
signalMineLog = pyqtSignal(str)
signalMineTerminated = pyqtSignal()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.setupUi(self)
self.main_window = None
self.mine_process = None
self.mine_terminating = False
self.mine_restarting = False
self.last_mine_log_time = 0
self.signalMineLog.connect(self.outputMineLog)
self.signalMineTerminated.connect(self.mineTerminated)
config = get_config()
if 'hpool_pp_miner_name' in config:
self.editMinerName.setText(config['hpool_pp_miner_name'])
else:
self.editMinerName.setText(socket.gethostname())
self.saveMineConfig()
self.editMinerName.editingFinished.connect(self.saveMineConfig)
if 'hpool_pp_apikey' in config:
self.editApiKey.setText(config['hpool_pp_apikey'])
self.editApiKey.editingFinished.connect(self.saveMineConfig)
self.buttonStart.clicked.connect(self.clickStartMine)
self.checkBoxAutoStart.stateChanged.connect(self.checkAutoStart)
self.timerIdCheckProcess = self.startTimer(1000)
disk_operation.signalResult.connect(self.slotDiskOperation)
def setMainWindow(self, win):
self.main_window = win
config = get_config()
if 'hpool_pp_auto_mine' not in config:
auto_start = is_auto_launch()
else:
auto_start = config['hpool_pp_auto_mine']
if auto_start:
self.checkBoxAutoStart.setChecked(True)
self.startMine()
def timerEvent(self, event: QTimerEvent) -> None:
timer = event.timerId()
if timer == self.timerIdCheckProcess:
if not self.mine_process:
return
if self.last_mine_log_time == 0:
return
if time.time() - self.last_mine_log_time > 60*2:
self.last_mine_log_time = 0
self.restartMine('等待超时,重启挖矿进程')
try:
p = psutil.Process(pid=self.mine_process.pid)
m = p.memory_info()
if m.private > 1024*1024*500:
self.restartMine('挖矿进程的内存超过500M,重启挖矿进程')
except:
pass
def slotDiskOperation(self, name, opt):
result = opt['result']
if name == 'updateMiningNewPlotTotalInfo':
yesterday_count = result['yesterday_count']
today_count = result['today_count']
total_count = result['total_count']
total_size = result['total_size']
status = f'昨天文件数{yesterday_count}个 今天文件数{today_count}个 总文件数{total_count}个 算力{size_to_str(total_size)}'
self.labelStatus.setText(status)
def checkMineLog(self, text):
if 'The operation completed successfully.' in text:
return False
if '操作成功完成' in text:
return False
consume_max = 1000
if '扫盘' in text:
self.restartMine('扫盘异常,重新启动程序')
return False
# if 'new mining info' in text:
# r = re.compile(r'scan consume=(\d*) scan time')
# found = re.findall(r, text)
# if found:
# consume = int(found[0])
# if consume > consume_max:
# self.restartMine(f'扫盘时间{consume}超过{consume_max}')
# return False
return True
def outputMineLog(self, text):
text = text.strip()
if not text:
return
if not self.checkMineLog(text):
return
self.last_mine_log_time = time.time()
self.textEditLog.append(text)
log_size = len(self.textEditLog.toPlainText())
if log_size > 1024 * 1024:
self.textEditLog.clear()
def minerLog(self, text):
dt = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
self.textEditLog.append(dt + ' ' + text)
def mineTerminated(self):
self.editMinerName.setDisabled(False)
self.editApiKey.setDisabled(False)
self.buttonStart.setText('开始挖矿')
index = self.main_window.tabWidget.indexOf(self.main_window.tabHPoolPPMine)
self.main_window.tabWidget.setTabText(index, 'HPoolPP新矿池挖矿')
if self.mine_restarting:
self.mine_restarting = False
self.startMine()
return
if not self.mine_terminating:
self.minerLog('挖矿意外停止')
self.minerLog('正在重新挖矿...')
self.mine_process = None
self.startMine()
return
self.minerLog('挖矿已停止')
self.mine_terminating = False
def saveMineConfig(self):
miner_name = self.editMinerName.text()
apikey = self.editApiKey.text()
config = get_config()
config['hpool_pp_miner_name'] = miner_name
config['hpool_pp_apikey'] = apikey
save_config()
def restartMine(self, log=''):
if self.mine_process is None:
return
if not log:
log = '配置已更改,正在重新挖矿...'
self.minerLog(log)
self.mine_restarting = True
self.mine_process.terminate()
def checkAutoStart(self, i):
config = get_config()
auto_start = self.checkBoxAutoStart.isChecked()
config['hpool_pp_auto_mine'] = auto_start
save_config()
if self.main_window:
self.main_window.setup_auto_launch()
def clickStartMine(self):
if not self.mine_process:
self.textEditLog.clear()
self.startMine(manual=True)
def startMine(self, manual=False):
if self.mine_process:
self.mine_terminating = True
self.mine_process.terminate()
return
config = get_config()
if 'hdd_folders' not in config or not config['hdd_folders']:
QMessageBox.information(self, '提示', '请先配置硬盘')
return
miner_name = config['hpool_pp_miner_name']
apikey = config['hpool_pp_apikey']
if not miner_name:
self.editMinerName.setFocus()
return
if not apikey:
self.editApiKey.setFocus()
return
if len(apikey) != 36:
QMessageBox.information(self, '提示', 'API Key长度是36,请检查')
return
if manual and self.main_window.tabHuobiPoolMineWidget.mine_process:
if QMessageBox.information(self, '警告',
f"确定要双挖吗?\n双挖后一旦爆块,矿池将会对你进行永久性封号!",
QMessageBox.Ok | QMessageBox.Cancel) == QMessageBox.Cancel:
return
if not self.generateMineConfig():
return
t = threading.Thread(target=self.mineThread)
t.start()
self.editMinerName.setDisabled(True)
self.editApiKey.setDisabled(True)
self.buttonStart.setText('停止挖矿')
index = self.main_window.tabWidget.indexOf(self.main_window.tabHPoolPPMine)
self.main_window.tabWidget.setTabText(index, 'HPoolPP新矿池挖矿(正在挖矿)')
def generateMineConfig(self):
config = get_config()
if 'hdd_folders' not in config or not config['hdd_folders']:
QMessageBox.information(self, '提示', '请先配置最终目录')
return False
plat = platform.system()
if plat == 'Windows':
folder = 'windows'
else:
return False
config_file = os.path.join(BASE_DIR, 'bin', folder, 'miner', 'hpool', 'config_gg.yaml')
paths = ''
for folder_obj in config['hdd_folders']:
if not folder_obj['mine'] or not folder_obj['new_plot']:
continue
folder = folder_obj['folder']
if paths:
paths += u'\n'
paths += f'- {folder}'
if not paths:
QMessageBox.information(self, '提示', '最终目录中没有可以挖矿的目录')
return False
content = f'''token: ""
path:
{paths}
minerName: {config['hpool_pp_miner_name']}
apiKey: {config['hpool_pp_apikey']}
singleThreadLoad: false
cachePath: ""
deviceId: ""
extraParams: {{}}
log:
lv: info
path: ./log
name: miner.log
url:
info: ""
submit: ""
scanPath: true
scanMinute: 60
debug: ""
'''
try:
            with open(config_file, 'w', encoding='utf-8') as f:
                f.write(content)
except Exception as e:
return False
return True
def mineThread(self):
plat = platform.system()
if plat == 'Windows':
folder = 'windows'
bin_file = 'hpool-miner-chia-pp-console.exe'
elif plat == 'Darwin':
folder = 'macos'
bin_file = 'hpool-miner-chia'
elif plat == 'Linux':
folder = 'linux'
bin_file = 'hpool-miner-chia'
else:
self.signalMineLog.emit(f'unknown platform {plat}')
return False
config_file = os.path.join(BASE_DIR, 'bin', folder, 'miner', 'hpool', 'config_gg.yaml')
exe_file = os.path.join(BASE_DIR, 'bin', folder, 'miner', 'hpool', bin_file)
self.mine_process = Popen([exe_file, '-config', config_file], stdout=PIPE, stderr=STDOUT, creationflags=CREATE_NO_WINDOW)
while True:
if self.mine_process is None:
break
line = self.mine_process.stdout.readline()
text = line.decode('utf-8')
if not text and self.mine_process.poll() is not None:
break
self.signalMineLog.emit(text)
self.mine_process = None
self.signalMineTerminated.emit()
|
sekal.py
|
# -*- coding: utf-8 -*-
import time
from threading import Thread
class Track(object):
def __init__(self, name, called):
self.name = name
self.called = called
self.steps = {}
self.tick_period_s = 0
def __getitem__(self, step_sec):
step = int(step_sec / self.tick_period_s)
return self.steps.get(step)
def __setitem__(self, step_sec, value):
step = int(step_sec / self.tick_period_s)
self.steps[step] = value
def call(self, step):
if step in self.steps:
self.called(*self.steps[step][0], **self.steps[step][1])
class Sequencer(object):
def __init__(self, tick_period_s, time_total_s, auto_reload=False):
self._tick_period_s = tick_period_s
self._tracks = []
self._number_of_ticks = int(time_total_s / (tick_period_s))
self._auto_reload = auto_reload
self._running = False
self._thread = Thread(target=self._run)
def addTrack(self, track):
self._tracks.append(track)
track.tick_period_s = self._tick_period_s
def start(self):
self._running = True
self._thread.start()
def stop(self):
self._running = False
        if self._thread.is_alive():
self._thread.join()
def _run(self):
while True:
for tick in range(self._number_of_ticks):
tn = time.time()
for track in self._tracks:
track.call(tick)
wait = (self._tick_period_s) - (time.time() - tn)
time.sleep(max(wait, 0))
if not self._running:
break
if not self._auto_reload or not self._running:
break
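# --- Minimal usage sketch (not part of the original module) ---
# Assumes a hypothetical callback `beep`; Track steps are keyed by time in seconds,
# quantized to tick indices, and each step stores an (args, kwargs) pair for the callback.
#
# def beep(msg):
#     print(msg)
#
# seq = Sequencer(tick_period_s=0.1, time_total_s=2.0)
# track = Track('demo', beep)
# seq.addTrack(track)                      # sets track.tick_period_s before steps are added
# track[0.5] = (('half a second',), {})
# track[1.5] = (('one and a half',), {})
# seq.start()
# time.sleep(2.5)
# seq.stop()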
|
audio_module.py
|
#!/usr/bin/env python3
"""
This is the first version of the UFAL experiments
to try using eye-tracking to understand
how humans process ambiguities during translation.
"""
import os
import sys
import threading
import tempfile
import queue
import sounddevice as sd
import soundfile as sf
q = queue.Queue()
class Record_Voice(object):
def __init__(self,location):
self.location = location
thread = threading.Thread(target=self.run,args=())
thread.daemon = True
thread.start()
def int_or_str(text):
"""Helper function for argument parsing."""
try:
return int(text)
except ValueError:
return text
def callback(indata, frames, time, status):
"""This is called (from a separate thread) for each audio block."""
if status:
print(status, file=sys.stderr)
q.put(indata.copy())
def run(self):
filename = tempfile.mktemp(prefix='system_recorded_audio',suffix='.wav', dir=self.location)
# Make sure the file is opened before recording anything:
with sf.SoundFile(filename, mode='x',samplerate=44100,channels=2) as file:
#with sf.SoundFile(filename, mode='x',samplerate=44100,channels=32) as file:
            # Keep the input stream's sample rate and channel count in sync with the SoundFile above.
            with sd.InputStream(samplerate=44100, channels=2, callback=Record_Voice.callback):
print('#' * 80)
print('Initiated audio stream recording')
print('#' * 80)
while True:
file.write(q.get())
print('\nRecording finished: ' + repr(filename))
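# --- Minimal usage sketch (not part of the original module) ---
# Record_Voice starts recording in a daemon thread as soon as it is constructed,
# so the main thread must stay alive for as long as audio should be captured.
#
# if __name__ == '__main__':
#     import time
#     recorder = Record_Voice('/tmp')   # '/tmp' is an arbitrary example location
#     time.sleep(10)                    # record for ~10 seconds, then exit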
|
process.py
|
# Python's os module wraps common system calls, including fork, which makes it easy to create child processes from a Python program.
import os
print('process (%s) start...' % os.getpid())
# pid = os.fork()
# if pid == 0 :
# print('I am child process (%s) and my parent is %s' % (os.getpid(), os.getppid()))
# else:
# print('I (%s) just created a child process (%s).' %(os.getpid(), pid))
# multiprocessing
# The multiprocessing module provides a Process class that represents a process object.
from multiprocessing import Process
import os
def run_proc(name):
print('Run child process %s (%s)...' % (name, os.getpid()))
if __name__ == '__main__':
print('Parent process %s.' % os.getpid())
p = Process(target=run_proc, args=('test',))
print('Child process will start.')
p.start()
p.join()
print('Child process end.')
# To create a child process, just pass a target function and its arguments to a Process instance and launch it with start(); this is even simpler than fork().
# join() waits for the child process to finish before continuing, and is typically used for synchronization between processes.
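# A minimal sketch (not part of the original snippet) of the same start()/join()
# pattern applied to several child processes at once:
#
# if __name__ == '__main__':
#     workers = [Process(target=run_proc, args=('worker-%d' % i,)) for i in range(3)]
#     for w in workers:
#         w.start()
#     for w in workers:
#         w.join()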
|
test_queues.py
|
# -*- coding: utf-8 -*-
import time
import socket
from .utils import require_module, missing, LETTERS
import logbook
from logbook.helpers import u
import pytest
@require_module('zmq')
def test_zeromq_handler(logger, handlers, subscriber):
tests = [
u('Logging something'),
u('Something with umlauts äöü'),
u('Something else for good measure'),
]
for test in tests:
for handler in handlers:
with handler:
logger.warn(test)
record = subscriber.recv()
assert record.message == test
assert record.channel == logger.name
@require_module('zmq')
def test_zeromq_background_thread(logger, handlers, subscriber):
test_handler = logbook.TestHandler()
controller = subscriber.dispatch_in_background(test_handler)
for handler in handlers:
with handler:
logger.warn('This is a warning')
logger.error('This is an error')
# stop the controller. This will also stop the loop and join the
# background process. Before that we give it a fraction of a second
# to get all results
time.sleep(0.5)
controller.stop()
assert test_handler.has_warning('This is a warning')
assert test_handler.has_error('This is an error')
@missing('zmq')
def test_missing_zeromq():
from logbook.queues import ZeroMQHandler, ZeroMQSubscriber
with pytest.raises(RuntimeError):
ZeroMQHandler('tcp://127.0.0.1:42000')
with pytest.raises(RuntimeError):
ZeroMQSubscriber('tcp://127.0.0.1:42000')
class MultiProcessingHandlerSendBack(object):
def __init__(self, queue):
self.queue = queue
def __call__(self):
from logbook.queues import MultiProcessingHandler
handler = MultiProcessingHandler(self.queue)
handler.push_thread()
try:
logbook.warn('Hello World')
finally:
handler.pop_thread()
@require_module('multiprocessing')
def test_multi_processing_handler():
from multiprocessing import Process, Queue
from logbook.queues import MultiProcessingSubscriber
queue = Queue(-1)
test_handler = logbook.TestHandler()
subscriber = MultiProcessingSubscriber(queue)
p = Process(target=MultiProcessingHandlerSendBack(queue))
p.start()
p.join()
with test_handler:
subscriber.dispatch_once()
assert test_handler.has_warning('Hello World')
def test_threaded_wrapper_handler(logger):
from logbook.queues import ThreadedWrapperHandler
test_handler = logbook.TestHandler()
with ThreadedWrapperHandler(test_handler) as handler:
logger.warn('Just testing')
logger.error('More testing')
# give it some time to sync up
handler.close()
assert (not handler.controller.running)
assert test_handler.has_warning('Just testing')
assert test_handler.has_error('More testing')
@require_module('execnet')
def test_execnet_handler():
def run_on_remote(channel):
import logbook
from logbook.queues import ExecnetChannelHandler
handler = ExecnetChannelHandler(channel)
log = logbook.Logger('Execnet')
handler.push_application()
log.info('Execnet works')
import execnet
gw = execnet.makegateway()
channel = gw.remote_exec(run_on_remote)
from logbook.queues import ExecnetChannelSubscriber
subscriber = ExecnetChannelSubscriber(channel)
record = subscriber.recv()
assert record.msg == 'Execnet works'
gw.exit()
class SubscriberGroupSendBack(object):
def __init__(self, message, queue):
self.message = message
self.queue = queue
def __call__(self):
from logbook.queues import MultiProcessingHandler
with MultiProcessingHandler(self.queue):
logbook.warn(self.message)
@require_module('multiprocessing')
def test_subscriber_group():
from multiprocessing import Process, Queue
from logbook.queues import MultiProcessingSubscriber, SubscriberGroup
a_queue = Queue(-1)
b_queue = Queue(-1)
subscriber = SubscriberGroup([
MultiProcessingSubscriber(a_queue),
MultiProcessingSubscriber(b_queue)
])
for _ in range(10):
p1 = Process(target=SubscriberGroupSendBack('foo', a_queue))
p2 = Process(target=SubscriberGroupSendBack('bar', b_queue))
p1.start()
p2.start()
p1.join()
p2.join()
messages = [subscriber.recv().message for i in (1, 2)]
assert sorted(messages) == ['bar', 'foo']
@require_module('redis')
def test_redis_handler():
import redis
from logbook.queues import RedisHandler
KEY = 'redis'
FIELDS = ['message', 'host']
r = redis.Redis(decode_responses=True)
redis_handler = RedisHandler(level=logbook.INFO, bubble=True)
# We don't want output for the tests, so we can wrap everything in a NullHandler
null_handler = logbook.NullHandler()
# Check default values
with null_handler.applicationbound():
with redis_handler:
logbook.info(LETTERS)
key, message = r.blpop(KEY)
# Are all the fields in the record?
for field in FIELDS:
        assert field in message
assert key == KEY
    assert LETTERS in message
# Change the key of the handler and check on redis
KEY = 'test_another_key'
redis_handler.key = KEY
with null_handler.applicationbound():
with redis_handler:
logbook.info(LETTERS)
key, message = r.blpop(KEY)
assert key == KEY
# Check that extra fields are added if specified when creating the handler
FIELDS.append('type')
extra_fields = {'type': 'test'}
    del redis_handler
redis_handler = RedisHandler(key=KEY, level=logbook.INFO,
extra_fields=extra_fields, bubble=True)
with null_handler.applicationbound():
with redis_handler:
logbook.info(LETTERS)
key, message = r.blpop(KEY)
for field in FIELDS:
        assert field in message
    assert 'test' in message
# And finally, check that fields are correctly added if appended to the
# log message
FIELDS.append('more_info')
with null_handler.applicationbound():
with redis_handler:
logbook.info(LETTERS, more_info='This works')
key, message = r.blpop(KEY)
for field in FIELDS:
        assert field in message
    assert 'This works' in message
@require_module('redis')
def test_redis_handler_lpush():
"""
    Test if lpush stores messages in the right order:
    new items should be first on the list
"""
import redis
from logbook.queues import RedisHandler
null_handler = logbook.NullHandler()
redis_handler = RedisHandler(key='lpushed', push_method='lpush',
level=logbook.INFO, bubble=True)
with null_handler.applicationbound():
with redis_handler:
logbook.info("old item")
logbook.info("new item")
time.sleep(1.5)
r = redis.Redis(decode_responses=True)
logs = r.lrange('lpushed', 0, -1)
assert logs
assert "new item" in logs[0]
r.delete('lpushed')
@require_module('redis')
def test_redis_handler_rpush():
"""
    Test if rpush stores messages in the right order:
    old items should be first on the list
"""
import redis
from logbook.queues import RedisHandler
null_handler = logbook.NullHandler()
redis_handler = RedisHandler(key='rpushed', push_method='rpush',
level=logbook.INFO, bubble=True)
with null_handler.applicationbound():
with redis_handler:
logbook.info("old item")
logbook.info("new item")
time.sleep(1.5)
r = redis.Redis(decode_responses=True)
logs = r.lrange('rpushed', 0, -1)
assert logs
assert "old item" in logs[0]
r.delete('rpushed')
@pytest.fixture
def handlers(handlers_subscriber):
return handlers_subscriber[0]
@pytest.fixture
def subscriber(handlers_subscriber):
return handlers_subscriber[1]
@pytest.fixture
def handlers_subscriber(multi):
from logbook.queues import ZeroMQHandler, ZeroMQSubscriber
# Get an unused port
tempsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tempsock.bind(('127.0.0.1', 0))
host, unused_port = tempsock.getsockname()
tempsock.close()
# Retrieve the ZeroMQ handler and subscriber
uri = 'tcp://%s:%d' % (host, unused_port)
if multi:
handlers = [ZeroMQHandler(uri, multi=True) for _ in range(3)]
else:
handlers = [ZeroMQHandler(uri)]
subscriber = ZeroMQSubscriber(uri, multi=multi)
# Enough time to start
time.sleep(0.1)
return handlers, subscriber
@pytest.fixture(params=[True, False])
def multi(request):
return request.param
|
RamaNet2.py
|
#!/usr/bin/env python3
print('''\x1b[32m
██████╗ █████╗ ███╗ ███╗ █████╗ ███╗ ██╗███████╗████████╗
██╔══██╗██╔══██╗████╗ ████║██╔══██╗████╗ ██║██╔════╝╚══██╔══╝
██████╔╝███████║██╔████╔██║███████║██╔██╗ ██║█████╗ ██║
██╔══██╗██╔══██║██║╚██╔╝██║██╔══██║██║╚██╗██║██╔══╝ ██║
██║ ██║██║ ██║██║ ╚═╝ ██║██║ ██║██║ ╚████║███████╗ ██║
╚═╝ ╚═╝╚═╝ ╚═╝╚═╝ ╚═╝╚═╝ ╚═╝╚═╝ ╚═══╝╚══════╝ ╚═╝\x1b[35m
╔╦╗┌─┐ ┌┐┌┌─┐┬ ┬┌─┐ ╔═╗┬─┐┌─┐┌┬┐┌─┐┬┌┐┌ ╔╦╗┌─┐┌─┐┬┌─┐┌┐┌
║║├┤ ││││ │└┐┌┘│ │ ╠═╝├┬┘│ │ │ ├┤ ││││ ║║├┤ └─┐││ ┬│││
═╩╝└─┘ ┘└┘└─┘ └┘ └─┘ ╩ ┴└─└─┘ ┴ └─┘┴┘└┘ ═╩╝└─┘└─┘┴└─┘┘└┘
\u001b[31mAuthors: \x1b[33mSari Sabban and Mikhail Markovsky
\u001b[31mDate: \x1b[33m31-May-2017
\u001b[31mCorrespondace: \x1b[33msari.sabban@gmail.com
\u001b[31mURL: \x1b[33mhttps://sarisabban.github.io/RamaNet
\x1b[36m---------------------------------------------------------\x1b[0m''')
import os
import re
import sys
import h5py
import time
import glob
import math
import tqdm
import gzip
import keras
import random
import sklearn
import Bio.PDB
import datetime
import warnings
import argparse
import numpy as np
import pandas as pd
import tensorflow as tf
from pyrosetta import *
from pyrosetta.toolbox import *
from keras.optimizers import Adam
from keras.models import Sequential, Model
from keras.losses import BinaryCrossentropy
from keras.layers.convolutional import Conv2D
from keras.layers import Activation, ZeroPadding2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers import Input, Dense, Reshape, Flatten
from keras.layers import UpSampling2D, BatchNormalization
from keras.layers import Dropout, GlobalMaxPooling2D, Conv2DTranspose
# Silence Tensorflow, Keras, and initialise PyRosetta
def warn(*args, **kwargs): pass
warnings.warn = warn
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
init('-out:level 0')
print('\x1b[36m--------------------------------------------------------\x1b[0m')
# Setup arguments
parser = argparse.ArgumentParser(description='De Novo Protein Design Neural Network')
parser.add_argument('-d', '--dataset', nargs='+', metavar='', help='Build the Backbone or Sequence datasets')
parser.add_argument('-f', '--frag', action='store_true', help='Build the Fragment dataset')
parser.add_argument('-tb', '--TrainBack', action='store_true', help='Train the Backbone neural network')
parser.add_argument('-tf', '--TrainFrag', action='store_true', help='Train the Fragment neural network')
parser.add_argument('-ts', '--TrainSeq', action='store_true', help='Train the Sequence neural network')
args = parser.parse_args()
class Dataset():
''' Build a machine learning dataset of protein structures '''
def Database(self, TempDIR, FinalDIR):
'''
        Downloads the entire PDB database from https://www.wwpdb.org/,
        moves all files into one directory, then uncompresses all the files.
        Generates a directory which contains all .PDB structure files
'''
print('\x1b[33m[.] Downloading PDB database...\x1b[0m')
web = 'rsync.wwpdb.org::ftp/data/structures/divided/pdb/'
os.system('rsync -rlpt -q -v -z --delete --port=33444 {} {}'
.format(web, TempDIR))
print('\x1b[32m[+] Download complete\x1b[0m')
os.mkdir(FinalDIR)
filelist = os.listdir(TempDIR)
print('\x1b[33m[.] Moving files...\x1b[0m')
for directories in tqdm.tqdm(filelist):
files = os.listdir('{}/{}'.format(TempDIR, directories))
for afile in files:
location = ('{}/{}/{}'.format(TempDIR, directories, afile))
os.rename(location, '{}/{}'.format(FinalDIR, afile))
os.system('rm -r ./{}'.format(TempDIR))
print('\x1b[32m[+] Moving complete\x1b[0m')
def Extract(self, directory):
'''
        Extracts all the .ent.gz files, separates all chains, and saves them
        into separate .pdb files. Replaces each .ent.gz file with the .pdb
        file of each chain
'''
print('\x1b[33m[.] Extracting files...\x1b[0m')
current = os.getcwd()
pdbfilelist = os.listdir(directory)
io = Bio.PDB.PDBIO()
os.chdir(directory)
for TheFile in tqdm.tqdm(pdbfilelist):
try:
TheName = TheFile.split('.')[0].split('pdb')[1].upper()
InFile = gzip.open(TheFile, 'rt')
structure = Bio.PDB.PDBParser(QUIET=True)\
.get_structure(TheName, InFile)
count = 0
for chain in structure.get_chains():
io.set_structure(chain)
io.save(structure.get_id()+'_'+chain.get_id()+'.pdb')
os.remove(TheFile)
except Exception as TheError:
print('\x1b[31m[-] Failed to extract\t{}\x1b[33m: {}\x1b[0m'
.format(TheFile.upper(), str(TheError)))
os.remove(TheFile)
os.chdir(current)
def NonProtein(self, directory):
''' Remove non-protein structures '''
        print('\x1b[33m[.] Deleting non-protein structures...\x1b[0m')
current = os.getcwd()
pdbfilelist = os.listdir(directory)
os.chdir(directory)
for TheFile in tqdm.tqdm(pdbfilelist):
try:
structure = Bio.PDB.PDBParser(QUIET=True)\
.get_structure('X', TheFile)
ppb = Bio.PDB.Polypeptide.PPBuilder()
Type = ppb.build_peptides(structure, aa_only=True)
if Type == []: os.remove(TheFile)
else: continue
except: os.remove(TheFile)
os.chdir(current)
def Size(self, directory, Size_From, Size_To):
''' Remove structures not within defined size '''
print('\x1b[33m[.] Removing unwanted structure sizes...\x1b[0m')
current = os.getcwd()
pdbfilelist = os.listdir(directory)
os.chdir(directory)
for TheFile in tqdm.tqdm(pdbfilelist):
try:
parser = Bio.PDB.PDBParser()
structure = parser.get_structure('X', TheFile)
model = structure[0]
dssp = Bio.PDB.DSSP(model, TheFile, acc_array='Wilke')
for aa in dssp: length = aa[0]
if length >= int(Size_To) or length <= int(Size_From):
os.remove(TheFile)
except: print('\x1b[31m[-] Error in finding protein size\x1b[0m')
os.chdir(current)
def Break(self, directory):
''' Remove structures with a broken (non-continuous) chains '''
print('\x1b[33m[.] Removing non-continuous structures...\x1b[0m')
current = os.getcwd()
pdbfilelist = os.listdir(directory)
os.chdir(directory)
for TheFile in tqdm.tqdm(pdbfilelist):
structure = Bio.PDB.PDBParser(QUIET=True)\
.get_structure('X', TheFile)
ppb = Bio.PDB.Polypeptide.PPBuilder()
Type = ppb.build_peptides(structure, aa_only=True)
try:
x = Type[1]
os.remove(TheFile)
except: continue
os.chdir(current)
def Loops(self, directory, LoopLength):
'''
        Remove structures that have loops larger than a
        specific length
'''
print('\x1b[33m[.] Removing structures with long loops...\x1b[0m')
current = os.getcwd()
pdbfilelist = os.listdir(directory)
os.chdir(directory)
for TheFile in tqdm.tqdm(pdbfilelist):
try:
parser = Bio.PDB.PDBParser()
structure = parser.get_structure('X', TheFile)
model = structure[0]
dssp = Bio.PDB.DSSP(model, TheFile, acc_array='Wilke')
SS = list()
for res in dssp:
ss = res[2]
if ss == '-' or ss == 'T' or ss == 'S': SS.append('L')
else: SS.append('.')
loops = ''.join(SS).split('.')
loops = [item for item in loops if item]
LargeLoop = None
for item in loops:
if len(item) <= LoopLength: continue
else: LargeLoop = 'LargeLoop'
if LargeLoop == 'LargeLoop': os.remove(TheFile)
else: continue
except: os.remove(TheFile)
os.chdir(current)
def Renumber(self, directory):
''' Renumber structures starting at 1 '''
print('\x1b[33m[.] Renumbering structures...\x1b[0m')
current = os.getcwd()
pdbfilelist = os.listdir(directory)
os.chdir(directory)
for TheFile in tqdm.tqdm(pdbfilelist):
pdb = open(TheFile, 'r')
PDB = open(TheFile+'X', 'w')
count = 0
num = 0
AA2 = None
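            # PDB is a fixed-column format: the slices below keep columns 1-7, write a new
            # atom serial number into columns 8-11, force the chain ID (column 22) to 'A',
            # and write the renumbered residue number into columns 23-26.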
for line in pdb:
count += 1
AA1 = line[23:27]
if not AA1 == AA2: num += 1
final_line =line[:7]+'{:4d}'.format(count)+line[11:17]+\
line[17:21]+'A'+'{:4d}'.format(num)+line[26:]
AA2 = AA1
PDB.write(final_line)
PDB.close()
os.remove(TheFile)
os.rename(TheFile+'X', TheFile)
os.chdir(current)
def Rg(self, directory, RGcutoff):
        ''' Remove structures whose radius of gyration is below the cutoff value '''
        print('\x1b[33m[.] Removing structures with low Rg values...\x1b[0m')
current = os.getcwd()
pdbfilelist = os.listdir(directory)
os.chdir(directory)
for TheFile in tqdm.tqdm(pdbfilelist):
mass = list()
Structure = open(TheFile, 'r')
for line in Structure:
line = line.split()
if line[0] == 'TER' or line[0] == 'END': continue
else:
if line[-1] == 'C': mass.append(12.0107)
elif line[-1] == 'O': mass.append(15.9994)
elif line[-1] == 'N': mass.append(14.0067)
elif line[-1] == 'S': mass.append(32.0650)
elif line[-1] == 'H': mass.append(1.00794)
else: continue
coord = list()
p = Bio.PDB.PDBParser()
structure = p.get_structure('X', TheFile)
for model in structure:
for chain in model:
for residue in chain:
for atom in residue: coord.append(atom.get_coord())
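            # Mass-weighted radius of gyration about the centre of mass:
            # Rg = sqrt(sum_i m_i*|r_i|^2 / M - |R_cm|^2), where M = sum_i m_i and
            # R_cm = sum_i m_i*r_i / M; rr/tmass and mm below are those two terms.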
xm = [(m*i, m*j, m*k) for (i, j, k), m in zip(coord, mass)]
tmass = sum(mass)
rr = sum(mi*i + mj*j + mk*k for (i, j, k), (mi, mj, mk)\
in zip(coord, xm))
mm = sum((sum(i)/tmass)**2 for i in zip( * xm))
rg = math.sqrt(rr/tmass-mm)
if rg <= RGcutoff: os.remove(TheFile)
else: continue
os.chdir(current)
def Clean(self, directory):
''' Clean each structure within a directory '''
print('\x1b[33m[.] Cleaning structures...\x1b[0m')
os.mkdir('PDBCleaned')
current = os.getcwd()
pdbfilelist = os.listdir(directory)
os.chdir(directory)
for TheFile in tqdm.tqdm(pdbfilelist):
CurFile = open(TheFile, 'r')
NewFile = open('Clean-{}'.format(TheFile), 'a')
for line in CurFile:
if line.split()[0] == 'ATOM': NewFile.write(line)
CurFile.close()
NewFile.close()
os.system('mv Clean-{} ../PDBCleaned'.format(TheFile))
os.chdir(current)
def Path(self, directory, path):
''' Generate a file with the path to each file '''
print('\x1b[33m[.] Generating paths...\x1b[0m')
current = os.getcwd()
pdbfilelist = os.listdir(directory)
os.chdir(directory)
PathFile = open('PDB.list', 'a')
for TheFile in tqdm.tqdm(pdbfilelist):
line = '{}/PDBCleaned/{}\n'.format(path, TheFile)
PathFile.write(line)
os.system('mv PDB.list ../')
os.chdir(current)
def RelaxHPC(self, path, cores):
'''
        Generate a PBS job submission script to relax each structure
        on an HPC
'''
HPCfile = open('relax.pbs', 'w')
HPCfile.write('#!/bin/bash\n')
HPCfile.write('#PBS -N Relax\n')
HPCfile.write('#PBS -q fat\n')
HPCfile.write('#PBS -l select=1:ncpus=1\n')
HPCfile.write('#PBS -j oe\n')
HPCfile.write('#PBS -J 1-{}\n'.format(str(cores)))
HPCfile.write('cd $PBS_O_WORKDIR\n')
HPCfile.write('mkdir PDBRelaxed\n')
HPCfile.write('cd PDBRelaxed\n')
HPCfile.write('''thefile=$(awk -v "line=${PBS_ARRAY_INDEX}"''')
HPCfile.write(''''NR == line { print; exit }' ../PDB.list)\n''')
HPCfile.write('{}/main/source/bin/'.format(path))
HPCfile.write('relax.default.linuxgccrelease')
HPCfile.write('-relax:thorough -nstruct 100 -database ')
HPCfile.write('{}/main/database -s $thefile'.format(path))
print('\x1b[32m[+] Generated HPC job submission file\x1b[0m')
def Relax(self, directory):
''' Relax each structure in a directory on a local computer '''
print('\x1b[33m[.] Relaxing structures...\x1b[0m')
os.mkdir('PDBRelaxed')
current = os.getcwd()
pdbfilelist = os.listdir(directory)
os.chdir(directory)
for TheFile in tqdm.tqdm(pdbfilelist):
for i in range(1, 101):
scorefxn = get_fa_scorefxn()
relax = pyrosetta.rosetta.protocols.relax.FastRelax()
relax.set_scorefxn(scorefxn)
pose = pose_from_pdb(TheFile)
relax.apply(pose)
pose.dump_pdb('Relaxed{}-{}'.format(i, TheFile))
os.system('mv Relaxed{}-{} ../PDBRelaxed'.format(i, TheFile))
os.chdir(current)
def C_Max(self, filename):
''' Find the maximum value of the Distance Map in a dataset '''
max_in_line = []
with open(filename, 'r') as f:
next(f)
for line in f:
line = line.strip().split(',')[1:]
line = [float(item) for item in line]
max_in_line.append(max(line))
maximum = max(max_in_line)
print('\x1b[32m[+] Contact Map maximum value: {}\x1b[0m'\
.format(maximum))
return(maximum)
def DatasetPSCM(self, directory):
'''
Compile a dataset of each residue's phi and psi angles and another
dataset of the contact map for each structure. This dataset is padded
with zeros.
'''
a = 'Compiling phi and psi angles dataset'
        b = ' as well as a distance matrix dataset'
text = a+b
print('\x1b[32m{}\x1b[0m'.format(text))
# Setup dataset header for angles
headerPS = ['PDB_ID']
for i in range(1, 150+1):
headerPS.append(',phi_{},psi_{}'.format(i, i))
headerPS = ''.join(headerPS)
with open('./PS.csv', 'w') as headPS:
headPS.write(headerPS+'\n')
# Setup dataset header for distance matrices
headerCM = ['PDB_ID']
for r in range(1, 150+1):
for c in range(1, 150+1):
headerCM.append(',{}{}'.format(r, c))
headerCM = ''.join(headerCM)
with open('./CM.csv', 'w') as headCM:
headCM.write(headerCM+'\n')
for File in tqdm.tqdm(os.listdir(directory)):
TheFile = '{}/{}'.format(directory, File)
try:
# Compile angles
pose = pose_from_pdb(TheFile)
phi = []
psi = []
for aa in range(len(pose.residues)):
try:
p = pose.phi(aa+1)
s = pose.psi(aa+1)
if p < 0: p = p+360
if s < 0: s = s+360
phi.append(p)
psi.append(s)
except: pass
angles = []
for P, S in zip(phi, psi):
angles.append(str(round(P, 5))+','+str(round(S, 5)))
assert len(phi) == len(psi)
Angles = ','.join(angles)
if len(angles) >= 150: AngLine = Angles
else:
addition = 150-len(angles)
zeros = []
for adds in range(addition): zeros.append('0.0,0.0')
Zeros = ','.join(zeros)
AngLine = '{},{}'.format(Angles, Zeros)
ThePSLine = '{},{}\n'.format(File, AngLine)
with open('PS.csv', 'a') as PSdata:
PSdata.write(ThePSLine)
#Compile contact map (Ca-Ca contact <= 12 angstroms)
BIO = Bio.PDB.PDBParser(QUIET=True)
structure = BIO.get_structure('X', TheFile)
ppb = Bio.PDB.Polypeptide.PPBuilder()
Type = ppb.build_peptides(structure, aa_only=False)
model = Type
chain = model[0]
CM = []
for aa1 in range(0, 150):
for aa2 in range(0, 150):
try:
residue1 = chain[aa1]
residue2 = chain[aa2]
atom1 = residue1['CA']
atom2 = residue2['CA']
if atom1-atom2 <= 12: CM.append(str(atom1-atom2))
else: CM.append(str(0))
except:
CM.append(str(0))
assert len(CM) == 22500
ContactMap = ','.join(CM)
TheCMLine = '{},{}\n'.format(File, ContactMap)
with open('CM.csv', 'a') as CMdata:
CMdata.write(TheCMLine)
except: pass
def VectorisePSCM(self, PS_file='PS.csv',
CM_file='CM.csv',
C_MAX=12,
fp=np.float64):
'''
This function vectorises the backbone PS and CM datasets, normalises
        them, combines them, constructs the final tensor, and exports the
        result as a serialised file.
'''
# 1. Import a single row of PS dataset
with open(PS_file) as PSf:
next(PSf)
P, S = [], []
for line in PSf:
# 2. Isolate different angles
line = line.strip().split(',')
p = [float(item) for item in line[1::2]]
s = [float(item) for item in line[2::2]]
assert len(p) == len(s)
P.append(np.array(p, dtype=fp))
S.append(np.array(s, dtype=fp))
with open(CM_file) as CMf:
next(CMf)
CM = []
for line in CMf:
# 3. Isolate different points
line = [float(item) for item in line.strip().split(',')[1:]]
cm = np.reshape(line, (150, 150))
CM.append(np.array(cm, dtype=fp))
# 4. Construct PS matrices
P = np.array(P)
S = np.array(S)
# 5. Normalise PS angles (min/max) [-1, 1]
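        # (phi/psi were shifted into [0, 360) upstream, so x/180 - 1 maps them onto
        #  [-1, 1); the contact map below is rescaled the same way with C_MAX as the
        #  upper bound)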
P /= 180
S /= 180
P -= 1
S -= 1
PS = np.array([P, S])
PS = np.swapaxes(PS, 0, 2)
PS = np.swapaxes(PS, 0, 1)
# 6. Construct CM matrices
CM = np.array(CM)
# 7. Normalise CM contact map (min/max) [-1, 1]
CM /= (C_MAX/2)
CM -= 1
# 8. Construct final dataset matrix
dataset = np.concatenate([PS, CM], axis=2)
        # 9. Shuffle dataset (sklearn.utils.shuffle returns a shuffled copy, so reassign)
        dataset = sklearn.utils.shuffle(dataset)
# 10. Serialise tensors
with h5py.File('PS+CM.h5', 'w') as data:
dataset = data.create_dataset('default', data=dataset)
def DatasetAsPSaM(self, directory):
'''
        Compile a dataset of each residue's amino acid identity, secondary
        structure, phi angle, psi angle, and solvent accessible surface area as
        a .csv file, and the contact map as a separate .csv file. To be run
        after Clean() on the ./cleaned directory. Also outputs a file
        identifying the sizes of structures, so the largest value can be used
        with HeaderAsPSaM()
'''
os.makedirs('./Completed', exist_ok=True)
os.makedirs('./Error_NotEqual', exist_ok=True)
os.makedirs('./Error_Broken', exist_ok=True)
os.makedirs('./Error_Small', exist_ok=True)
for File in tqdm.tqdm(os.listdir(directory)):
try:
TheFile = '{}/{}'.format(directory, File)
pose = pose_from_pdb(TheFile)
DSSP = pyrosetta.rosetta.protocols.moves.DsspMover()
DSSP.apply(pose)
sasa_calc = pyrosetta.rosetta.core.scoring.sasa.SasaCalc()
sasa_calc.calculate(pose)
size = pose.total_residue()
aa = []
ss = []
phi = []
psi = []
sasa = []
info = []
ctmp = []
m = []
surf = list(sasa_calc.get_residue_sasa())
for r in range(size):
if pose.residue(r+1).is_protein():
aa.append(pose.sequence(r+1, r+1))
ss.append(pose.secstruct(r+1))
p = pose.phi(r+1)
if p < 0: p = p+360
phi.append(p)
s = pose.psi(r+1)
if s < 0: s = s+360
psi.append(s)
sasa.append(surf[r])
for r in range(0, size):
for R in range(0, size):
if pose.residue(r+1).is_protein() and\
pose.residue(R+1).is_protein():
CAr = pose.residue(r+1).xyz('CA')
CAR = pose.residue(R+1).xyz('CA')
CAr_CAR_vector = CAR-CAr
Cont = CAr_CAR_vector.norm()
if Cont <= 12: ctmp.append(Cont)
else: ctmp.append(0)
if len(aa) >= 50:
try:
assert len(aa) == len(ss) == len(phi)\
== len(psi) == len(sasa) == math.sqrt(len(ctmp))
for AA,SS,P,S,SASA in zip(aa,ss,phi,psi,sasa):
info.append('{},{},{},{},{}'\
.format(AA, SS, P, S, SASA))
Info = ','.join(info)
with open('./AsPSa_noheader_nofill.csv', 'a') as data:
data.write(File + ',' + Info + '\n')
with open('lengths.txt', 'a') as length:
length.write(str(len(aa))+',')
for x in ctmp:
m.append('{}'.format(x))
M = ','.join(m)
with open('./M_noheader_nofill.csv', 'a') as data:
data.write(File + ',' + M + '\n')
os.system('mv {} ./Completed'.format(TheFile))
except:
os.system('mv {} ./Error_NotEqual'\
.format(TheFile))
else: os.system('mv {} ./Error_Small'.format(TheFile))
            except: os.system('mv {} ./Error_Broken'.format(TheFile))
def HeaderAsPSaM(self, choice='AsPSa'):
'''
Constructs a .csv header and completes the dataset. To find the value of
the largest structure run: sort -nk 1 lengths.txt
'''
with open('lengths.txt', 'r') as L:
length = int(max(L.readlines()[0].strip().split(',')))
header = ['PDB_ID']
if choice == 'AsPSa':
for i in range(1, length+1):
header.append(',aa_{},ss_{},phi_{},psi_{},sasa_{}'\
.format(i, i, i, i, i))
header = ''.join(header)
with open('./AsPSa_noheader_nofill.csv', 'r') as data:
with open('./AsPSa_nofill.csv', 'w') as head:
head.write(header+'\n')
for line in data:
head.write(line)
os.remove('./AsPSa_noheader_nofill.csv')
elif choice == 'M':
for r in range(1, length+1):
for c in range(1, length+1):
header.append(',{}{}'.format(r, c))
header = ''.join(header)
with open('./M_noheader_nofill.csv', 'r') as data:
with open('./M_nofill.csv', 'w') as head:
head.write(header+'\n')
for line in data:
head.write(line)
os.remove('./M_noheader_nofill.csv')
def Fill(self, filename):
''' Fills missing .csv table spaces with zeros '''
name = filename.split('_')[0]
with open(filename) as f:
with open(name+'.csv', 'a') as F:
first_line = f.readline()
F.write(first_line)
size = len(first_line.strip().split(','))
for line in f:
line = line.strip().split(',')
gap = size - len(line)
for zero in range(gap):
line.append('0')
new_line = ','.join(line)
F.write(new_line + '\n')
os.remove(filename)
def VectoriseAsPSaM(self, filenameA='AsPSa.csv', filenameM='M.csv'):
'''
		This function vectorises the backbone PS and CM datasets, normalises
		them, combines them, constructs the final tensor, and exports the
		result in a serialised format.
'''
pass
def build(self, switches='', directory='PDBDatabase'):
if len(switches) == 20:
switch = list(switches)
if switch[0] == '1': self.Database('DATABASE', directory)
if switch[1] == '1': self.Extract(directory)
if switch[2] == '1': self.NonProtein(directory)
if switch[3] == '1': self.Size(directory, 80, 150)
if switch[4] == '1': self.Break(directory)
if switch[5] == '1': self.Loops(directory, 10)
if switch[6] == '1': self.Renumber(directory)
if switch[7] == '1': self.Rg(directory, 15)
########## --- HUMAN EYE FILTERING --- ##########
if switch[8] == '1': self.Clean(directory)
if switch[9] == '1': self.Path('PDBCleaned', '{PATH}')
if switch[10] == '1': self.RelaxHPC('~/Rosetta', 829)
if switch[11] == '1': self.Relax('PDBCleaned')
if switch[12] == '1': self.DatasetAsPSaM('PDBCleaned')
if switch[13] == '1': self.HeaderAsPSaM('AsPSa')
if switch[14] == '1':
self.HeaderAsPSaM('M')
os.remove('lengths.txt')
if switch[15] == '1':
self.Fill('AsPSa_nofill.csv')
self.Fill('M_nofill.csv')
if switch[16] == '1': self.DatasetPSCM('PDBCleaned')
if switch[17] == '1': self.C_Max('dataset_CM.csv')
if switch[18] == '1': self.VectorisePSCM()
			if switch[19] == '1': self.VectoriseAsPSaM()
else: print('\x1b[31m[-] Error\x1b[33m: Wrong string length\x1b[0m')
def Vall(filename='vall.jul19.2011', m=16800, nx=1490):
'''
Compile the PDB IDs, chains, phi, psi, omega, and SASA of all the structures
from the Rosetta vall.jul19.2011 database into a .csv file
'''
assert os.path.isfile('./{}'.format(filename)),\
'Make sure the vall.jul19.2011 file is in the same directory as this script'
with open(filename, 'r') as f:
with open('Fragments.csv', 'w') as F:
header = ['PDBID,Chain']
for i in range(1, nx+1):
header.append(',AA_{},SS_{},P_{},S_{},O_{},SASA_{}'\
.format(i, i, i, i, i, i))
header = ''.join(header)
F.write(header + '\n')
for i in range(30): next(f)
ID = []
CH = []
AA = []
SS = []
P = []
S = []
O = []
SASA= []
ID_seen = set()
for line in f:
line = line.strip().split()
if line[0] not in ID_seen:
exp = []
for aa, ss, p, s, o, sasa in zip(AA, SS, P, S, O, SASA):
exp.append('{},{},{},{},{},{}'\
.format(aa, ss, p, s, o, sasa))
exp = ','.join(exp)
if exp == '': pass
else: F.write(ID + ',' + CH + ',' + exp + '\n')
ID = None
CH = None
AA = []
SS = []
P = []
S = []
O = []
SASA = []
ID_seen.add(line[0])
ID = line[0][:4].upper()
CH = line[0][-1].upper()
AA.append(line[1])
SS.append(line[2])
P.append(line[14])
S.append(line[15])
O.append(line[16])
SASA.append(line[19])
else:
ID = line[0][:4].upper()
CH = line[0][-1].upper()
AA.append(line[1])
SS.append(line[2])
P.append(line[14])
S.append(line[15])
O.append(line[16])
SASA.append(line[19])
exp = []
for aa, ss, p, s, o, sasa in zip(AA, SS, P, S, O, SASA):
exp.append('{},{},{},{},{},{}'\
.format(aa, ss, p, s, o, sasa))
exp = ','.join(exp)
F.write(ID + ',' + CH + ',' + exp)
def Frag_vectorise(filename='Fragments.csv', nx=1452):
''' Vectorises the fragments dataset, normalises it, then serialises it '''
# 1. Import data
rows = len(open(filename).readlines()) - 1
# 2. Generate a list of random number of rows
lines = list(range(1, rows + 1))
random.shuffle(lines)
# 3. Open CSV file
with open(filename, 'r') as File: all_lines_variable = File.readlines()
PDBID, CHAIN, X, Y = [], [], [], []
for i in tqdm.tqdm(lines):
# 4. Import data line by line
line = all_lines_variable[i]
line = line.strip().split(',')
if line[0] == '1OFD': continue # Causes an error
aa = np.array(line[2::6])
ss = np.array(line[3::6])
p = np.array(line[4::6])
s = np.array(line[5::6])
o = np.array(line[6::6])
sasa = np.array(line[7::6])
p = np.array([float(i) for i in p])
s = np.array([float(i) for i in s])
o = np.array([float(i) for i in o])
sasa = np.array([float(i) for i in sasa])
# 5. Re-format data
aa[aa=='A'] = 0
aa[aa=='C'] = 1
aa[aa=='D'] = 2
aa[aa=='E'] = 3
aa[aa=='F'] = 4
aa[aa=='G'] = 5
aa[aa=='H'] = 6
aa[aa=='I'] = 7
aa[aa=='K'] = 8
aa[aa=='L'] = 9
aa[aa=='M'] = 10
aa[aa=='N'] = 11
aa[aa=='P'] = 12
aa[aa=='Q'] = 13
aa[aa=='R'] = 14
aa[aa=='S'] = 15
aa[aa=='T'] = 16
aa[aa=='V'] = 17
aa[aa=='W'] = 18
aa[aa=='Y'] = 19
ss[ss=='L'] = 0
ss[ss=='H'] = 1
ss[ss=='E'] = 2
p[p<0] = p[p<0] + 360
s[s<0] = s[s<0] + 360
o[o<0] = o[o<0] + 360
aa = aa.astype(int)
ss = ss.astype(int)
# 6. Padding categories
gap = nx - aa.size
for pad in range(gap):
aa = np.append(aa, -1)
ss = np.append(ss, -1)
# 7. One-hot encode amino acid sequences and secondary structures
Aminos = []
for x in aa:
letter = [0 for _ in range(20)]
if x != -1: letter[x] = 1
Aminos.append(letter)
Struct = []
for x in ss:
letter = [0 for _ in range(3)]
if x != -1: letter[x] = 1
Struct.append(letter)
aa = np.array(Aminos)
ss = np.array(Struct)
# 8. Normalise data [min/max]
p = (p-0)/(360-0)
s = (s-0)/(360-0)
o = (o-0)/(360-0)
sasa = (sasa-0)/(277-0)
# 9. Padding values
for pad in range(gap):
p = np.append(p, 0)
s = np.append(s, 0)
o = np.append(o, 0)
sasa = np.append(sasa, 0)
# 10. Expand axis
p = np.expand_dims(p, axis=1)
s = np.expand_dims(s, axis=1)
o = np.expand_dims(o, axis=1)
sasa = np.expand_dims(sasa, axis=1)
# 11. Export
featur = np.concatenate((aa, ss), axis=1)
angles = np.concatenate((p, s, o), axis=1)
PDBID.append(line[0])
CHAIN.append(line[1])
X.append(featur)
Y.append(angles)
PDBID = np.array(PDBID)
CHAIN = np.array(CHAIN)
PDBID = np.expand_dims(PDBID, axis=1)
CHAIN = np.expand_dims(CHAIN, axis=1)
X = np.array(X)
Y = np.array(Y)
print('X =', X.shape)
print('Y =', Y.shape)
# 12. Serialise tensors
with h5py.File('Frag_Y.h5', 'w') as y:
dset = y.create_dataset('default', data=Y)
with h5py.File('Frag_X.h5', 'w') as x:
dset = x.create_dataset('default', data=X)
def SQM(filename):
'''
Structure Quality Metric:
	Calculates the ratio of helices and sheets to loops, the percent of amino
	acids comprising the structure core, and the radius of gyration, maps each
	onto a 0.0-1.0 score, then averages the three scores. Returns a value
	between 0.0 and 1.0, where a good structure scores >= 0.6
'''
parser = Bio.PDB.PDBParser()
structure = parser.get_structure('{}'.format(filename), filename)
dssp = Bio.PDB.DSSP(structure[0], filename, acc_array='Wilke')
AminoAcid = { 'A':129, 'P':159, 'N':195, 'H':224,
'V':174, 'Y':263, 'C':167, 'K':236,
'I':197, 'F':240, 'Q':225, 'S':155,
'L':201, 'W':285, 'E':223, 'T':172,
'M':224, 'R':274, 'G':104, 'D':193}
sec_struct = []
SASA = []
for aa in dssp:
if aa[2] == 'G' or aa[2] == 'H' or aa[2] == 'I': ss = 'H'
elif aa[2] == 'B' or aa[2] == 'E': ss = 'S'
elif aa[2] == 'S' or aa[2] == 'T' or aa[2] == '-': ss = 'L'
sec_struct.append(ss)
sasa = AminoAcid[aa[1]]*aa[3]
if sasa <= 25: sasa = 'C'
elif 25 < sasa < 40:sasa = 'B'
elif sasa >= 40: sasa = 'S'
SASA.append(sasa)
''' Secondary structure measurement '''
H = len([x for x in sec_struct if x == 'H'])
S = len([x for x in sec_struct if x == 'S'])
L = len([x for x in sec_struct if x == 'L'])
total = len(sec_struct)
ratio = (H+S)/total
limit = 1
slope = 10
bias = 0.5
SS = limit/(1+np.exp(slope*(bias-ratio)))
''' SASA measurement '''
surface = len([x for x in SASA if x == 'S'])
boundery = len([x for x in SASA if x == 'B'])
in_core = len([x for x in SASA if x == 'C'])
total = len(SASA)
percent = (in_core*100)/total
Core = (2.50662/math.sqrt(2*(math.pi)))*math.exp(-((percent-30)**2)/100)
''' Radius of gyration measurement '''
coord = list()
mass = list()
Structure = open(filename, 'r')
for line in Structure:
try:
line = line.split()
x = float(line[6])
y = float(line[7])
z = float(line[8])
coord.append([x, y, z])
if line[-1] == 'C': mass.append(12.0107)
elif line[-1] == 'O': mass.append(15.9994)
elif line[-1] == 'N': mass.append(14.0067)
elif line[-1] == 'S': mass.append(32.065)
except: pass
xm = [(m*i, m*j, m*k) for (i, j, k), m in zip(coord, mass)]
tmass = sum(mass)
rr = sum(mi*i + mj*j + mk*k for (i, j, k), (mi, mj, mk) in zip(coord, xm))
mm = sum((sum(i)/tmass)**2 for i in zip(*xm))
rg = math.sqrt(rr/tmass-mm)
Rg = (2.50662/math.sqrt(2*(math.pi)))*math.exp(-((rg-12)**2)/40)
''' The metric '''
TheMetric = sum([SS, Core, Rg])/3
return(round(TheMetric, 5))
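# The three SQM terms above map raw measurements onto 0.0-1.0 scores: a logistic curve
# for the helix+sheet ratio, and Gaussian-shaped bumps (peak height 2.50662/sqrt(2*pi),
# i.e. ~1.0) centred on ~30% core residues and on a radius of gyration of ~12. The demo
# below is not part of the original pipeline; its default inputs are made-up sample
# values used purely to illustrate the shape of the metric.
def SQM_curves_demo(ratio=0.6, percent=30, rg=12):
	''' Illustrative sketch: re-evaluates the SQM scoring curves for sample inputs '''
	SS = 1/(1+np.exp(10*(0.5-ratio)))
	Core = (2.50662/math.sqrt(2*(math.pi)))*math.exp(-((percent-30)**2)/100)
	Rg = (2.50662/math.sqrt(2*(math.pi)))*math.exp(-((rg-12)**2)/40)
	print('SS={:.3f} Core={:.3f} Rg={:.3f} SQM={:.3f}'.format(SS, Core, Rg, (SS+Core+Rg)/3))
	return(round((SS+Core+Rg)/3, 5))
# e.g. SQM_curves_demo() scores ~0.91 for the default sample inputs above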
class fold():
''' Folds a protein structure given the phi/psi angles and contact map '''
def __init__(self, Pa, Sa, CM):
CM = np.reshape(CM, (150, 150))
self.size = len([i for i in np.diag(CM, k=1) if i!=0])
self.U = np.triu(CM, k=0)[:self.size, :self.size]
self.L = np.tril(CM, k=0)[:self.size, :self.size]
self.P = np.array(Pa)[:self.size]
self.S = np.array(Sa)[:self.size]
def upper_lower(self, side, name):
		''' Fold one triangle (upper or lower) of the contact map using the same phi/psi angles '''
Vs = []
for numb in range(self.size): Vs.append('A')
sequence = ''.join(Vs)
pose = pose_from_sequence(sequence)
for count, (phi, psi) in enumerate(zip(self.P, self.S)):
pose.set_phi(count+1, float(phi))
pose.set_psi(count+1, float(psi))
with open('constraints_{}.cst'.format(name), 'w') as thefile:
for a in range(1, self.size):
for A in range(1, self.size):
if side[a][A] !=0:
line = 'AtomPair CA {} CA {} GAUSSIANFUNC {} 1.0\n'\
.format(a, A, side[a][A])
thefile.write(line)
con = pyrosetta.rosetta.protocols.constraint_movers.ConstraintSetMover()
con.constraint_file('constraints_{}.cst'.format(name))
con.add_constraints(True)
con.apply(pose)
scorefxn = get_fa_scorefxn()
score_manager = pyrosetta.rosetta.core.scoring.ScoreTypeManager()
atom_pair_constraint = score_manager.\
score_type_from_name('atom_pair_constraint')
rama_prepro = score_manager.score_type_from_name('rama_prepro')
scorefxn.set_weight(atom_pair_constraint, 1)
scorefxn.set_weight(rama_prepro, 1)
relax = pyrosetta.rosetta.protocols.relax.FastRelax()
relax.set_scorefxn(scorefxn)
os.remove('constraints_{}.cst'.format(name))
relax.apply(pose)
pose.dump_pdb('backbone_{}.pdb'.format(name))
def multi(self):
''' Fold both upper and lower diagonals at the same time '''
P1 = multiprocessing.Process(target=self.upper_lower,args=[self.U, 'U'])
P2 = multiprocessing.Process(target=self.upper_lower,args=[self.L, 'L'])
P1.start()
P2.start()
P1.join()
P2.join()
################################################################################
############################### NEURAL NETWORKS ################################
################################################################################
class BACKBONE():
''' A neural network that generates a protein backbone '''
pass
class SEQUENCE():
''' A neural network that generates a sequence for a protein backbone '''
pass
class FRAGMENT():
''' A neural network that generates 3-mer and 9-mer fragments '''
pass
def main():
if args.dataset:
DB = Dataset()
DB.build(sys.argv[2])
elif args.frag:
Vall()
Frag_vectorise()
elif args.TrainBack:
print('\x1b[33m[.] Training...\x1b[0m')
BB = BACKBONE()
print('\x1b[32m[+] Training done\x1b[0m')
elif args.TrainFrag:
print('\x1b[33m[.] Training...\x1b[0m')
FR = FRAGMENT()
print('\x1b[32m[+] Training done\x1b[0m')
elif args.TrainSeq:
print('\x1b[33m[.] Training...\x1b[0m')
SQ = SEQUENCE()
print('\x1b[32m[+] Training done\x1b[0m')
if __name__ == '__main__': main()
|
meat.py
|
#!/usr/bin/env python3
"""
NOTES:
- Session.msg
borrowed from statistics/msg/Session.msg
it's possible to introduce this dependency (or leave it as is)
- check statistics/StatsD.msg
Will need to define association between source topics and type of
result statistical data (whether it's event based or time-series based).
This will need to be either configured (not sure if "output-type" will
be easy to blend with activity_sources processing) or hard-coded in an
internal relational matrix (output msg fields: type, application).
"""
import time
import threading
import rospy
from lg_common.helpers import unpack_activity_sources
from lg_common.helpers import get_message_type_from_string
from lg_common.helpers import get_nested_slot_value
from lg_common.helpers import message_is_nonzero
from lg_common.helpers import SlotUnpackingException
from lg_common.helpers import get_random_string
from lg_msg_defs.msg import Event
from . import submitters
from functools import reduce
ROS_NODE_NAME = "lg_stats"
LG_STATS_DEBUG_TOPIC_DEFAULT = "debug"
class EmptyIncomingMessage(Exception):
pass
class StatsConfigurationError(Exception):
pass
class Processor(object):
"""
    tl;dr Processor watches a given ROS topic. It knows how to discard
    or observe specific slots (sub-slots) of ROS messages. It can calculate an average or sum
    of ROS message values, or submit *every* message as an event (the default strategy).
    A plain-Python sketch of the non-default reductions follows this class.
    - watched_topic e.g. "/spacenav/twist"
    - measurement - name of the InfluxDB measurement - defaults to 'lg_stats_event' or 'lg_stats_metric' depending on the strategy
    - msg_slot - dot-delimited list of attributes inside a message, e.g. 'header.seq' or 'angular.x'
    - debug_pub - publisher for publishing debugging Event.msg
    - resolution - how often we submit the data
    - inactivity_resubmission - how long to wait until old values get re-submitted
    - strategy: default_session, default, count, count_nonzero, average (either get an attribute from the message and write
                it to InfluxDB, count messages, or calculate the average of slot values)
    - influxdb_client - instance of the InfluxDB client
"""
def __init__(self,
watched_topic=None,
measurement=None,
msg_slot=None,
debug_pub=None,
resolution=5,
inactivity_resubmission=5,
strategy='default',
influxdb_client=None
):
if not msg_slot:
msg = "Message slot not passed to Processor"
rospy.logerr(msg)
raise StatsConfigurationError(msg)
if not watched_topic:
msg = "Watched topic not passed to Processor"
rospy.logerr(msg)
raise StatsConfigurationError(msg)
if not measurement:
if strategy == 'default' or strategy == 'default_session':
self.measurement = 'lg_stats_event'
else:
self.measurement = 'lg_stats_metric'
else:
self.measurement = measurement
self.watched_topic = watched_topic
self.counter = 0
self.messages = []
self.strategy = strategy
self.msg_slot = msg_slot
self.debug_pub = debug_pub # debug ROS publisher topic
self.resolution = resolution # seconds
self.inactivity_resubmission = inactivity_resubmission # seconds
self.time_of_last_in_msg = None # time of the last processed incoming message
self.last_in_msg = None # message from the source topic, last incoming mesage
self.last_influx_data = None # last message submitted to InfluxDB
self.last_out_msg = None # last /lg_stats/debug message
self.time_of_last_resubmission = None
self.influxdb_client = influxdb_client
self.resubmission_thread = None
self._lock = threading.Lock()
rospy.loginfo("Initializing Processor instance: %s" % self)
def __str__(self):
return "<Processor instance for topic %s, msg_slot %s, strategy: %s>" % (self.watched_topic, self.msg_slot, self.strategy)
def __repr__(self):
return self.__str__()
def _start_resubmission_thread(self):
"""
        Starts one of two threads, depending on the strategy:
        - resubmission: responsible for re-submitting event data on topics which
          have low messaging traffic - useful for events like mode changes or
          director messages
        - background: responsible for timely, frequent submission of stats like
          the rate of touchscreen touch events or the average value on a topic
          containing proximity sensor range readings
"""
if self.strategy == "default" or self.strategy == 'default_session':
rospy.loginfo("Starting %s strategy resubmission thread for %s" % (self.strategy, self))
self.resubmission_thread = threading.Thread(target=self._resubmission_thread)
self.resubmission_thread.start()
else:
rospy.loginfo("Starting background thread for %s" % self)
self._periodic_flush_thread = threading.Thread(target=self._periodic_flush_thread)
self._periodic_flush_thread.start()
def on_shutdown(self):
rospy.loginfo("Received shutdown for periodic/resubmission")
def _periodic_flush_thread(self):
"""
Flushes accumulated values for non-default strategies
"""
while not rospy.is_shutdown():
rospy.logdebug("Background thread loop for %s" % self.watched_topic)
self._flushing_worker()
rospy.logdebug("Background thread for %s going to sleep" % self.watched_topic)
for interval in range(0, self.resolution):
if rospy.is_shutdown():
break
rospy.sleep(1)
        rospy.logdebug("Background thread for %s has finished" % self.watched_topic)
def _resubmission_thread(self):
"""
The method runs as a background thread.
It checks whether there was any activity on the handled ROS
topic every "inactivity_resubmission" period.
        If there was not, an LG Stats update based on the last ROS message
        is sent out (to both the /lg_stats/debug topic and InfluxDB).
"""
while not rospy.is_shutdown():
rospy.logdebug("Resubmission thread loop for %s" % self.watched_topic)
self._resubmit_worker()
rospy.logdebug("Resubmission thread sleeping ...")
for interval in range(0, self.resolution):
if rospy.is_shutdown():
break
rospy.sleep(1)
        rospy.loginfo("Resubmission thread for %s has finished" % self.watched_topic)
def _resubmit_worker(self):
"""
        Worker that re-submits the last data point to InfluxDB.
It modifies the timestamp to current time as well as the
value which needs to indicate that we're re-submitting a message
as opposed to submission of a new one.
"""
with self._lock:
if self.last_in_msg and self.time_of_last_in_msg:
if not self.time_of_last_resubmission:
self.time_of_last_resubmission = self.time_of_last_in_msg
elapsed = time.time() - self.time_of_last_resubmission
if int(round(elapsed)) > self.inactivity_resubmission:
rospy.logdebug("Re-submitting last message to InfluxDB ('%s') ..." % self.last_influx_data)
# regenerate last message with new timestamp and diminished value
self.last_out_msg.value = "0.5"
self.last_out_msg.span = str(self.resolution)
self.debug_pub.publish(self.last_out_msg)
regenerated_message = self.influxdb_client.get_data_for_influx(self.last_out_msg, self.measurement)
self.influxdb_client.write_stats(regenerated_message)
self.time_of_last_resubmission = time.time()
else:
rospy.logdebug("The 'inactivity_resubmission' (%s) period has not "
"elapsed yet = %s" % (self.inactivity_resubmission, elapsed))
else:
rospy.logdebug("Nothing received on topic %s so far." % self.watched_topic)
def _get_slot_value(self, msg):
"""
        Returns the slot section of the message.
        If the slot section is empty, it evaluates to False (bool({})).
        If the slot value cannot be unpacked, returns ''; if no slot is defined
        for this Processor, raises StatsConfigurationError.
"""
if self.msg_slot:
# this may be {} (empty message) and will evaluate still to False
try:
slot_value = get_nested_slot_value(self.msg_slot, msg)
return slot_value[self.msg_slot]
except SlotUnpackingException:
rospy.logerr("Could not get slot value for message %s with slot %s" % (msg, self.msg_slot))
return ''
else:
msg = "Message slot not defined for topic %s" % (self.watched_topic)
rospy.logerr(msg)
raise StatsConfigurationError(msg)
def _compare_messages(self, msg_1, msg_2):
"""
        Returns True if the relevant field of the two messages is equal
        for this processor instance.
"""
if get_nested_slot_value(self.msg_slot, msg_1) == get_nested_slot_value(self.msg_slot, msg_2):
return True
else:
return False
def _get_outbound_message(self, src_msg):
"""
        Accepts an incoming ROS message from the topic this Processor listens to.
        Returns an outbound message for the /lg_stats/debug topic
        based on the data from the source topic message (src_msg).
        The outbound message's `influx` attribute is later populated with the
        string that gets submitted to InfluxDB.
"""
if self.strategy == 'default':
# self.resolution
slot_value = self._get_slot_value(src_msg) # this may raise EmptyIncomingMessage
out_msg = Event(measurement=self.measurement,
src_topic=self.watched_topic,
field_name=self.msg_slot,
type="event",
metadata=str(slot_value),
span=str(self.resolution),
value="1.0")
return out_msg
elif self.strategy == 'default_session':
self.session_id = get_random_string(N=8, uppercase=False)
slot_value = self._get_slot_value(src_msg) # this may raise EmptyIncomingMessage
out_msg = Event(measurement=self.measurement,
src_topic="session:" + self.watched_topic,
field_name=self.msg_slot,
type="event",
metadata=str(slot_value) + "__%s" % self.session_id,
span=str(self.resolution),
value="1.0")
return out_msg
elif self.strategy == 'average':
"""
calculate average - add value to list and wait for resubmission
return False because no outbound message should be generated
"""
slot_value = self._get_slot_value(src_msg) # this may raise EmptyIncomingMessage
self.messages.append(slot_value)
return False
elif self.strategy == 'count':
"""
make each message increase a counter
return False because no outbound message should be generated
"""
self.counter += 1
return False
elif self.strategy == 'count_nonzero':
"""
make each nonzero message increase a counter (where nonzero means that each
slot or subslot needs to be different than 0)
return False because no outbound message should be generated
"""
is_nonzero = message_is_nonzero(src_msg) # this may raise EmptyIncomingMessage
if is_nonzero:
self.counter += 1
return False
def _flushing_worker(self):
"""
Flushes non-default strategy buffers - calculates rates/counts etc
"""
if self.strategy == 'default' or self.strategy == 'default_session':
"""
Do nothing
"""
rospy.logdebug("Not flushing %s because of default strategy" % self)
elif self.strategy == 'count':
"""
Submit count, clean buffer
"""
rospy.logdebug("Flushing %s" % self)
out_msg = Event(measurement=self.measurement,
src_topic=self.watched_topic,
field_name=self.msg_slot,
type="rate",
metadata="flush",
span=str(self.resolution),
value=str(self.counter))
self.counter = 0
rospy.logdebug("Flushing %s with out_msg=%s" % (self, out_msg))
self._submit_influxdata(out_msg)
elif self.strategy == 'count_nonzero':
"""
Submit count_nonzero, clean buffer
"""
rospy.logdebug("Flushing %s" % self)
out_msg = Event(measurement=self.measurement,
src_topic=self.watched_topic,
field_name=self.msg_slot,
type="nonzero_rate",
metadata="flush",
span=str(self.resolution),
value=str(self.counter))
self.counter = 0
rospy.logdebug("Flushing %s with out_msg=%s" % (self, out_msg))
self._submit_influxdata(out_msg)
elif self.strategy == 'average':
"""
Calculate average, submit and clean buffer
"""
rospy.logdebug("Flushing %s" % self)
try:
average = reduce(lambda x, y: float(x) + float(y), self.messages) / len(self.messages)
except TypeError:
"""
No messages to count
"""
return
self.messages = []
out_msg = Event(measurement=self.measurement,
src_topic=self.watched_topic,
field_name=self.msg_slot,
type="average",
metadata="flush",
span=str(self.resolution),
value=str(average))
rospy.logdebug("Flushing %s with out_msg=%s" % (self, out_msg))
self._submit_influxdata(out_msg)
else:
"""
unknown strategy
"""
rospy.logdebug("Unknown strategy %s for %s" % (self.strategy, self))
pass
def _submit_influxdata(self, out_msg):
"""
        Accepts an out_msg of type Event, calculates the InfluxDB submission string,
        populates the out_msg `influx` attribute with it, and submits that string
        to InfluxDB. Publishes the Event message on the debug topic.
"""
influx_data = self.influxdb_client.get_data_for_influx(out_msg, self.measurement)
out_msg.influx = str(influx_data)
rospy.logdebug("Submitting to InfluxDB: '%s'" % influx_data)
rospy.logdebug("Publishing out_msg: %s" % out_msg)
rospy.logdebug("Types: %s, %s, %s, %s, %s" % (type(out_msg.measurement),
type(out_msg.src_topic),
type(out_msg.type),
type(out_msg.metadata),
type(out_msg.value)))
self.debug_pub.publish(out_msg)
self.influxdb_client.write_stats(influx_data)
return influx_data
def process(self, msg):
"""
Callback public method for messages flowing on observed topic
"""
m = "Processor received: '%s'" % msg
rospy.logdebug(m)
try:
out_msg = self._get_outbound_message(msg)
if out_msg:
influx_data = self._submit_influxdata(out_msg)
"""
Do resubmission only for events
"""
with self._lock:
self.last_in_msg = msg
self.time_of_last_in_msg = time.time()
self.last_influx_data = influx_data
self.last_out_msg = out_msg
self.time_of_last_resubmission = None
except EmptyIncomingMessage as ex:
rospy.logerr(ex)
return
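# A minimal plain-Python sketch (illustrative only; no ROS or InfluxDB required) of the
# reductions the non-default strategies above perform when a resolution window is
# flushed: 'count' reports how many messages arrived, 'count_nonzero' how many of them
# carried a nonzero value, and 'average' the mean of the buffered slot values. The
# helper name and the list-of-numbers input are assumptions made for this sketch.
def _strategy_flush_sketch(values):
    """Return (count, count_nonzero, average) for one resolution window of slot values."""
    count = len(values)
    count_nonzero = len([v for v in values if float(v) != 0])
    average = (sum(float(v) for v in values) / len(values)) if values else None
    return count, count_nonzero, average
# e.g. _strategy_flush_sketch([0, 3.0, 0, 1.0]) -> (4, 2, 1.0)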
def get_influxdb_client():
submitter_type = rospy.get_param("~submission_type", None)
host = rospy.get_param("~host", None)
port = rospy.get_param("~port", None)
database = rospy.get_param("~database", None)
if not submitter_type or not host or not port:
raise RuntimeError("No InfluxDB connection details provided in the roslaunch configuration.")
return getattr(submitters, submitter_type)(host=host, port=port, database=database)
def main():
rospy.init_node(ROS_NODE_NAME, anonymous=True)
debug_topic = "%s/%s" % (ROS_NODE_NAME, LG_STATS_DEBUG_TOPIC_DEFAULT)
debug_topic_pub = rospy.Publisher(debug_topic, Event, queue_size=3)
    # the resolution is global across all watched topics, which may or may not be
    # desirable; in other words, we may want to make the time resolution
    # configurable per specified source topic processor instance
resolution = rospy.get_param("~resolution", 2)
inactivity_resubmission = rospy.get_param("~inactivity_resubmission", resolution)
event_measurement_name = rospy.get_param("~event_measurement_name", 'lg_stats_event')
metric_measurement_name = rospy.get_param("~metric_measurement_name", 'lg_stats_metric')
# source activities - returns list of dictionaries
stats_sources = unpack_activity_sources(rospy.get_param("~activity_sources"))
influxdb_client = get_influxdb_client()
processors = []
for stats_source in stats_sources:
# for single stats_source dictionary please see unpack_activity_sources() docs
# dynamic import based on package/message_class string representation
msg_type = get_message_type_from_string(stats_source["message_type"])
p = Processor(watched_topic=stats_source["topic"],
msg_slot=stats_source["slot"],
debug_pub=debug_topic_pub,
resolution=resolution,
strategy=stats_source["strategy"],
inactivity_resubmission=inactivity_resubmission,
influxdb_client=influxdb_client)
p._start_resubmission_thread() # keep it separated (easier testing)
rospy.loginfo("Subscribing to topic '%s' (msg type: '%s') ..." % (stats_source["topic"], msg_type))
rospy.Subscriber(stats_source["topic"], msg_type, p.process, queue_size=3)
processors.append(p)
rospy.on_shutdown(p.on_shutdown)
# wake all processors that have strategy of average and count and make sure their buffers are emptied
rospy.loginfo("Initializing lg_stats with: %s" % processors)
rospy.loginfo("%s spinning ..." % ROS_NODE_NAME)
rospy.spin()
|
vis_client_test.py
|
#!/usr/bin/env python
from pylab import *
import socket
import time
import threading
import json
from collections import deque
from glider_icon import *
from helper import lla2flatearth
#init plot
fig, ax = plt.subplots()
ax.axis('equal')
#init glider icon
bp, lwp, rwp, ltp, rtp, vp, tp = addAndGetGliderIcon(ax, 0,0,0,0,0)
#init glider trail
trail_len = 1000
trail_x = deque(maxlen=trail_len)
trail_y = deque(maxlen=trail_len)
trail, = ax.plot([0.0], [0.0], color='m', linewidth=2)
#init glider properties
glider_x = 0
glider_y = 0
glider_heading = 0
glider_roll = 0
glider_altitiude = 0
#init tape
new_tape = True
tape_x = None
tape_y = None
tape, = ax.plot([0.0], [0.0], color='b', linewidth=1)
#init tcp connection
tcp_ip = '127.0.0.1'
tcp_port = 5511
buffer_size = 4096
tcp_loop = True
#init tcp data
tcp_dict = None
data_init = False
tape_init = False
glider_data = None
tape_data = None
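# Shape of the JSON messages expected on the TCP socket, inferred from the parsing in
# tcpThread() below; the numeric values here are placeholders only. HEADING and PHI
# arrive in radians (they are converted with *180/math.pi below) and the NEW_TAPE
# x/y fields are lists of lists that get flattened before plotting.
EXAMPLE_GLIDER_STATE_MSG = {"CHANNEL": "GLIDER_STATE", "LAT": 0.0, "LON": 0.0,
                            "HEADING": 0.0, "PHI": 0.0, "ALT": 0.0}
EXAMPLE_NEW_TAPE_MSG = {"CHANNEL": "NEW_TAPE", "x": [[0.0, 1.0]], "y": [[0.0, 1.0]]}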
#thread for handling incoming TCP data
def tcpThread():
global glider_data, tape_data, tcp_dict, data_init, tape_init, tape_x, tape_y, glider_x, glider_y, glider_heading, glider_roll, glider_altitiude
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((tcp_ip, tcp_port))
while tcp_loop:
data = s.recv(buffer_size)
data = data.splitlines()[0]
try:
tcp_dict = json.loads(str(data))
channel = tcp_dict['CHANNEL'].upper()
if channel == "NEW_TAPE":
if not tape_init:
tape_init = True
tape_data = tcp_dict
tape_x = [item for sublist in tape_data['x'] for item in sublist]
tape_y = [item for sublist in tape_data['y'] for item in sublist]
elif channel == "GLIDER_STATE":
if not data_init:
data_init = True
glider_data = tcp_dict
glider_x, glider_y = lla2flatearth(glider_data['LAT'], glider_data['LON'])
glider_heading = glider_data["HEADING"]*180/math.pi
glider_roll = glider_data["PHI"]*180/math.pi
glider_altitiude = glider_data["ALT"]
except:
pass
s.close()
    print("Closing Visualization TCP Thread")
t = threading.Thread(target=tcpThread)
t.start()
#timer to update figure
def timer_callback(axes):
global ax, bp, lwp, rwp, ltp, rtp, vp, tp
if tape_init:
tape.set_xdata(tape_x)
tape.set_ydata(tape_y)
trail_x.append(glider_x)
trail_y.append(glider_y)
trail.set_xdata(trail_x)
trail.set_ydata(trail_y)
bp.remove()
lwp.remove()
rwp.remove()
ltp.remove()
rtp.remove()
vp.remove()
tp.remove()
bp, lwp, rwp, ltp, rtp, vp, tp = addAndGetGliderIcon(ax, glider_x, glider_y, glider_heading, glider_roll, glider_altitiude)
#center on glider
xmin, xmax = xlim()
ymin, ymax = ylim()
width = abs(xmax-xmin)
height = abs(ymax-ymin)
xlim(glider_x-width/2, glider_x+width/2)
ylim(glider_y-height/2, glider_y+height/2)
fig.canvas.draw()
#key callback to close TCP thread
def on_key_close_tcp(event):
global tcp_loop
tcp_loop = False
#fig.canvas.mpl_disconnect(fig.canvas.manager.key_press_handler_id)
fig.canvas.mpl_connect('key_press_event', on_key_close_tcp)
timer = fig.canvas.new_timer(interval=100)
timer.add_callback(timer_callback, ax)
timer.start()
plt.show()
tcp_loop = False
|
LaunchToRhythm.py
|
import sys
import time
import threading
import json
from pynput.keyboard import Key, Controller
try:
import launchpad_py as launchpad
except ImportError:
try:
        import launchpad  # fall back to the plain 'launchpad' module name
except ImportError:
        sys.exit("error loading launchpad.py")  # neither import worked, so exit
with open("config.json") as config:  # Open the config file
    configLoaded = json.load(config)  # Load the config (the file is closed automatically)
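# The keys below reflect how configLoaded is used later in this script; the concrete
# values are made-up placeholders for illustration only, not defaults - the real
# colors, speeds, buttons, and keys come from the user's own config.json.
EXAMPLE_CONFIG = {
    "effect1enabled": True,
    "effect1color": 53,           # Launchpad color code (placeholder)
    "effect1speed": 0.05,         # seconds between steps (placeholder)
    "effect2enabled": True,
    "effect2colors": [21, 45],    # [base color, flash color] (placeholder)
    "effect2speed": 0.1,
    "pressColor": 5,
    "buttons": [[0, 7], [1, 7]],  # [x, y] pad coordinates (placeholder)
    "keys": ["a", "s"],           # one key per entry in "buttons" (placeholder)
}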
def main():
mode = None
if launchpad.LaunchpadPro().Check( 0 ):
lp = launchpad.LaunchpadPro()
if lp.Open( 0 ):
print("Launchpad Pro Connected! (Side effects aren't supported yet)")
mode = "Pro"
elif launchpad.LaunchpadProMk3().Check( 0 ):
lp = launchpad.LaunchpadProMk3()
if lp.Open( 0 ):
print("Launchpad Pro Mk3 Connected!")
mode = "ProMk3"
elif launchpad.LaunchpadMiniMk3().Check( 1 ):
lp = launchpad.LaunchpadMiniMk3()
if lp.Open( 1 ):
print("Launchpad Mini Mk3 Connected!")
mode = "MiniMk3"
elif launchpad.LaunchpadLPX().Check( 1 ):
lp = launchpad.LaunchpadLPX()
if lp.Open( 1 ):
print("Launchpad X Connected!")
mode = "LPX"
elif launchpad.LaunchpadMk2().Check( 0 ):
lp = launchpad.LaunchpadMk2()
if lp.Open( 0 ):
print("Launchpad Mk2 Connected!")
mode = "Mk2"
if mode is None:
print("Did not find any Launchpads!")
return
def effect1():
# Buttons to light up in the first effect
effect1array = [
[8, 8],
[8, 7],
[8, 6],
[8, 5],
[8, 4],
[8, 3],
[8, 2],
[8, 1],
[7, 0],
[6, 0],
[5, 0],
[4, 0],
[3, 0],
[2, 0],
[1, 0]
]
while True:
for i in effect1array:
lp.LedCtrlXYByCode(i[0], i[1], configLoaded["effect1color"])
time.sleep(configLoaded["effect1speed"])
lp.LedCtrlXYByCode(i[0], i[1], 0)
def effect2():
# Buttons to light up in the second effect
effect2array = [
[0, 7],
[1, 7],
[2, 7],
[2, 8],
[7, 7],
[6, 7],
[5, 7],
[5, 8]
]
while True:
for i in effect2array:
lp.LedCtrlXYByCode(i[0], i[1], configLoaded["effect2colors"][1])
time.sleep(configLoaded["effect2speed"])
lp.LedCtrlXYByCode(i[0], i[1], configLoaded["effect2colors"][0])
if configLoaded["effect1enabled"]:
threading.Thread(target=effect1).start() #Start effect 1 in background.
if configLoaded["effect2enabled"]:
threading.Thread(target=effect2).start() #Start effect 2 in background.
lp.LedCtrlXYByCode(0, 0, 5) #Set exit button color
while True:
if mode == 'Pro' or mode == 'ProMk3':
buts = lp.ButtonStateXY( mode = 'pro')
else:
buts = lp.ButtonStateXY()
keyboard = Controller()
if buts != []:
for i, val in enumerate(configLoaded["buttons"]):
if buts[0] == val[0] and buts[1] == val[1]:
if buts[2] >= 1:
keyboard.press(configLoaded["keys"][i])
lp.LedCtrlXYByCode(val[0], val[1], configLoaded["pressColor"])
elif buts[2] == 0:
keyboard.release(configLoaded["keys"][i])
lp.LedCtrlXYByCode(val[0], val[1], 0)
if buts[0] == 0 and buts[1] == 0 and buts[2] == 127:
lp.Reset() # turn all LEDs off
lp.Close() # close the Launchpad (will quit with an error due to a PyGame bug)
if __name__ == '__main__':
main()
|
auto_power_off.py
|
import platform # For getting the operating system name
import subprocess # For executing a shell command
import sys
import threading
import time
from time import sleep
failCount = 0
routerAddress = "192.168.1.1"
def ping(host):
"""
Returns True if host (str) responds to a ping request.
Remember that a host may not respond to a ping (ICMP) request even if the host name is valid.
"""
# Option for the number of packets as a function of
param = '-n' if platform.system().lower() == 'windows' else '-c'
# Building the command. Ex: "ping -c 1 google.com"
command = ['ping', param, '1', host]
return subprocess.call(command) == 0
def shut_down():
return subprocess.call(["poweroff"]) == 0
def check_power(host):
print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
result = ping(host)
global failCount
if not result:
failCount += 1
log("ping fail", "1")
if failCount > 6:
log("ping fail for 8 minutes, nas will shutdown", "2")
if shut_down():
log("shutdown execute successfully", "0")
sys.exit()
else:
log("shutdown execute fail", "1")
else:
failCount = 0
def log(msg, level):
print(msg)
subprocess.call(["log_tool", "-a", msg, "-t", level, "-N", "Power Check", "-G", "Power"])
if __name__ == '__main__':
log("Check power start!", "0")
while True:
        threading.Thread(target=check_power, args=(routerAddress,), name='CheckPowerThread').start()  # pass the function and its argument separately so the check runs in the thread
sleep(60)
|
test_lock.py
|
"""
Copyright (c) 2008-2014, Jesus Cea Avion <jcea@jcea.es>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
3. Neither the name of Jesus Cea Avion nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
"""
"""
TestCases for testing the locking sub-system.
"""
import time
import unittest
from .test_all import db, test_support, verbose, have_threads, \
get_new_environment_path, get_new_database_path
if have_threads :
from threading import Thread
import sys
if sys.version_info[0] < 3 :
from threading import currentThread
else :
from threading import current_thread as currentThread
#----------------------------------------------------------------------
class LockingTestCase(unittest.TestCase):
def setUp(self):
self.homeDir = get_new_environment_path()
self.env = db.DBEnv()
self.env.open(self.homeDir, db.DB_THREAD | db.DB_INIT_MPOOL |
db.DB_INIT_LOCK | db.DB_CREATE)
def tearDown(self):
self.env.close()
test_support.rmtree(self.homeDir)
def test01_simple(self):
if verbose:
print('\n', '-=' * 30)
print("Running %s.test01_simple..." % self.__class__.__name__)
anID = self.env.lock_id()
if verbose:
print("locker ID: %s" % anID)
lock = self.env.lock_get(anID, "some locked thing", db.DB_LOCK_WRITE)
if verbose:
            print("Acquired lock: %s" % lock)
self.env.lock_put(lock)
if verbose:
print("Released lock: %s" % lock)
self.env.lock_id_free(anID)
def test02_threaded(self):
if verbose:
print('\n', '-=' * 30)
print("Running %s.test02_threaded..." % self.__class__.__name__)
threads = []
threads.append(Thread(target = self.theThread,
args=(db.DB_LOCK_WRITE,)))
threads.append(Thread(target = self.theThread,
args=(db.DB_LOCK_READ,)))
threads.append(Thread(target = self.theThread,
args=(db.DB_LOCK_READ,)))
threads.append(Thread(target = self.theThread,
args=(db.DB_LOCK_WRITE,)))
threads.append(Thread(target = self.theThread,
args=(db.DB_LOCK_READ,)))
threads.append(Thread(target = self.theThread,
args=(db.DB_LOCK_READ,)))
threads.append(Thread(target = self.theThread,
args=(db.DB_LOCK_WRITE,)))
threads.append(Thread(target = self.theThread,
args=(db.DB_LOCK_WRITE,)))
threads.append(Thread(target = self.theThread,
args=(db.DB_LOCK_WRITE,)))
for t in threads:
import sys
if sys.version_info[0] < 3 :
t.setDaemon(True)
else :
t.daemon = True
t.start()
for t in threads:
t.join()
def test03_lock_timeout(self):
self.env.set_timeout(0, db.DB_SET_LOCK_TIMEOUT)
self.assertEqual(self.env.get_timeout(db.DB_SET_LOCK_TIMEOUT), 0)
self.env.set_timeout(0, db.DB_SET_TXN_TIMEOUT)
self.assertEqual(self.env.get_timeout(db.DB_SET_TXN_TIMEOUT), 0)
self.env.set_timeout(123456, db.DB_SET_LOCK_TIMEOUT)
self.assertEqual(self.env.get_timeout(db.DB_SET_LOCK_TIMEOUT), 123456)
self.env.set_timeout(7890123, db.DB_SET_TXN_TIMEOUT)
self.assertEqual(self.env.get_timeout(db.DB_SET_TXN_TIMEOUT), 7890123)
def test04_lock_timeout2(self):
self.env.set_timeout(0, db.DB_SET_LOCK_TIMEOUT)
self.env.set_timeout(0, db.DB_SET_TXN_TIMEOUT)
self.env.set_timeout(123456, db.DB_SET_LOCK_TIMEOUT)
self.env.set_timeout(7890123, db.DB_SET_TXN_TIMEOUT)
def deadlock_detection() :
while not deadlock_detection.end :
deadlock_detection.count = \
self.env.lock_detect(db.DB_LOCK_EXPIRE)
if deadlock_detection.count :
while not deadlock_detection.end :
pass
break
time.sleep(0.01)
deadlock_detection.end=False
deadlock_detection.count=0
t=Thread(target=deadlock_detection)
import sys
if sys.version_info[0] < 3 :
t.setDaemon(True)
else :
t.daemon = True
t.start()
self.env.set_timeout(100000, db.DB_SET_LOCK_TIMEOUT)
anID = self.env.lock_id()
anID2 = self.env.lock_id()
self.assertNotEqual(anID, anID2)
lock = self.env.lock_get(anID, "shared lock", db.DB_LOCK_WRITE)
start_time=time.time()
self.assertRaises(db.DBLockNotGrantedError,
self.env.lock_get,anID2, "shared lock", db.DB_LOCK_READ)
end_time=time.time()
deadlock_detection.end=True
# Floating point rounding
self.assertTrue((end_time-start_time) >= 0.0999)
self.env.lock_put(lock)
t.join()
self.env.lock_id_free(anID)
self.env.lock_id_free(anID2)
self.assertTrue(deadlock_detection.count>0)
def theThread(self, lockType):
import sys
if sys.version_info[0] < 3 :
name = currentThread().getName()
else :
name = currentThread().name
if lockType == db.DB_LOCK_WRITE:
lt = "write"
else:
lt = "read"
anID = self.env.lock_id()
if verbose:
print("%s: locker ID: %s" % (name, anID))
for i in range(1000) :
lock = self.env.lock_get(anID, "some locked thing", lockType)
if verbose:
                print("%s: Acquired %s lock: %s" % (name, lt, lock))
self.env.lock_put(lock)
if verbose:
print("%s: Released %s lock: %s" % (name, lt, lock))
self.env.lock_id_free(anID)
#----------------------------------------------------------------------
def test_suite():
suite = unittest.TestSuite()
if have_threads:
suite.addTest(unittest.makeSuite(LockingTestCase))
else:
suite.addTest(unittest.makeSuite(LockingTestCase, 'test01'))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
|
test_operator.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
from __future__ import print_function
from __future__ import division
import numpy as np
import mxnet as mx
import copy
import math
import random
import itertools
from distutils.version import LooseVersion
from numpy.testing import assert_allclose, assert_array_equal
from mxnet.test_utils import *
from mxnet.base import py_str, MXNetError, _as_list
from common import setup_module, with_seed, teardown, assert_raises_cudnn_not_satisfied, assertRaises
import unittest
import os
def check_rnn_consistency(cell1, cell2, T, N, I, H, grad_req, rtol=1e-2, atol=1e-4):
dshape = (N, T, I)
data = mx.sym.Variable('data')
Y1, _ = cell1.unroll(T, data, layout='NTC', merge_outputs=True)
mod1 = mx.mod.Module(Y1, label_names=None, context=default_context())
mod1.bind(data_shapes=[('data', dshape)], label_shapes=None, inputs_need_grad=True, grad_req=grad_req)
Y2, _ = cell2.unroll(T, data, layout='NTC', merge_outputs=True)
mod2 = mx.mod.Module(Y2, label_names=None, context=default_context())
mod2.bind(data_shapes=[('data', dshape)], label_shapes=None, inputs_need_grad=True, grad_req=grad_req)
mod1.init_params()
args, auxs = mod1.get_params()
args = cell1.unpack_weights(args)
args = cell2.pack_weights(args)
mod2.set_params(args, auxs)
x = mx.random.uniform(shape=dshape)
batch=mx.io.DataBatch(data=[x])
# check inference
mod1.forward(batch, is_train=False)
mod2.forward(batch, is_train=False)
assert_allclose(mod1.get_outputs()[0].asnumpy(), mod2.get_outputs()[0].asnumpy(), rtol=rtol, atol=atol)
# check training
mod1.forward(batch, is_train=True)
mod2.forward(batch, is_train=True)
assert_allclose(mod1.get_outputs()[0].asnumpy(), mod2.get_outputs()[0].asnumpy(), rtol=rtol, atol=atol)
dy = mx.random.uniform(shape=mod1.get_outputs()[0].shape)
mod1.backward(out_grads=[dy])
mod2.backward(out_grads=[dy])
if grad_req != 'null':
assert_allclose(mod1.get_input_grads()[0].asnumpy(), mod2.get_input_grads()[0].asnumpy(), rtol=rtol, atol=atol)
else:
assert(mod1.get_input_grads()[0] == None)
assert(mod2.get_input_grads()[0] == None)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_lstm_sym():
T, N, I, H = 5, 32, 800, 800
fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='lstm', get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.LSTMCell(H, prefix='l0_'))
stack.add(mx.rnn.LSTMCell(H, prefix='l1_'))
stack.add(mx.rnn.LSTMCell(H, prefix='l2_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_lstm_bidirectional():
T, N, I, H = 5, 20, 800, 800
fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='lstm',
bidirectional=True, get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.LSTMCell(H, prefix='l0_'),
mx.rnn.LSTMCell(H, prefix='r0_'),
output_prefix='bi_lstm_0_'))
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.LSTMCell(H, prefix='l1_'),
mx.rnn.LSTMCell(H, prefix='r1_'),
output_prefix='bi_lstm_1_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_gru_sym():
T, N, I, H = 5, 32, 800, 800
fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='gru', get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.GRUCell(H, prefix='l0_'))
stack.add(mx.rnn.GRUCell(H, prefix='l1_'))
stack.add(mx.rnn.GRUCell(H, prefix='l2_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_gru_bidirectional():
T, N, I, H = 5, 20, 800, 800
fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='gru',
bidirectional=True, get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.GRUCell(H, prefix='l0_'),
mx.rnn.GRUCell(H, prefix='r0_'),
output_prefix='bi_gru_0_'))
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.GRUCell(H, prefix='l1_'),
mx.rnn.GRUCell(H, prefix='r1_'),
output_prefix='bi_gru_1_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnntanh_sym():
T, N, I, H = 5, 32, 800, 800
fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='rnn_tanh', get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.RNNCell(H, activation='tanh', prefix='l0_'))
stack.add(mx.rnn.RNNCell(H, activation='tanh', prefix='l1_'))
stack.add(mx.rnn.RNNCell(H, activation='tanh', prefix='l2_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnntanh_bidirectional():
T, N, I, H = 5, 20, 800, 800
fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='rnn_tanh',
bidirectional=True, get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.RNNCell(H, activation='tanh', prefix='l0_'),
mx.rnn.RNNCell(H, activation='tanh', prefix='r0_'),
output_prefix='bi_rnntanh_0_'))
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.RNNCell(H, activation='tanh', prefix='l1_'),
mx.rnn.RNNCell(H, activation='tanh', prefix='r1_'),
output_prefix='bi_rnntanh_1_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnnrelu_sym():
T, N, I, H = 5, 32, 200, 200
fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='rnn_relu', get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.RNNCell(H, activation='relu', prefix='l0_'))
stack.add(mx.rnn.RNNCell(H, activation='relu', prefix='l1_'))
stack.add(mx.rnn.RNNCell(H, activation='relu', prefix='l2_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnnrelu_bidirectional():
T, N, I, H = 5, 20, 200, 200
fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='rnn_relu',
bidirectional=True, get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.RNNCell(H, activation='relu', prefix='l0_'),
mx.rnn.RNNCell(H, activation='relu', prefix='r0_'),
output_prefix='bi_rnnrelu_0_'))
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.RNNCell(H, activation='relu', prefix='l1_'),
mx.rnn.RNNCell(H, activation='relu', prefix='r1_'),
output_prefix='bi_rnnrelu_1_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write', rtol=1e-2, atol=1e-2)
check_rnn_consistency(fused, stack, T, N, I, H, 'add', rtol=1e-2, atol=1e-2)
check_rnn_consistency(fused, stack, T, N, I, H, 'null', rtol=1e-2, atol=1e-2)
@with_seed()
def test_lstm_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
CX = mx.sym.Variable('state_cell')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX, state_cell=CX,
state_size=H, num_layers=5, mode='lstm', p=0.5, state_outputs=True, name='LSTM')
exe = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@with_seed()
def test_gru_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='gru', p=0.5, state_outputs=True, name='GRU')
exe = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@with_seed()
def test_rnntanh_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='rnn_tanh', p=0.5, state_outputs=True, name='RNN_TANH')
exe = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@with_seed()
def test_rnnrelu_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='rnn_relu', p=0.5, state_outputs=True, name='RNN_RELU')
exe = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
def np_softmax(x, axis=-1, temperature=1.0):
x = x - np.max(x, axis=axis, keepdims=True)
x = np.exp(x/temperature)
x /= np.sum(x, axis=axis, keepdims=True)
return x
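# np_softmax subtracts the per-row max before exponentiating. This is purely a
# numerical-stability trick and does not change the result, since softmax(x) is
# invariant to adding a constant to every element of x. A small illustrative check
# (not part of the original test suite):
def _np_softmax_stability_check():
    x = np.array([[1.0, 2.0, 3.0], [1000.0, 1001.0, 1002.0]])
    stable = np_softmax(x)                        # handles the huge logits without overflow
    naive = np.exp(x[0]) / np.sum(np.exp(x[0]))   # naive form is fine for the small logits
    assert_allclose(stable[0], naive, rtol=1e-6)
    assert_allclose(stable[0], stable[1], rtol=1e-6)  # shift invariance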
def check_elementwise_sum_with_shape(shape, n):
# forward
inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)]
out = mx.symbol.ElementWiseSum(*inputs, name='esum')
arr = [mx.nd.empty(shape) for i in range(n)]
arr_grad = [mx.nd.empty(shape) for i in range(n)]
for i in range(n):
arr[i][:] = np.random.uniform(-10, 10, shape)
exec1 = out.bind(default_context(),
args=arr,
args_grad=arr_grad)
out1 = exec1.outputs[0].asnumpy()
exec1.forward(is_train=True)
out1 = exec1.outputs[0].asnumpy()
out = sum(a.asnumpy() for a in arr)
assert_almost_equal(out, out1, rtol=1e-5, atol=1e-5)
out_grad = mx.nd.empty(shape)
out_grad[:] = np.random.uniform(-10, 10, shape)
# backward
exec1.backward([out_grad])
for a in arr_grad:
assert_almost_equal(a.asnumpy(), out_grad.asnumpy(), rtol=1e-5, atol=1e-5)
@with_seed()
def test_elementwise_sum():
nrepeat = 2
maxdim = 4
for repeat in range(nrepeat):
for dim in range(1, maxdim):
shape = tuple(np.random.randint(1, int(1000**(1.0/dim)), size=dim))
check_elementwise_sum_with_shape(shape, np.random.randint(1, 8))
def check_concat_with_shape(shapes, dimension, skip_second):
# if skip_second is True, second argument will not have gradient.
# it is to test #1130
n = len(shapes)
# forward
target_dim = 0
for shape in shapes:
target_dim += shape[dimension]
inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)]
out = mx.symbol.Concat(*inputs, name='conc',dim=dimension)
arr = [mx.nd.empty(shape) for shape in shapes]
for i in range(n):
arr[i][:] = shapes[i][dimension]
arr_np = [np.copy(narray.asnumpy()) for narray in arr]
arr_grad = [mx.nd.empty(shape) for shape in shapes]
dict_grad = {}
arg_names = out.list_arguments()
for name, g in zip(arg_names, arr_grad):
if not skip_second or name != 'arg1':
dict_grad[name] = g
args = out.list_arguments()
arg_shapes, out_shapes, aux_shapes = out.infer_shape(**dict(zip(args, shapes)))
out_grad = mx.nd.empty(out_shapes[0])
exec1 = out.bind(default_context(),
args=arr,
args_grad=dict_grad)
exec1.forward(is_train=True)
out1 = exec1.outputs[0]
ret = np.concatenate([narray.asnumpy() for narray in arr], axis=dimension)
assert_almost_equal(out1.asnumpy(), ret)
# backward
out1.copyto(out_grad)
out_grad[:] += 1
exec1.backward([out_grad])
for i, name in enumerate(arg_names):
if not skip_second or name != 'arg1':
grad = dict_grad[name]
np_grad = arr_np[i]
assert_almost_equal(grad.asnumpy(), np_grad + 1)
@with_seed()
def test_concat():
for dimension in range(4):
n = 2
merge = [2, 3, 4, 5, 6]
a = 2
b = 3
c = 4
# test 2D
if dimension<2:
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i], a))
elif dimension == 1:
shapes.append((a, merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 2, True)
check_concat_with_shape(shapes, dimension - 2, False)
#test 3D
if dimension<3:
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i], a,b))
elif dimension ==1:
shapes.append((a,merge[i],b))
elif dimension ==2:
shapes.append((a,b,merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 3, True)
check_concat_with_shape(shapes, dimension - 3, False)
# test 4D
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i],a,b,c))
elif dimension == 1:
shapes.append((a,merge[i],b,c))
elif dimension ==2:
shapes.append((a,b,merge[i],c))
elif dimension ==3:
shapes.append((a,b,c,merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 4, True)
check_concat_with_shape(shapes, dimension - 4, False)
@with_seed()
def test_slice_channel():
def check_slice_channel(data_ndim, axis, num_outputs, squeeze_axis):
ins = []
if squeeze_axis:
shape = np.random.randint(2, 5, data_ndim).tolist()
shape[axis] = num_outputs
out_ele_shape = [ele for ele in shape]
del out_ele_shape[axis]
else:
shape = np.random.randint(1, 5, data_ndim).tolist()
shape[axis] *= num_outputs
out_ele_shape = [ele for ele in shape]
out_ele_shape[axis] //= num_outputs
data_npy = np.random.normal(size=shape)
out_grads_npy = [np.random.normal(size=out_ele_shape) for i in range(num_outputs)]
data = mx.sym.Variable('data')
sym = mx.sym.SliceChannel(data=data, num_outputs=num_outputs, axis=axis, squeeze_axis=squeeze_axis)
exe = sym.simple_bind(ctx=default_context(), data=data_npy.shape)
assert len(exe.outputs) == num_outputs
outputs = exe.forward(is_train=True, data=data_npy)
for i in range(num_outputs):
gt = data_npy.take(np.arange(i * shape[axis]/num_outputs,
(i+1) * shape[axis]/num_outputs).astype(np.int), axis=axis)
if squeeze_axis:
assert_almost_equal(outputs[i].asnumpy(), gt.reshape(outputs[i].shape))
else:
assert_almost_equal(outputs[i].asnumpy(), gt)
# test backward
exe.backward(out_grads=[mx.nd.array(ele, ctx=default_context()) for ele in out_grads_npy])
if squeeze_axis:
assert_almost_equal(exe.grad_arrays[0].asnumpy(),
np.concatenate([np.expand_dims(ele, axis=axis) for ele in out_grads_npy],
axis=axis))
else:
assert_almost_equal(exe.grad_arrays[0].asnumpy(),
np.concatenate(out_grads_npy, axis=axis))
check_slice_channel(data_ndim=2, axis=1, num_outputs=3, squeeze_axis=True)
check_slice_channel(data_ndim=4, axis=2, num_outputs=3, squeeze_axis=False)
check_slice_channel(data_ndim=3, axis=-1, num_outputs=2, squeeze_axis=False)
check_slice_channel(data_ndim=5, axis=-2, num_outputs=3, squeeze_axis=True)
@with_seed()
def test_regression():
''' test regression operator '''
def check_regression(symbol, forward, backward, shape, stype='default', densities=[0, 0.5, 1]):
# init executor
data = mx.symbol.Variable('data')
label = mx.symbol.Variable('label', stype=stype)
out = symbol(data, label)
grad_req = {'data': 'write', 'label': 'null'}
out_exec = out.simple_bind(default_context(), grad_req=grad_req,
data=shape, label=shape)
arg_map = dict(zip(out.list_arguments(), out_exec.arg_arrays))
grad_map = dict(zip(out.list_arguments(), out_exec.grad_arrays))
# init data
arr_data = mx.random.uniform(-1, 1, shape)
arg_map["data"][:] = arr_data
# init label based on density
arr_label = arg_map["label"]
atol = 1e-5
for density in densities:
arr_label[:] = rand_ndarray(shape, stype, density=density)
out_exec.forward(is_train=True)
out_exec.backward()
np_out = forward(arr_data.asnumpy())
out_grad = backward(np_out, arr_label.asnumpy().reshape(np_out.shape)) / shape[1]
assert_almost_equal(out_exec.outputs[0].asnumpy(), np_out, atol=atol)
assert_almost_equal(grad_map["data"].asnumpy(), out_grad, atol=atol)
shape = (50, 30)
check_regression(mx.symbol.LogisticRegressionOutput,
lambda x: 1.0 / (1.0 + np.exp(-x)),
lambda x, y : x - y,
shape)
check_regression(mx.symbol.LinearRegressionOutput,
lambda x: x,
lambda x, y : x - y,
shape)
check_regression(mx.symbol.MAERegressionOutput,
lambda x: x,
lambda x, y : np.where(x > y, np.ones(x.shape), -np.ones(x.shape)),
shape)
check_regression(mx.symbol.LogisticRegressionOutput,
lambda x: 1.0 / (1.0 + np.exp(-x)),
lambda x, y : x - y,
shape, stype='csr')
check_regression(mx.symbol.LinearRegressionOutput,
lambda x: x,
lambda x, y : x - y,
shape, stype='csr')
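# For SoftmaxOutput the gradient w.r.t. the logits is softmax(x) - one_hot(label),
# so grad_out - softmax_out below should equal -one_hot(label).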
def check_softmax_grad(xpu):
x = mx.sym.Variable('x')
label = mx.sym.Variable('label')
x_nd = mx.nd.array([[1, 6, 4, 2]], ctx=xpu)
grad_x = mx.nd.zeros((1,4), ctx=xpu)
label_nd = mx.nd.array([1], ctx=xpu)
sym = mx.sym.SoftmaxOutput(data=x, label=label, ignore_label=0, use_ignore=False)
ex = sym.bind(ctx=xpu, args={'x': x_nd, 'label': label_nd}, args_grad={'x': grad_x})
ex.forward(is_train=True)
softmax_out = ex.outputs[0].asnumpy()
expected_softmax_out = [[0.005806628, 0.861780069, 0.116629249, 0.015784052]]
assert np.isclose(softmax_out, expected_softmax_out).all()
ex.backward(is_train=True)
grad_out = ex.grad_arrays[0].asnumpy()
k = int(label_nd[0].asscalar())
expected_grad_out = np.zeros((1,4))
expected_grad_out[0, k] = -1
assert np.isclose(grad_out - softmax_out, expected_grad_out).all()
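# With label smoothing (smooth_alpha), the target distribution puts 1 - alpha on the
# true class and alpha/(K-1) on each other class, so grad - softmax is expected to be
# the negative of that smoothed one-hot target.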
def check_smoothed_softmax_grad(xpu):
alpha = 0.2
x = mx.sym.Variable('x')
label = mx.sym.Variable('label')
x_nd = mx.nd.array([[1, 6, 4, 2]], ctx=xpu)
grad_x = mx.nd.zeros((1,4), ctx=xpu)
label_nd = mx.nd.array([1], ctx=xpu)
sym = mx.sym.SoftmaxOutput(data=x, label=label, ignore_label=0, use_ignore=False, smooth_alpha=alpha)
ex = sym.bind(ctx=xpu, args={'x': x_nd, 'label': label_nd}, args_grad={'x': grad_x})
ex.forward(is_train=True)
softmax_out = ex.outputs[0].asnumpy()
expected_softmax_out = [[0.005806628, 0.861780069, 0.116629249, 0.015784052]]
assert np.isclose(softmax_out, expected_softmax_out).all()
ex.backward(is_train=True)
grad_out = ex.grad_arrays[0].asnumpy()
k = int(label_nd[0].asscalar())
expected_grad_out = np.full((1,4), fill_value=-alpha/float(4-1))
expected_grad_out[0, k] = - (1 - alpha)
assert np.isclose(grad_out - softmax_out, expected_grad_out).all()
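# Samples whose label equals ignore_label must contribute zero gradient; the rows for
# the untouched half of the batch must be identical before and after relabeling.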
def check_softmax_with_ignore_label(xpu):
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L')
Y = mx.symbol.SoftmaxOutput(data=X, label=L, ignore_label=0, use_ignore=True)
shape = (20, 10)
x = mx.nd.empty(shape, ctx = xpu)
l = mx.nd.empty((shape[0],), ctx = xpu)
x_np = np.random.rand(*shape)
l_np = np.random.randint(0, shape[1]-1, (shape[0],))
x[:] = x_np
l[:] = l_np
grad = mx.nd.empty(shape, ctx = xpu)
exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
exec1.forward(is_train=True)
exec1.backward()
grad0 = grad.asnumpy()
for i in range(int(shape[0]/2)):
l_np[i] = 0
l[:] = l_np
exec1.forward(is_train=True)
exec1.backward()
grad1 = grad.asnumpy()
assert abs(np.sum(grad1[:int(shape[0]/2)])) < 1e-5
assert_almost_equal(grad0[int(shape[0]/2):], grad1[int(shape[0]/2):])
def check_softmax_with_shape(shape, xpu, preserve_shape=False):
# bind with label
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L')
Y = mx.symbol.SoftmaxOutput(data=X, label=L, preserve_shape=preserve_shape)
x = mx.random.uniform(-1, 1, shape, ctx=xpu)
l = mx.random.uniform(-1, 1, shape, ctx=xpu)
l[:] = np_softmax(l.asnumpy())
grad = mx.nd.empty(shape, ctx = xpu)
exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
exec1.forward(is_train=True)
out = exec1.outputs[0].asnumpy()
# Non-zero atol required by test_softmax with seed 781663739
rtol = 1e-4
atol = 1e-6
assert_almost_equal(out, np_softmax(x.asnumpy()), rtol=rtol, atol=atol)
exec1.backward()
assert_almost_equal(grad.asnumpy(), np_softmax(x.asnumpy()) - l.asnumpy(), rtol=rtol, atol=atol)
def test_python_op():
X = mx.symbol.Variable('X')
op = mx.operator.NumpyOp()
s = op.get_symbol(X, name='numpy_op')
x = mx.ndarray.ones((10))*10
dx = mx.ndarray.zeros((10))
dy = mx.ndarray.ones((10))
exec1 = s.bind(default_context(), args=[x], args_grad = {'X': dx})
exec1.forward(is_train=True)
assert_almost_equal(x.asnumpy(), exec1.outputs[0].asnumpy())
exec1.backward(dy)
assert_almost_equal(dy.asnumpy(), dx.asnumpy())
def test_swapaxes():
data = mx.symbol.Variable('data')
shape = (2, 3, 4)
data_tmp = np.ones(shape)
data_tmp[0] = 1
data_tmp[1] = 2
arr_data = mx.nd.array(data_tmp)
swap0 = mx.symbol.SwapAxis(data=data, dim1=0, dim2=2)
swap = mx.symbol.SwapAxis(data=swap0, dim1=1, dim2=2)
exe_c = swap.bind(default_context(), args=[arr_data])
exe_c.forward(is_train=True)
out = exe_c.outputs[0].asnumpy()
swap0_ = np.swapaxes(data_tmp, 0, 2)
swap_ = np.swapaxes(swap0_, 1, 2)
assert_almost_equal(out, swap_)
@with_seed()
def test_scalarop():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)*5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = 2 / (4-((1+data+1)*2/5)-0.8-(data!=0))
npout_1 = (4-((1+data_tmp+1)*2/5)-0.8-(data_tmp!=0))
npout = 2/npout_1
check_symbolic_forward(test, [data_tmp], [npout])
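    # Expected gradient by the chain rule: d(2/f)/d(data) = -2 * f' / f**2 with
    # f' = -2/5, scaled by the incoming gradient of 2.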
npout_grad = 2.*2/5
    npout_grad = 2 * npout_grad / (npout_1 * npout_1)
check_symbolic_backward(test, [data_tmp], [np.ones(shape)*2], [npout_grad])
@with_seed()
def test_scalar_pow():
data = mx.symbol.Variable('data')
shape = (1, 1)
data_tmp = np.ones(shape)
test = data ** 2
check_numeric_gradient(test, [data_tmp])
check_symbolic_forward(test, [data_tmp], [data_tmp ** 2])
check_symbolic_backward(test, [data_tmp], [np.ones(shape)], [2 * data_tmp])
@with_seed()
def test_symbol_pow():
shape = (1, 1)
data = mx.symbol.Variable('data')
data_tmp = np.ones(shape)*2
exp = mx.symbol.Variable('exp')
exp_tmp = np.ones(shape)*3
test = data**exp
check_numeric_gradient(test, [data_tmp, exp_tmp])
check_symbolic_forward(test, [data_tmp, exp_tmp], [data_tmp**exp_tmp])
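    # Analytic derivatives of x**y: d/dx = y * x**(y-1), d/dy = x**y * ln(x).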
data_dir = data_tmp**(exp_tmp - 1) * exp_tmp
exp_dir = data_tmp**(exp_tmp) * np.log(data_tmp)
check_symbolic_backward(test, [data_tmp, exp_tmp], [np.ones(shape)], [data_dir, exp_dir])
@with_seed()
def test_pow_fn():
shape = (3, 4)
exp = mx.symbol.Variable("exp")
y = mx.sym.pow(2, exp)
x = np.ones(shape)*3
check_numeric_gradient(y, [x], numeric_eps=1E-3)
check_symbolic_forward(y, [x], [2**x])
check_symbolic_backward(y, [x], [np.ones(shape)], [np.log(2) * 2**x])
@with_seed()
def test_relu():
def frelu(x):
return np.maximum(x, 0.0)
def frelu_grad(x):
return 1.0 * (x > 0.0)
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.relu(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
eps = 1e-4
# Avoid finite difference method inaccuracies due to discontinuous gradient at the origin.
# Here we replace small problematic inputs with 1.0. Repro issue with seed 97264195.
xa[abs(xa) < eps] = 1.0
ya = frelu(xa)
ga = frelu_grad(xa)
check_numeric_gradient(y, [xa], numeric_eps=eps)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga])
# NOTE(haojin2): Skipping the numeric check tests for float16 data type due to precision issues,
# the analytical checks are still performed on each and every data type to verify the correctness.
@with_seed()
def test_leaky_relu():
def fleaky_relu(x, act_type, slope=0.25):
neg_indices = x < 0
out = x.copy()
if act_type == 'elu':
out[neg_indices] = slope * np.expm1(out[neg_indices])
elif act_type == 'leaky':
out[neg_indices] = slope * out[neg_indices]
return out
def fleaky_relu_grad(grad, x, y, act_type, slope=0.25):
neg_indices = x < 0
out = np.ones(x.shape)
if act_type == 'elu':
out[neg_indices] = y[neg_indices] + slope
elif act_type == 'leaky':
out[neg_indices] = slope
return out * grad
for ndim in range(1, 4):
shape = rand_shape_nd(ndim)
x = mx.symbol.Variable("x")
slp = 0.25
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype(dtype)
eps = 1e-4
rtol = 1e-2
atol = 1e-3
xa[abs(xa) < eps] = 1.0
for act_type in ['elu', 'leaky']:
y = mx.symbol.LeakyReLU(data=x, slope=slp, act_type=act_type)
ya = fleaky_relu(xa, slope=slp, act_type=act_type)
ga = fleaky_relu_grad(np.ones(shape), xa, ya, slope=slp, act_type=act_type)
# Skip numeric check for float16 type to get rid of flaky behavior
if dtype is not np.float16:
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga], rtol=rtol, atol=atol, dtype=dtype)
# NOTE(haojin2): Skipping the numeric check tests for float16 data type due to precision issues,
# the analytical checks are still performed on each and every data type to verify the correctness.
@with_seed()
@unittest.skip("Flaky test tracked by https://github.com/apache/incubator-mxnet/issues/12885")
def test_prelu():
def fprelu(x, gamma):
pos_indices = x > 0
out = x.copy()
if len(x.shape) == 4:
out = out.transpose(2,3,0,1)
out = np.multiply(out, gamma)
out = out.transpose(2,3,0,1)
else:
out = np.multiply(out, gamma)
out[pos_indices] = x[pos_indices]
return out
def fprelu_grad(x, y, gamma):
pos_indices = x > 0
if len(x.shape) == 4:
grad_x = np.multiply(np.ones(x.shape).transpose(2,3,0,1), gamma)
grad_x = grad_x.transpose(2,3,0,1)
else:
grad_x = np.multiply(np.ones(x.shape), gamma)
grad_gam = np.zeros(gamma.shape)
copy_x = x.copy()
copy_x[pos_indices] = 0.0
grad_x[pos_indices] = 1.0
if len(gamma.shape) > 1 and len(x.shape) != 4:
grad_gam = copy_x
elif len(gamma.shape) > 1 and len(x.shape) == 4:
grad_gam = np.sum(copy_x, axis=(2,3))
elif gamma.shape[0] == 1:
grad_gam = np.sum(np.sum(copy_x))
elif gamma.shape[0] > 1 and len(x.shape) != 4:
grad_gam = np.sum(copy_x, axis=0)
elif gamma.shape[0] > 1 and len(x.shape) == 4:
grad_gam = np.sum(copy_x, axis=(0,2,3))
return (grad_x, grad_gam)
x = mx.symbol.Variable("x")
gamma = mx.symbol.Variable("gamma")
for shape in [(3,4), (3,4,4,5)]:
for dtype in [np.float16, np.float32, np.float64]:
for gam in [np.array([0.1, 0.2, 0.3, 0.4], dtype=dtype)]:
gam_full = np.array([gam, gam, gam])
xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype(dtype)
rtol = 1e-2
atol = 1e-3
eps = 1e-4
xa[abs(xa) < eps] = 1.0
y = mx.symbol.LeakyReLU(data=x, gamma=gamma, act_type='prelu')
ya = fprelu(xa, gam)
ya_full = fprelu(xa, gam_full)
g_xa, g_gam = fprelu_grad(xa, ya, gamma=gam)
g_xa_full, g_gam_full = fprelu_grad(xa, ya_full, gamma=gam_full)
# Skip numeric check for float16 type to get rid of flaky behavior
if dtype is not np.float16:
check_numeric_gradient(y, [xa, gam], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_numeric_gradient(y, [xa, gam_full], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa, gam], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa, gam], [np.ones(shape), np.ones(gam.shape)], [g_xa, g_gam], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa, gam_full], [ya_full], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa, gam_full], [np.ones(shape), np.ones(gam_full.shape)],
[g_xa_full, g_gam_full], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_selu():
alpha = 1.6732632423543772848170429916717
lamb = 1.0507009873554804934193349852946
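    # SELU(x) = lambda * x for x > 0 and lambda * alpha * (exp(x) - 1) otherwise,
    # using the standard self-normalizing constants alpha and lambda above.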
def fselu(x):
neg_indices = x < 0
out = x.copy()
out[neg_indices] = alpha * np.expm1(out[neg_indices])
return out * lamb
def fselu_grad(grad, x, y):
neg_indices = x < 0
out = np.ones(x.shape).astype(x.dtype)
out[neg_indices] = y[neg_indices] + alpha
return out * lamb
shape = (3, 4)
x = mx.sym.Variable("x")
y = mx.sym.LeakyReLU(data=x, act_type="selu")
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-0.1,high=0.1,size=shape).astype(dtype)
eps, rtol, atol = (7.5e-4, 1e-1, 1e-2) if dtype is np.float16 else (1e-4, 1e-2, 1e-4)
if dtype is np.float16:
xa /= 10.0
xa[abs(xa) < eps] = 0.01
ya = fselu(xa)
ga = fselu_grad(np.ones(shape).astype(dtype), xa, ya)
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_gelu():
CUBE_CONSTANT = 0.044715
ROOT_TWO_OVER_PI = 0.7978845608028654
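    # Reference is the tanh approximation of GELU:
    # 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3))).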
def g(x):
return ROOT_TWO_OVER_PI * (x + CUBE_CONSTANT * np.power(x, 3))
def g_grad(x):
return ROOT_TWO_OVER_PI * (1.0 + 3.0 * CUBE_CONSTANT * np.power(x, 2))
def f(x):
return 1.0 + np.tanh(g(x))
def f_grad(x):
return (1.0 - np.tanh(g(x)) * np.tanh(g(x))) * g_grad(x)
def fgelu(x):
return 0.5 * x * f(x)
def fgelu_grad(grad, x, y):
return grad * (y / x + y * (1 - np.tanh(g(x))) * g_grad(x))
shape = (3, 4)
x = mx.sym.Variable("x")
y = mx.sym.LeakyReLU(data=x, act_type="gelu")
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-0.1,high=0.1,size=shape).astype(dtype)
eps, rtol, atol = (7.5e-4, 1e-1, 1e-2) if dtype is np.float16 else (1e-4, 1e-2, 1e-4)
if dtype is np.float16:
xa /= 10.0
xa[abs(xa) < eps] = 0.01
ya = fgelu(xa)
ga = fgelu_grad(np.ones(shape).astype(dtype), xa, ya)
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_sigmoid():
def fsigmoid(a):
return np.divide(1.0, (1.0 + np.exp(-a)))
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.sigmoid(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
ya = fsigmoid(xa)
check_numeric_gradient(y, [xa], numeric_eps=1E-3)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ya * (1 - ya)])
@with_seed()
def test_shape_array():
for i in range(1,6):
shape = rand_shape_nd(i)
x = mx.sym.var('x')
y = mx.sym.shape_array(x)
xa = mx.nd.array(np.random.ranf(shape))
xg = mx.nd.empty(xa.shape)
ya = np.shape(xa)
yg = mx.nd.ones(ya)
exe = y.bind(ctx=default_context(), args={'x': xa},
args_grad={'x': xg})
exe.forward(is_train=True)
exe.backward([yg])
yo = exe.outputs[0].asnumpy()
same(yo, ya)
assert_almost_equal(xg.asnumpy(), np.zeros_like(xg.asnumpy()))
@with_seed()
def test_size_array():
for i in range(1,6):
shape = rand_shape_nd(i)
x = mx.sym.var('x')
y = mx.sym.size_array(x)
xa = mx.nd.array(np.random.ranf(shape))
xg = mx.nd.empty(xa.shape)
ya = np.size(xa)
yg = mx.nd.ones(ya)
exe = y.bind(ctx=default_context(), args={'x': xa},
args_grad={'x': xg})
exe.forward(is_train=True)
exe.backward([yg])
yo = exe.outputs[0].asnumpy()
same(yo, ya)
assert_almost_equal(xg.asnumpy(), np.zeros_like(xg.asnumpy()))
@with_seed()
def test_hard_sigmoid():
def fhardsigmoid(a, alpha=0.2, beta=0.5):
return np.maximum(np.zeros(a.shape, dtype=a.dtype),
np.minimum(np.ones(a.shape, dtype=a.dtype), alpha*a+beta))
def fhardsigmoid_grad(a, out_grad, alpha=0.2, beta=0.5):
orig_out = fhardsigmoid(a, alpha, beta)
res = out_grad * alpha
res[orig_out <= 0.0] = 0.0
res[orig_out >= 1.0] = 0.0
return res
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.hard_sigmoid(x)
for dtype in [np.float16, np.float32, np.float64]:
if dtype is np.float16:
rtol = 1e-2
else:
rtol = 1e-3
atol = 1e-3
eps = 1e-3
xa = np.random.uniform(low=-3.0,high=3.0,size=shape).astype(dtype)
# function not differentiable at x=2.5 and -2.5
xa[abs(xa-2.5) < eps] -= 2 * eps
xa[abs(xa+2.5) < eps] += 2 * eps
ya = fhardsigmoid(xa)
grad_xa = fhardsigmoid_grad(xa, np.ones(shape))
if dtype is not np.float16:
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [grad_xa], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_softsign():
def fsoftsign(a):
return np.divide(a, (1.0 + np.abs(a)))
def fsoftsign_grad(a):
return np.divide(1.0, np.square((1.0 + np.abs(a))))
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.softsign(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
ya = fsoftsign(xa)
ya_grad = fsoftsign_grad(xa)
check_numeric_gradient(y, [xa], numeric_eps=1E-3)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ya_grad])
@with_seed()
def test_binary_logic():
def _inner_test(forward_gt, logic_sym, x_shape, y_shape, test_scalar=True):
x = mx.symbol.Variable("x")
y = mx.symbol.Variable("y")
z = logic_sym(x, y)
x_npy = np.random.randint(0, 4, size=x_shape).astype(np.float32)
y_npy = np.random.randint(0, 4, size=y_shape).astype(np.float32)
exe = z.simple_bind(ctx=default_context(), x=x_shape, y=y_shape)
mx_out = exe.forward(is_train=True, x=x_npy, y=y_npy)[0].asnumpy()
assert_almost_equal(mx_out, forward_gt(x_npy, y_npy))
exe.backward()
if test_scalar:
z_lscalar = logic_sym(1, y)
z_rscalar = logic_sym(x, 1)
exe_lscalar = z_lscalar.simple_bind(ctx=default_context(), y=y_shape)
exe_rscalar = z_rscalar.simple_bind(ctx=default_context(), x=x_shape)
mx_lscalar_out = exe_lscalar.forward(is_train=True, y=y_npy)[0].asnumpy()
mx_rscalar_out = exe_rscalar.forward(is_train=True, x=x_npy)[0].asnumpy()
assert_almost_equal(mx_lscalar_out, forward_gt(1, y_npy))
assert_almost_equal(mx_rscalar_out, forward_gt(x_npy, 1))
exe_lscalar.backward()
exe_rscalar.backward()
# Test the no-broadcasting binary logic ops + scalar logic ops
_inner_test(forward_gt=lambda x, y: x == y,
logic_sym=lambda x, y: x == y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x > y,
logic_sym=lambda x, y: x > y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x >= y,
logic_sym=lambda x, y: x >= y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x < y,
logic_sym=lambda x, y: x < y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x <= y,
logic_sym=lambda x, y: x <= y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x != y,
logic_sym=lambda x, y: x != y, x_shape=(10, 10), y_shape=(10, 10))
# Test the broadcasting binary logic ops
_inner_test(forward_gt=lambda x, y: x == y,
logic_sym=lambda x, y: mx.sym.broadcast_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x > y,
logic_sym=lambda x, y: mx.sym.broadcast_greater(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x >= y,
logic_sym=lambda x, y: mx.sym.broadcast_greater_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x < y,
logic_sym=lambda x, y: mx.sym.broadcast_lesser(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x <= y,
logic_sym=lambda x, y: mx.sym.broadcast_lesser_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x != y,
logic_sym=lambda x, y: mx.sym.broadcast_not_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
@with_seed()
def test_unary_logic():
def reference(a, dtype):
return np.logical_not(a).astype(dtype)
shape = (3, 4)
xa = np.random.randint(-2, 2, size=shape).astype(np.float32)
mx_xa = mx.nd.array(xa)
mx_out = mx.nd.logical_not(mx_xa)
assert_almost_equal(mx_out.asnumpy(), reference(xa, dtype=xa.dtype))
x = mx.sym.Variable('x')
y = mx.sym.logical_not(data=x)
exe = y.simple_bind(ctx=default_context(), x=shape)
sym_out = exe.forward(is_train=True, x=mx_xa)[0]
assert_almost_equal(sym_out.asnumpy(), reference(xa, dtype=xa.dtype))
@with_seed()
def test_embedding():
in_dim = 10
out_dim = 4
batch = 24
data = mx.sym.Variable("data")
embed = mx.sym.Embedding(data=data, input_dim=in_dim, output_dim=out_dim, name="embed")
exe_test = embed.simple_bind(default_context(), grad_req={'data': 'null', 'embed_weight': 'write'}, data=(batch,))
arg_map = dict(zip(embed.list_arguments(), exe_test.arg_arrays))
grad_map = dict(zip(embed.list_arguments(), exe_test.grad_arrays))
np_data = np.random.randint(low=0, high=in_dim, size=batch)
np_weight = np.random.uniform(-0.01, 0.01, arg_map["embed_weight"].shape)
np_onehot = np.zeros((batch, in_dim))
np_onehot[np.arange(batch), np_data] = 1.0
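    # Embedding is equivalent to one_hot(data) @ weight, so the forward output and the
    # weight gradient can be checked against dense matrix products with np_onehot.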
# forward
arg_map["data"][:] = np_data
arg_map["embed_weight"][:] = np_weight
exe_test.forward(is_train=True)
# Non-zero atol required, as exposed by seed 781663739
rtol = 1e-5
atol = 1e-5
assert_almost_equal(exe_test.outputs[0].asnumpy(), np.dot(np_onehot, np_weight), rtol=rtol, atol=atol)
# backward
np_grad = np.random.uniform(-1, 1, exe_test.outputs[0].shape)
grad = mx.nd.zeros(np_grad.shape)
grad[:] = np_grad
exe_test.backward([grad])
assert_almost_equal(grad_map["embed_weight"].asnumpy(), np.dot(np_onehot.T, np_grad), rtol=rtol, atol=atol)
# check ops handle duplicate input correctly.
@with_seed()
def test_binary_op_duplicate_input():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = 5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:] = 3
out_grad = mx.nd.empty(shape)
out_grad[:] = 1
square = data * data
exe_square = square.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_square.forward(is_train=True)
assert_almost_equal(exe_square.outputs[0].asnumpy(), data_tmp * data_tmp)
exe_square.backward(out_grad)
assert_almost_equal(arr_grad.asnumpy(), 2.0 * data_tmp)
@with_seed()
def test_sign():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.sign(data)
exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = np.sign(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
    out_grad[:] = 2
    # sign() has zero gradient everywhere, so the expected input gradient is all zeros.
    npout_grad = np.zeros(shape)
exe_test.backward(out_grad)
assert_almost_equal(arr_grad.asnumpy(), npout_grad)
@with_seed()
def test_round_ceil_floor():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5.543
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]= 2
test = mx.sym.round(data) + mx.sym.ceil(data) + mx.sym.floor(data)
exe_test = test.bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = np.round(data_tmp) + np.ceil(data_tmp) + np.floor(data_tmp)
assert_almost_equal(out, npout)
@with_seed()
def test_trunc():
data_tmp = np.random.rand(3, 4) * 10 - 5
arr_data = mx.nd.array(data_tmp)
data = mx.symbol.Variable('data')
test = mx.sym.trunc(data)
exe_test = test.bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
# 'trunc' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
# Repro issue with seed 1660190454
npout = np.trunc(np.float32(data_tmp))
assert_almost_equal(out, npout)
@with_seed()
def test_rsqrt_cos_sin():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.rsqrt(data) + mx.sym.cos(data) + mx.sym.sin(data)
exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = 1/ np.sqrt(data_tmp) + np.cos(data_tmp) + np.sin(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
    out_grad[:] = 2
npout_grad = out_grad.asnumpy()
npout_grad = npout_grad * -(1.0 / (2.0 * data_tmp * np.sqrt(data_tmp))) + npout_grad * -1 * np.sin(data_tmp) + npout_grad * np.cos(data_tmp)
exe_test.backward(out_grad)
assert_almost_equal(arr_grad.asnumpy(), npout_grad)
@with_seed()
def test_maximum_minimum():
    data1 = mx.symbol.Variable('data1')
    data2 = mx.symbol.Variable('data2')
shape = (3, 4)
data_tmp1 = np.random.rand(3,4)
data_tmp2 = np.random.rand(3,4)
data_tmp1[:] = 2
data_tmp2[:] = 3
arr_data1 = mx.nd.array(data_tmp1)
arr_data2 = mx.nd.array(data_tmp2)
arr_grad1 = mx.nd.empty(shape)
arr_grad2 = mx.nd.empty(shape)
    test = mx.sym.maximum(data1, data2) + mx.sym.minimum(data1, data2)
exe_test = test.bind(default_context(), args=[arr_data1,arr_data2], args_grad=[arr_grad1,arr_grad2])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = np.maximum(data_tmp1,data_tmp2) + np.minimum(data_tmp1,data_tmp2)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = 2
mask1 = (data_tmp1 > data_tmp2).astype('float')
mask2 = (data_tmp1 < data_tmp2).astype('float')
npout_grad1 = npout_grad * mask1 + npout_grad * mask2
npout_grad2 = (npout_grad - npout_grad * mask1) + (npout_grad - npout_grad * mask2)
assert_almost_equal(arr_grad1.asnumpy(), npout_grad1)
assert_almost_equal(arr_grad2.asnumpy(), npout_grad2)
@with_seed()
def test_maximum_minimum_scalar():
data1 = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp1 = np.random.rand(3,4)
data_tmp1[:] = 2
arr_data1 = mx.nd.array(data_tmp1)
arr_grad1 = mx.nd.empty(shape)
test = mx.sym.maximum(data1,3) + mx.sym.maximum(9,data1) + mx.sym.minimum(5,data1) + mx.sym.minimum(data1,4)
exe_test = test.bind(default_context(), args=[arr_data1], args_grad=[arr_grad1])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = np.maximum(data_tmp1,3) + np.maximum(9,data_tmp1) + np.minimum(5,data_tmp1) + np.minimum(data_tmp1,4)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = 2
mask1 = (data_tmp1 > 3).astype('float')
mask2 = (9 > data_tmp1).astype('float')
mask3 = (5 < data_tmp1).astype('float')
mask4 = (data_tmp1 < 4).astype('float')
npout_grad1 = npout_grad * mask1 + (npout_grad - npout_grad * mask2) + (npout_grad - npout_grad * mask3) + npout_grad * mask4
assert_almost_equal(arr_grad1.asnumpy(), npout_grad1)
@with_seed()
def test_abs():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.abs(data)
exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = abs(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
    out_grad[:] = 2
npout_grad = out_grad.asnumpy()
npout_grad = npout_grad * np.sign(data_tmp)
exe_test.backward(out_grad)
assert_almost_equal(arr_grad.asnumpy(), npout_grad)
def check_deconvolution_forward_backward(input_shape, num_filter, kernel, stride, pad):
"""configure A: input --> conv --> deconv --> output.
the convolution and deconvoluiton has similar parameter which ensure
the input shape is the same as output, and the same weights between conv
and deconv;
If the input value of forward() and backwrad() is the same, then
the output value of them should also the same;
"""
assert input_shape[1] == num_filter
data = mx.sym.Variable(name="data")
conv = mx.sym.Convolution(
data=data, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "conv")
deconv = mx.sym.Deconvolution(
data=conv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "deconv")
arg_names = deconv.list_arguments()
arg_shapes, out_shapes, _ = deconv.infer_shape(data=input_shape)
input_data = mx.random.uniform(-5, 5, input_shape, ctx=mx.cpu()).copyto(default_context())
out_grad = input_data
args = {}
args["data"] = input_data
args['conv_weight'] = args['deconv_weight'] = mx.random.normal(0, 1,
(num_filter, input_shape[1]) + kernel, ctx=mx.cpu()).copyto(default_context())
args_grad = [mx.nd.empty(s) for s in arg_shapes]
exe = deconv.bind(default_context(), args=args, args_grad=args_grad)
exe.forward(is_train=True)
out = exe.outputs[0].asnumpy()
exe.backward(out_grad)
assert_almost_equal(out, args_grad[0].asnumpy(), rtol=1E-3, atol=1e-3)
args_grad_addto_npy = [np.random.normal(size=s) for s in arg_shapes]
args_grad_addto = [mx.nd.array(ele) for ele in args_grad_addto_npy]
exe = deconv.bind(default_context(), args=args, args_grad=args_grad_addto, grad_req="add")
exe.forward(is_train=True)
out = exe.outputs[0].asnumpy()
exe.backward(out_grad)
assert_almost_equal(out + args_grad_addto_npy[0], args_grad_addto[0].asnumpy(), rtol=1e-3, atol=1e-3)
def check_deconvolution_gradient(input_shape, num_filter, pad):
"""configure A: input --> conv --> output.
configure B: input --> deconv --> output
the convolution and deconvoluiton has similar parameter which ensure
the input shape is the same as output;
During backward(), if the input of A equals output of B, and the output
of A equals input of B, then the grad of weight should be the same;
"""
ndim = len(pad)
stride = (1,) * ndim
kernel = tuple(2 * np.array(pad) + 1)
data_conv = mx.sym.Variable(name="data_conv")
conv = mx.sym.Convolution(
data=data_conv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "conv")
data_deconv = mx.sym.Variable(name="data_deconv")
deconv = mx.sym.Deconvolution(
data=data_deconv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "deconv")
conv_data = mx.random.uniform(-5, 5, input_shape, ctx=mx.cpu()).copyto(default_context())
conv_args = {}
conv_args["data_conv"] = conv_data
conv_args['conv_weight'] = \
mx.random.normal(0, 1,(num_filter, input_shape[1]) + kernel, ctx=mx.cpu()).copyto(default_context())
conv_args_grad = [mx.nd.zeros(conv_data.shape),
mx.nd.zeros((num_filter, input_shape[1]) + kernel)]
exe_conv = conv.bind(default_context(), args=conv_args, args_grad=conv_args_grad)
exe_conv.forward(is_train=True)
conv_out_grad = mx.random.normal(0, 2, exe_conv.outputs[0].shape, ctx=mx.cpu()).copyto(default_context())
exe_conv.backward(conv_out_grad)
deconv_data = conv_out_grad
deconv_args = {}
deconv_args['data_deconv'] = deconv_data
deconv_args['deconv_weight'] = conv_args['conv_weight']
deconv_args_grad = [mx.nd.zeros(deconv_data.shape),
mx.nd.zeros((num_filter, input_shape[1]) + kernel)]
deconv_addto_args_grad_npy = [np.random.normal(size=deconv_data.shape),
np.random.normal(size=(num_filter, input_shape[1]) + kernel)]
deconv_addto_args_grad = [mx.nd.array(deconv_addto_args_grad_npy[0]),
mx.nd.array(deconv_addto_args_grad_npy[1])]
exe_deconv = deconv.bind(default_context(), args=deconv_args, args_grad=deconv_args_grad)
exe_deconv.forward(is_train=True)
deconv_out_grad = conv_data[:]
exe_deconv.backward(deconv_out_grad)
assert_almost_equal(conv_args_grad[1].asnumpy(), deconv_args_grad[1].asnumpy(), rtol=1e-3, atol=1e-2)
# Test AddTo
exe_deconv_addto = deconv.bind(default_context(), args=deconv_args,
args_grad=deconv_addto_args_grad,
grad_req="add")
exe_deconv_addto.forward(is_train=True)
deconv_out_grad = conv_data[:]
exe_deconv_addto.backward(deconv_out_grad)
assert_almost_equal(conv_args_grad[1].asnumpy() + deconv_addto_args_grad_npy[1],
deconv_addto_args_grad[1].asnumpy(), rtol=1e-3, atol=1e-2)
def check_deconvolution_target_shape(input_shape, kernel, stride, pad, adj, target_shape=None):
data = mx.sym.Variable(name="data")
if target_shape:
deconv = mx.sym.Deconvolution(
data=data, kernel=kernel, stride=stride, pad=pad, adj=adj, num_filter=5,
target_shape = target_shape)
else:
deconv = mx.sym.Deconvolution(
data=data, kernel=kernel, stride=stride, pad=pad, adj=adj, num_filter=5)
arg_names = deconv.list_arguments()
arg_shapes, out_shapes, _ = deconv.infer_shape(data=input_shape)
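    # Without an explicit target_shape the deconvolution output size is
    # (in - 1) * stride - 2 * pad + kernel + adj, which comes to 8 for the cases below.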
default_target_size = 8
if target_shape is None:
target_shape = (default_target_size,) * len(kernel)
assert out_shapes[0] == (input_shape[0], 5) + target_shape
@with_seed()
def test_deconvolution():
# 2D
check_deconvolution_target_shape(
input_shape = (2,3,4,4),
kernel = (3,3),
stride = (2,2),
target_shape = (8,8),
pad = (99,99), # will be ignored
adj = (101,101), # will be ignored
)
check_deconvolution_target_shape(
input_shape = (2,3,4,4),
kernel = (3,3),
stride = (2,2),
pad = (1,1),
adj = (1,1),
)
check_deconvolution_forward_backward(
input_shape = (1,1,5,5),
num_filter = 1,
kernel = (3,3),
stride = (1,1),
pad = (1,1)
)
check_deconvolution_forward_backward(
input_shape = (32,3,28,28),
num_filter = 3,
kernel = (3,3),
stride = (1,1),
pad = (1,1)
)
check_deconvolution_forward_backward(
input_shape = (10, 3, 403, 403),
num_filter = 3,
kernel = (7,7),
stride = (5,5),
pad = (2,2)
)
check_deconvolution_gradient(
input_shape = (1,3,5,5),
num_filter = 3,
pad = (1,1)
)
check_deconvolution_gradient(
input_shape = (5,3,100,100),
num_filter = 3,
pad = (3,3)
)
# 1D
check_deconvolution_target_shape(
input_shape = (2,3,4),
kernel = (3,),
stride = (2,),
target_shape = (8,),
pad = (99,), # will be ignored
adj = (101,), # will be ignored
)
check_deconvolution_target_shape(
input_shape = (2,3,4),
kernel = (3,),
stride = (2,),
pad = (1,),
adj = (1,),
)
check_deconvolution_forward_backward(
input_shape = (1,1,5),
num_filter = 1,
kernel = (3,),
stride = (1,),
pad = (1,)
)
check_deconvolution_forward_backward(
input_shape = (32,3,28),
num_filter = 3,
kernel = (3,),
stride = (1,),
pad = (1,)
)
check_deconvolution_forward_backward(
input_shape = (10, 3, 403),
num_filter = 3,
kernel = (7,),
stride = (5,),
pad = (2,)
)
check_deconvolution_gradient(
input_shape = (1,3,5),
num_filter = 3,
pad = (1,)
)
check_deconvolution_gradient(
input_shape = (5,3,100),
num_filter = 3,
pad = (3,)
)
def check_nearest_upsampling_with_shape(shapes, scale, root_scale):
arr = {'arg_%d'%i: mx.random.uniform(-10.0, 10.0, shape, ctx=mx.cpu()).copyto(default_context()) for i, shape in zip(range(len(shapes)), shapes)}
arr_grad = {'arg_%d'%i: mx.nd.zeros(shape) for i, shape in zip(range(len(shapes)), shapes)}
up = mx.sym.UpSampling(*[mx.sym.Variable('arg_%d'%i) for i in range(len(shapes))], sample_type='nearest', scale=root_scale)
exe = up.bind(default_context(), args=arr, args_grad=arr_grad)
exe.forward(is_train=True)
exe.backward(exe.outputs)
for k in range(len(shapes)):
name = 'arg_%d'%k
assert_allclose(arr[name].asnumpy()*root_scale**2*scale**(2*k), arr_grad[name].asnumpy(), rtol=1e-4)
def check_bilinear_upsampling_with_shape(shapes, scale, root_scale):
arr = {'arg_%d'%i: mx.random.uniform(-10.0, 10.0, shape, ctx=mx.cpu()).copyto(default_context()) for i, shape in zip(range(len(shapes)), shapes)}
arr_grad = {'arg_%d'%i: mx.nd.zeros(shape) for i, shape in zip(range(len(shapes)), shapes)}
up = mx.sym.UpSampling(*[mx.sym.Variable('arg_%d'%i) for i in range(len(shapes))], sample_type='bilinear', scale=root_scale)
exe = up.bind(default_context(), args=arr, args_grad=arr_grad)
exe.forward(is_train=True)
exe.backward(exe.outputs)
for k in range(len(shapes)):
name = 'arg_%d'%k
assert_allclose(arr[name].asnumpy()*root_scale**2*scale**(2*k), arr_grad[name].asnumpy(), rtol=1e-4)
@with_seed()
def test_nearest_upsampling():
for root_scale in [1,2,3]:
for scale in [1,2,3]:
for num_shape in [1,2,3]:
for base in [1,2,3]:
shapes = [(1,3,base*root_scale*scale**(num_shape-1-i),base*root_scale*scale**(num_shape-1-i)) for i in range(num_shape)]
check_nearest_upsampling_with_shape(shapes, scale, root_scale)
@with_seed()
def test_batchnorm_training():
def check_batchnorm_training(stype):
for shape in [(2, 3), (2, 3, 2, 2)]:
data_tmp = np.random.normal(-0.1, 0.1, size=shape)
s = shape[1],
gamma = np.ones(s)
beta = np.ones(s)
gamma[1] = 3
beta[0] = 3
rolling_mean = np.random.uniform(size=s)
rolling_std = np.random.uniform(size=s)
data = mx.symbol.Variable('data', stype=stype)
in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(gamma).tostype(stype),
mx.nd.array(beta).tostype(stype)]
mean_std = [mx.nd.array(rolling_mean).tostype(stype), mx.nd.array(rolling_std).tostype(stype)]
test = mx.symbol.BatchNorm_v1(data, fix_gamma=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm_v1(data, fix_gamma=True, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=True, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm_v1(data, fix_gamma=False)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=False)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm_v1(data, fix_gamma=False, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=False, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
# Test varying channel axis
dim = len(shape)
for chaxis in range(-dim, dim):
chaxis_true = chaxis
if chaxis < 0:
chaxis_true = dim + chaxis
shapex = shape
channel_count = shapex[chaxis_true]
data_tmp = np.random.normal(-0.1, 0.1, size=shapex)
gamma = np.ones(channel_count)
beta = np.ones(channel_count)
if channel_count > 1:
gamma[1] = 3
beta[0] = 3
in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(gamma).tostype(stype),
mx.nd.array(beta).tostype(stype)]
xrolling_mean = np.random.uniform(size=channel_count)
xrolling_std = np.random.uniform(size=channel_count)
xmean_std = [mx.nd.array(xrolling_mean).tostype(stype),
mx.nd.array(xrolling_std).tostype(stype)]
test = mx.symbol.BatchNorm(data, fix_gamma=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=True, use_global_stats=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=False, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=False, use_global_stats=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
check_batchnorm_training('default')
@with_seed()
def test_batchnorm():
momentum = 0.9
epsilon = 1e-5
def _test_batchnorm_impl(op, shape, axis, cudnn_off, output_mean_var):
print(str((op, shape, axis, cudnn_off)))
kwargs = dict(output_mean_var=output_mean_var)
if op == mx.nd.contrib.SyncBatchNorm:
if axis != 1:
return
key = str(op) + str(shape) + str(axis)
kwargs.update(dict(key=key))
if cudnn_off:
return
else:
kwargs.update(dict(axis=axis, cudnn_off=cudnn_off))
nch = shape[axis]
bn_gamma = mx.nd.random.uniform(shape=(nch,))
bn_gamma.attach_grad()
bn_beta = mx.nd.random.uniform(shape=(nch,))
bn_beta.attach_grad()
bn_running_mean = mx.nd.zeros(nch)
bn_running_var = mx.nd.ones(nch)
running_mean = mx.nd.zeros(nch)
running_var = mx.nd.ones(nch)
num_iters = 10
expand_shape = [1] * len(shape)
expand_shape[axis] = shape[axis]
for _ in range(num_iters):
data = mx.nd.random.uniform(shape=shape)
data.attach_grad()
ograd = mx.nd.random.uniform(shape=shape)
with mx.autograd.record():
output = op(data, bn_gamma, bn_beta,
bn_running_mean, bn_running_var,
momentum=momentum, eps=epsilon,
fix_gamma=False, **kwargs)
if output_mean_var:
output, output_mean, output_std = output
output.backward(ograd)
mx.nd.waitall()
data_mean = data.mean(
axis=axis, exclude=True, keepdims=True)
data_var = (data - data_mean).square().mean(axis=axis,
exclude=True,
keepdims=True)
target_output = (data - data_mean) / \
(data_var + epsilon).sqrt() * \
bn_gamma.reshape(expand_shape) + \
bn_beta.reshape(expand_shape)
# squeeze data_mean and data_var
data_mean_flat = data_mean.squeeze()
data_var_flat = data_var.squeeze()
running_mean = running_mean * momentum + \
data_mean_flat * (1 - momentum)
running_var = running_var * momentum + \
data_var_flat * (1 - momentum)
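            # Reference batch-norm gradients w.r.t. data, gamma and beta, derived via
            # the chain rule through the per-batch mean and variance (standard BN
            # backward formulas).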
W = bn_gamma.reshape(expand_shape)
dnx = ograd * W
xsm = data - data_mean
nd = 1.0 / mx.nd.sqrt(data_var + epsilon)
nx = xsm * nd
m = np.prod(shape) / shape[axis]
dvar = (dnx * xsm).sum(axis=axis, keepdims=True,
exclude=True) * (-0.5) * mx.nd.power(nd, 3)
dmean = -nd * dnx.sum(axis=axis, keepdims=True, exclude=True) - \
dvar * xsm.mean(axis=axis, keepdims=True,
exclude=True) * 2.0
dX = dnx * nd + dvar * xsm * (2.0 / m) + dmean * (1.0 / m)
dW = (ograd * nx).sum(axis=axis, exclude=True)
db = ograd.sum(axis=axis, exclude=True)
atol = 1e-2
rtol = 1e-2
if output_mean_var:
assert_almost_equal(output_mean.asnumpy(),
data_mean_flat.asnumpy(),
atol=atol, rtol=rtol)
if op != mx.nd.contrib.SyncBatchNorm:
assert_almost_equal(output_std.asnumpy(),
(1.0 / (data_var_flat +
epsilon).sqrt()).asnumpy(),
atol=atol, rtol=rtol)
else:
assert_almost_equal(output_std.asnumpy(),
data_var_flat.asnumpy(),
atol=atol, rtol=rtol)
assert_almost_equal(output.asnumpy(), target_output.asnumpy(),
atol=atol, rtol=rtol)
assert_almost_equal(bn_running_mean.asnumpy(
), running_mean.asnumpy(), atol=atol, rtol=rtol)
assert_almost_equal(bn_running_var.asnumpy(
), running_var.asnumpy(), atol=atol, rtol=rtol)
assert_almost_equal(data.grad.asnumpy(),
dX.asnumpy(), atol=atol, rtol=rtol)
assert_almost_equal(
bn_gamma.grad.asnumpy(), dW.asnumpy(), atol=atol, rtol=rtol)
assert_almost_equal(
bn_beta.grad.asnumpy(), db.asnumpy(), atol=atol, rtol=rtol)
for op in [mx.nd.BatchNorm, mx.nd.contrib.SyncBatchNorm]:
for shape in [(24, 2), (24, 3, 4), (24, 4, 4, 4), (24, 5, 6, 4, 4)]:
for axis in range(len(shape)):
for cudnn_off in [False, True]:
for output_mean_var in [False, True]:
_test_batchnorm_impl(op, shape, axis,
cudnn_off, output_mean_var)
@with_seed()
def test_convolution_grouping():
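    # A grouped convolution must be equivalent to slicing data/weight/bias into
    # num_group groups, convolving each group independently, and concatenating the
    # results.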
for dim in [1, 2, 3]:
num_filter = 4
for num_group in [1, 2]:
kernel = (3,) * dim
shape = (1, 4) + (9,) * dim
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b')
y1 = mx.sym.Convolution(data=x, weight=w, bias=b, num_filter=num_filter, num_group=num_group, kernel=kernel)
xslice = mx.sym.SliceChannel(data=x, num_outputs=num_group, axis=1)
wslice = mx.sym.SliceChannel(data=w, num_outputs=num_group, axis=0)
bslice = mx.sym.SliceChannel(data=b, num_outputs=num_group, axis=0)
y2 = mx.sym.Concat(*[mx.sym.Convolution(data=xslice[i], weight=wslice[i], bias=bslice[i],
num_filter=num_filter//num_group, kernel=kernel)
for i in range(num_group)])
exe1 = y1.simple_bind(default_context(), x=shape)
exe2 = y2.simple_bind(default_context(), x=shape, w=(num_filter, shape[1]//num_group) + kernel, b=(num_filter,))
for arr1, arr2 in zip(exe1.arg_arrays, exe2.arg_arrays):
arr1[:] = np.float32(np.random.normal(size=arr1.shape))
arr2[:] = arr1
exe1.forward(is_train=True)
exe1.backward(exe1.outputs[0])
exe2.forward(is_train=True)
exe2.backward(exe2.outputs[0])
for arr1, arr2 in zip(exe1.outputs + exe1.grad_arrays, exe2.outputs + exe2.grad_arrays):
np.testing.assert_allclose(arr1.asnumpy(), arr2.asnumpy(), rtol=1e-3, atol=1e-3)
@unittest.skip("Flaky test https://github.com/apache/incubator-mxnet/issues/14052")
@with_seed()
def test_depthwise_convolution():
for dim in [1,2]:
for num_base in [1, 4, 16, 32, 64]:
for kernel_x in [3, 5]:
for stride_x in [1, 2]:
for pad_x in [0, 1]:
for in_size in [7, 32]:
kernel = (kernel_x,) * dim
stride = (stride_x,) * dim
pad = (pad_x,) * dim
num_filter = num_base
num_group = num_base
shape = (2, num_base) + (in_size,) * dim
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b')
y1 = mx.sym.Convolution(data=x, weight=w, bias=b, num_filter=num_filter, num_group=num_group,
kernel=kernel, stride=stride, pad=pad)
xslice = mx.sym.SliceChannel(data=x, num_outputs=num_group, axis=1)
wslice = mx.sym.SliceChannel(data=w, num_outputs=num_group, axis=0)
bslice = mx.sym.SliceChannel(data=b, num_outputs=num_group, axis=0)
y2 = mx.sym.Concat(*[mx.sym.Convolution(data=xslice[i], weight=wslice[i], bias=bslice[i],
num_filter=num_filter//num_group, kernel=kernel,
stride=stride, pad=pad)
for i in range(num_group)])
dev = default_context()
exe1 = y1.simple_bind(dev, x=shape)
exe2 = y2.simple_bind(dev, x=shape, w=(num_filter, shape[1]//num_group)+kernel,
b=(num_filter,))
for arr1, arr2 in zip(exe1.arg_arrays, exe2.arg_arrays):
arr1[:] = np.random.normal(size=arr1.shape)
arr2[:] = arr1
exe1.forward(is_train=True)
exe1.backward(exe1.outputs[0])
exe2.forward(is_train=True)
exe2.backward(exe2.outputs[0])
for arr1, arr2 in zip(exe1.outputs + exe1.grad_arrays, exe2.outputs + exe2.grad_arrays):
np.testing.assert_allclose(arr1.asnumpy(), arr2.asnumpy(), rtol=1e-3, atol=1e-3)
def gen_broadcast_data(idx):
# Manually set test cases
binary_op_data_shape = np.array(
[[[2, 5, 1, 30, 7], [1, 5, 448, 30, 1]],
[[10, 49, 1, 77, 17], [10, 1, 2, 1, 17]],
[[13, 2, 65, 2, 1], [13, 1, 65, 1, 225]],
[[9, 434, 4, 2, 37], [9, 1, 4, 1, 37]],
[[2, 52, 1, 4, 1], [1, 52, 60, 1, 37]],
[[1, 23, 7, 122, 50], [2, 1, 7, 1, 50]],
[[1, 17, 1, 5, 1], [22, 1, 2, 1, 28]],
[[29, 1, 2, 1, 8], [29, 22, 1, 130, 1]],
[[2, 36, 1, 427, 3], [1, 36, 11, 427, 1]],
[[1, 2, 1, 100, 7], [1, 2, 448, 100, 1]],
[[1, 2, 495, 77, 7], [1, 2, 1, 1, 7]],
[[1, 43, 65, 2, 1], [1, 43, 65, 1, 225]],
[[1, 92, 434, 2, 2], [1, 92, 1, 2, 2]],
[[1, 92, 1, 4, 1], [1, 92, 134, 1, 17]],
[[1, 53, 2, 122, 143], [1, 1, 2, 1, 143]],
[[1, 179, 1, 87, 17], [1, 179, 1, 1, 17]],
[[1, 1, 17, 5, 1], [1, 22, 1, 1, 28]],
[[1, 2, 1, 1, 8], [1, 2, 52, 430, 1]],
[[1, 163, 1, 22, 3], [1, 163, 116, 22, 1]],
[[1, 1, 44, 30, 7], [1, 1, 44, 30, 1]],
[[1, 1, 1, 1, 28], [1, 127, 1, 5, 28]],
[[1, 2, 394, 38, 1], [1, 2, 394, 38, 16]],
[[1, 10, 49, 77, 17], [1, 1, 1, 1, 17]],
[[1, 431, 6, 2, 225], [1, 1, 6, 2, 225]],
[[1, 15, 1, 28, 1], [1, 15, 1, 28, 463]],
[[1, 129, 2, 48, 96], [1, 129, 2, 1, 1]],
[[1, 1, 403, 17, 2], [1, 44, 403, 17, 2]],
[[1, 1, 65, 2, 22], [1, 1, 65, 1, 1]],
[[1, 24, 103, 17, 18], [1, 24, 1, 1, 1]],
[[1, 1, 1, 1, 2], [1, 24, 194, 50, 1]],
[[1, 1, 107, 84, 9], [1, 1, 1, 1, 1]]])
if idx < binary_op_data_shape.shape[0]:
l_shape = binary_op_data_shape[idx][0]
r_shape = binary_op_data_shape[idx][1]
else:
        # Generate random data with ndim between 1-5 and all shape dims between 1-5
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
l_same_dim = np.random.randint(0, 5)
r_same_dim = np.random.randint(0, 5)
l_axis_flags = np.random.randint(0, 2, size=ndim)
r_axis_flags = np.random.randint(0, 2, size=ndim)
if l_same_dim == 4:
l_axis_flags = np.ones(ndim)
if r_same_dim == 4:
r_axis_flags = np.ones(ndim)
l_shape = shape.copy()
r_shape = shape.copy()
l_shape[np.where(l_axis_flags == 0)] = 1
r_shape[np.where(r_axis_flags == 0)] = 1
return [np.random.random(l_shape), np.random.random(r_shape)]
def gen_broadcast_data_int(idx):
    d = gen_broadcast_data(idx)
return [np.round(d[0]*100).astype(int), np.round(d[1]*100).astype(int)]
def gen_binary_data(dummy):
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
#print("gen shape {}".format(shape))
return [np.random.random(shape), np.random.random(shape)]
def gen_binary_data_int(dummy):
    d = gen_binary_data(dummy)
return [np.round(d[0]*100).astype(int), np.round(d[1]*100).astype(int)]
def check_binary_op_forward(symbol, baseline, gen_data, rtol=1e-3, atol=1e-5, mx_nd_func=None):
sample_num = 200
for i in range(sample_num):
d = gen_data(i)
y = symbol.bind(default_context(), args={'a': mx.nd.array(d[0]), 'b': mx.nd.array(d[1])})
y.forward(is_train=True)
y = y.outputs[0].asnumpy()
x = baseline(d[0], d[1]).astype(y.dtype)
#np.set_printoptions(precision=20)
a = d[0]
b = d[1]
#print("a: {} {}".format(a.dtype, a))
#print("a: {} {}".format(b.dtype, b))
#print("x: {} {}".format(x.dtype, x))
#print("y: {} {}".format(y.dtype, y))
if mx_nd_func is not None:
d0 = mx.nd.array(d[0], dtype=d[0].dtype)
d1 = mx.nd.array(d[1], dtype=d[1].dtype)
assert_almost_equal(y, mx_nd_func(d0, d1).asnumpy(), rtol=rtol, atol=atol)
idx = np.abs(x-y) > atol+rtol*np.abs(x)
if idx.any():
import binascii
np.set_printoptions(precision=20)
logging.error('found precision problem:')
d[0] = np.broadcast_to(d[0], x.shape)
d[1] = np.broadcast_to(d[1], x.shape)
logging.error('input a: {}'.format(d[0][idx]))
logging.error('input b: {}'.format(d[1][idx]))
logging.error("output x: {} {}".format(x.dtype, x))
logging.error("output y: {} {}".format(y.dtype, y))
def ftohex(xs):
import struct
return list(map(lambda x: binascii.hexlify(struct.pack('d', x)), xs.flatten()))
logging.error('output x in baseline(a, b): {}'.format(x[idx]))
logging.error('output y in symbol(a, b): {}'.format(y[idx]))
logging.error('output x in baseline(a,b) hex: {}'.format(ftohex(x[idx])))
logging.error('output y in symbol(a,b) hex: {}'.format(ftohex(y[idx])))
logging.error('input a hex: {}'.format(ftohex(d[0][idx])))
            logging.error('input b hex: {}'.format(ftohex(d[1][idx])))
logging.error('diff: {}'.format(np.abs(x-y)[idx] - atol-rtol*np.abs(x)[idx]))
assert_allclose(y, x, rtol=rtol, atol=atol)
def check_binary_op_backward(symbol, baseline, gen_data, rtol=1e-3, atol=1e-5):
sample_num = 200
for i in range(sample_num):
d = gen_data(i)
out = np.random.random((d[0] + d[1]).shape)
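        # reduce_op sums the full-shape gradient over any broadcast axes so it can be
        # compared with the gradient of the (possibly smaller) original operand.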
def reduce_op(shape, x):
if shape == x.shape:
return x
keepdims_shape = list(x.shape)
for i in range(len(shape)):
if x.shape[i] != shape[i]:
keepdims_shape[i] = 1
x = np.sum(x, axis=i).reshape(keepdims_shape)
return x
baseline_grad1, baseline_grad2 = baseline(out, d[0], d[1])
x_1 = reduce_op(d[0].shape, baseline_grad1)
x_2 = reduce_op(d[1].shape, baseline_grad2)
y_1 = mx.nd.empty(d[0].shape)
y_2 = mx.nd.empty(d[1].shape)
y = symbol.bind(default_context(), args={'a': mx.nd.array(d[0]), 'b': mx.nd.array(d[1])},
args_grad=[y_1, y_2])
y.forward(is_train=True)
y.backward([mx.nd.array(out)])
assert_allclose(y_1.asnumpy(), x_1, rtol=rtol, atol=atol)
assert_allclose(y_2.asnumpy(), x_2, rtol=rtol, atol=atol)
@with_seed()
def test_binary_op():
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
def test_bplus(a, b):
c = a + b
check_binary_op_forward(c, lambda a, b: a + b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, g_out), gen_binary_data)
def test_bminus(a, b):
c = a - b
check_binary_op_forward(c, lambda a, b: a - b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, - g_out), gen_binary_data)
def test_bmul(a, b):
c = a * b
check_binary_op_forward(c, lambda a, b: a * b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * b, g_out * a), gen_binary_data)
def test_bdiv(a, b):
c = a / b
check_binary_op_forward(c, lambda a, b: a / b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out / b, - g_out * a / (b * b)), gen_binary_data)
def test_bmod(a, b):
# Python and numpy operate only in double so to avoid numerical errors we have to use
# doubles as well. This was a flaky test before when using float32. seed 1688524483, 1768433044
#c = a % b
c = mx.sym.cast(a, dtype='float64') % mx.sym.cast(b, dtype='float64')
# '%' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
check_binary_op_forward(c, lambda a, b: np.float32(a) % np.float32(b), gen_binary_data, rtol=0, atol=0)
check_binary_op_backward(c,
lambda g_out, a, b: (g_out, - g_out * (np.float32(a) // np.float32(b))), gen_binary_data)
def test_bmod_int(a, b):
c = mx.sym.cast(a, dtype='int32') % mx.sym.cast(b, dtype='int32')
check_binary_op_forward(c, lambda a, b: a % b, gen_binary_data_int)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_binary_data_int)
def test_bpow(a, b):
c = a ** b
check_binary_op_forward(c, lambda a, b: a ** b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * a **(b - 1) * b,
g_out * a ** b * np.log(a)), gen_binary_data)
def test_bneq(a, b):
c = a != b
# '!=' is sensitive to the precision of the comparison. Force numpy to match mxnet's float32.
# Issue exposed with seed 1644387363
check_binary_op_forward(c, lambda a, b: (np.float32(a) != np.float32(b)).astype(a.dtype), gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_binary_data)
test_bplus(a, b)
test_bminus(a, b)
test_bmul(a, b)
test_bdiv(a, b)
test_bmod(a, b)
test_bmod_int(a, b)
test_bpow(a, b)
test_bneq(a, b)
@with_seed()
def test_broadcast_binary_op():
def check_bmaxmin_gradient(test_sym, x, y, delta, rtol, atol):
"""This function ensures that checking the numerical gradient of
broadcast_max/min is not crossing the boundary y=x where there
is no gradient definition at those sigularities."""
x_max = np.max(x)
y = x_max + 2 * delta + np.random.random(y.shape)
check_numeric_gradient(test_sym, [x, y], numeric_eps=delta, rtol=rtol, atol=atol)
x_min = np.min(x)
y = x_min - 2 * delta - np.random.random(y.shape)
check_numeric_gradient(test_sym, [x, y], numeric_eps=delta, rtol=rtol, atol=atol)
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
def test_bplus(a, b):
c = mx.sym.broadcast_plus(a, b)
check_binary_op_forward(c, lambda a, b: a + b, gen_broadcast_data, mx_nd_func=mx.nd.add)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, g_out), gen_broadcast_data)
def test_bminus(a, b):
c = mx.sym.broadcast_minus(a, b)
check_binary_op_forward(c, lambda a, b: a - b, gen_broadcast_data, mx_nd_func=mx.nd.subtract)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, - g_out), gen_broadcast_data)
def test_bmul(a, b):
c = mx.sym.broadcast_mul(a, b)
check_binary_op_forward(c, lambda a, b: a * b, gen_broadcast_data, mx_nd_func=mx.nd.multiply)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * b, g_out * a), gen_broadcast_data)
def test_bdiv(a, b):
c = mx.sym.broadcast_div(a, b)
check_binary_op_forward(c, lambda a, b: a / b, gen_broadcast_data, mx_nd_func=mx.nd.divide)
check_binary_op_backward(c, lambda g_out, a, b: (g_out / b, - g_out * a / (b * b)), gen_broadcast_data)
def test_bmod(a_, b_):
# Python and numpy operate only in double so to avoid numerical errors we have to use
# doubles as well. This was a flaky test before when using float32. seed 1688524483, 1768433044
a = mx.sym.cast(a_, dtype='float64')
b = mx.sym.cast(b_, dtype='float64')
# '%' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
c = mx.sym.broadcast_mod(a, b)
check_binary_op_forward(c, lambda a, b: a % b, gen_broadcast_data, atol=1, mx_nd_func=mx.nd.modulo)
check_binary_op_backward(c,
lambda g_out, a, b: (g_out, - g_out * (np.float32(a) // np.float32(b))), gen_binary_data)
def test_bmod_int(a, b):
c = mx.sym.broadcast_mod(mx.sym.cast(a, dtype='int32'), mx.sym.cast(b, dtype='int32'))
check_binary_op_forward(c, lambda a, b: a % b, gen_broadcast_data_int, mx_nd_func=mx.nd.modulo)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_broadcast_data_int)
def test_bpow(a, b):
c = mx.sym.broadcast_power(a, b)
check_binary_op_forward(c, lambda a, b: a ** b, gen_broadcast_data, mx_nd_func=mx.nd.power)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * a **(b - 1) * b,
g_out * a ** b * np.log(a)), gen_broadcast_data)
def test_bequal(a, b):
c = mx.sym.broadcast_equal(a, b)
check_binary_op_forward(c, lambda a, b: (a == b).astype(a.dtype), gen_broadcast_data_int,
mx_nd_func=mx.nd.equal)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_broadcast_data_int)
def test_bmax(a, b):
c = mx.sym.broadcast_maximum(a, b)
check_binary_op_forward(c, lambda x, y: np.maximum(x, y), gen_broadcast_data, mx_nd_func=mx.nd.maximum)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bmin(a, b):
c = mx.sym.broadcast_minimum(a, b)
check_binary_op_forward(c, lambda x, y: np.minimum(x, y), gen_broadcast_data, mx_nd_func=mx.nd.minimum)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_band(a, b):
c = mx.sym.broadcast_logical_and(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_and(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_and)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bor(a, b):
c = mx.sym.broadcast_logical_or(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_or(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_or)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bxor(a, b):
c = mx.sym.broadcast_logical_xor(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_xor(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_xor)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
test_bplus(a, b)
test_bminus(a, b)
test_bmul(a, b)
test_bdiv(a, b)
test_bmod(a, b)
test_bmod_int(a, b)
test_bpow(a, b)
test_bequal(a, b)
test_bmax(a, b)
test_bmin(a, b)
test_band(a, b)
test_bor(a, b)
test_bxor(a, b)
@with_seed()
def test_run_convolution_dilated_impulse_response(dil=(1,1), kernel_shape=(3,3), verbose=False):
dim = len(dil)
assert(len(kernel_shape) == dim)
# Input for spike response
data_size = 33
data_shape = (1, 1) + (data_size,) * dim
center = (0,0) + (data_size // 2,) * dim
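    # Note (added): the idea of this test is that convolving a unit impulse with an
    # all-ones kernel stamps the (dilated) kernel footprint into the output, so both
    # the output sum and the input-gradient sum should equal prod(kernel_shape),
    # regardless of the dilation -- which is what the assert_allclose checks below verify.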
spike_imgs = np.zeros(shape=data_shape, dtype=np.float32)
spike_imgs[center] = 1.0
spike_img = mx.nd.array(spike_imgs)
spike_img2 = mx.nd.array(spike_imgs)
kernel_weights = mx.nd.ones(shape=tuple([1,1]+list(kernel_shape)), dtype=np.float32)
kernel_weights2 = mx.nd.ones(shape=tuple([1,1]+list(kernel_shape)), dtype=np.float32)
kernel = mx.symbol.Variable('kernel')
in_img = mx.symbol.Variable('input')
net = mx.symbol.Convolution(in_img, num_filter=1,kernel=kernel_shape, dilate=dil, no_bias="true", name='test_convolution')
net.list_arguments()
be = net.bind(default_context(), args={ 'input' : spike_img, 'test_convolution_weight' : kernel_weights},
args_grad={'input' : spike_img2, 'test_convolution_weight' : kernel_weights2 } )
be.forward(True)
out_o = be.outputs[0].asnumpy()
ndo = be.outputs[0]
out_grads = np.zeros(shape=be.outputs[0].shape, dtype=np.float32)
out_grads[center] = 1.0
out_grad = mx.nd.array(out_grads)
be.backward([out_grad])
vgrad = be.grad_arrays[0].asnumpy()
out = out_o.reshape(out_o.shape[2:])
nz_loc = np.nonzero(out)
assert_allclose(np.sum(out),np.prod(kernel_shape),atol=1e-5)
assert_allclose(np.sum(vgrad),np.prod(kernel_shape),atol=1e-5)
# Now check whether the input gradient was computed correctly
input_grad = mx.nd.array(vgrad)
be = net.bind(default_context(), args={ 'input' : input_grad, 'test_convolution_weight' : kernel_weights})
be.forward(True)
out_o = be.outputs[0].asnumpy()
assert_allclose(out_o[center],np.prod(kernel_shape),atol=1e-5)
rnd_kernel_s = np.random.uniform(low=0.0, high=1.0, size=tuple([1,1]+list(kernel_shape))).astype(np.float32)
impulse_error = mx.nd.array(out_o/np.sum(out_o)) # This should be 1.0 at [0,0,16,16]
rnd_kernel = mx.nd.array(rnd_kernel_s)
rnd_kernel2 = mx.nd.array(rnd_kernel_s)
white_in = mx.nd.ones(shape=data_shape)
white_in2 = mx.nd.ones(shape=data_shape)
be = net.bind(default_context(), args={ 'input' : white_in, 'test_convolution_weight' : rnd_kernel},
args_grad={'input' : white_in2, 'test_convolution_weight' : rnd_kernel2 } )
be.forward(True)
be.backward([impulse_error])
out_orig = be.outputs[0].asnumpy()
kernel_gradient = be.grad_arrays[1].asnumpy()
dkernel = mx.nd.array(rnd_kernel_s + kernel_gradient)
be = net.bind(default_context(), args={ 'input' : white_in, 'test_convolution_weight' : dkernel})
be.forward(True)
out = be.outputs[0].asnumpy()
# Now do a simple check of the kernel gradient
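    # Note (added): convolution is linear in the kernel weights, so with an all-ones
    # input, out[center] should exceed out_orig[center] by approximately
    # np.sum(kernel_gradient); the assert below checks this to within 1e-3.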
assert(out[center] - np.sum(kernel_gradient) - out_orig[center] < 0.001)
@with_seed()
def test_convolution_dilated_impulse_response():
# 1D
for dil in [ (1,), (2,), (3,) ]:
for ks in [ (1,), (2,), (3,), (4,)]:
test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks)
# 2D
for dil in [ (1,1), (2,2), (3,3) ]:
for ks in [ (3,3), (4,4), (2,3), (3,2), (1,1) ]:
test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks)
@with_seed()
def test_reshape():
def test_reshape_new(src_shape, shape_args, reverse, dst_shape):
net = mx.sym.Variable("data")
net = mx.sym.Reshape(net, shape=shape_args, reverse=reverse)
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(data=src_shape)
assert output_shape[0] == dst_shape, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
dat_npy = np.random.rand(*src_shape)
grad_npy = np.random.rand(*dst_shape)
exe = net.simple_bind(default_context(), data=src_shape)
exe.arg_dict['data'][:] = dat_npy
exe.forward(is_train=True)
assert np.square(exe.outputs[0].asnumpy() - dat_npy.reshape(dst_shape)).mean() < 1E-7, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s'\
%(str(src_shape), str(shape_args), str(reverse), str(dst_shape))
exe.backward(out_grads=mx.nd.array(grad_npy))
assert np.square(exe.grad_dict['data'].asnumpy() - grad_npy.reshape(src_shape)).mean() < 1E-7, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s'\
%(str(src_shape), str(shape_args), str(reverse), str(dst_shape))
for i in range(len(src_shape)):
holdout_src_shape = list(src_shape)
holdout_src_shape[i] = 0
holdout_src_shape = tuple(holdout_src_shape)
net = mx.sym.Variable('data')
net = mx.sym.elemwise_add(net.reshape(shape_args, reverse=reverse), mx.sym.ones(shape=dst_shape))
input_shape, output_shape, __ = net.infer_shape(data=holdout_src_shape)
assert output_shape[0] == dst_shape, \
'Holdout Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(holdout_src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
assert input_shape[0] == src_shape, \
'Holdout Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(holdout_src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
# Test new api (Using shape)
test_cases = [
[(2, 3, 5, 5), (0, -1), False, (2, 75)],
[(2, 3, 5, 5), (0, 0, -1), False, (2, 3, 25)],
[(5, 3, 4, 5), (0, -1, 0), False, (5, 15, 4)],
[(2, 3, 5, 4), (-1, 0, 0), False, (8, 3, 5)],
[(2, 3, 5, 5), (0, 0, 0, 0), False, (2, 3, 5, 5)],
[(2, 4, 5, 3), (-1, 2, 2, 1), False, (30, 2, 2, 1)],
[(2, 3, 5, 6), (-2,), False, (2, 3, 5, 6)],
[(2, 3, 5, 6), (6, 1, -2), False, (6, 1, 5, 6)],
[(2, 3, 5, 6), (-3, -3), False, (6, 30)],
[(2, 3, 5, 6), (-3, -1), False, (6, 30)],
[(64,), (-4, 16, 4), False, (16, 4)],
[(64,), (-4, 16, -1), False, (16, 4)],
[(64, 1, 2, 3), (-4, 16, -1, -2), False, (16, 4, 1, 2, 3)],
[(2, 3, 5, 5), (0, -1), True, (5, 30)],
[(2, 3, 5, 5), (0, 0, -1), True, (3, 5, 10)],
[(5, 3, 4, 5), (0, -1, 0), True, (3, 20, 5)],
[(2, 3, 5, 4), (-1, 0, 0), True, (6, 5, 4)],
[(2, 3, 4, 5), (3, -1, 0), True, (3, 8, 5)],
[(2, 3, 5, 5), (5, 3, 0, -1), True, (5, 3, 5, 2)],
[(2, 3, 5, 5), (0, 0, 0, 0), True, (2, 3, 5, 5)],
[(2, 3, 5, 6), (-2,), True, (2, 3, 5, 6)],
[(2, 3, 5, 6), (-2, 1, 30), True, (2, 3, 1, 30)],
[(2, 3, 5, 6), (-3, -3), True, (6, 30)],
[(64,), (16, 4, -4), True, (16, 4)],
[(64,), (16, -1, -4), True, (16, 4)],
[(1, 2, 3, 64), (-2, -1, 16, -4), True, (1, 2, 3, 4, 16)]]
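    # Note (added): summary of the Reshape shape codes exercised above --
    #    0 copies the corresponding input dimension,
    #   -1 infers one dimension from the remaining elements,
    #   -2 copies all remaining input dimensions,
    #   -3 merges two consecutive input dimensions,
    #   -4 splits one input dimension into the two values that follow it,
    # and reverse=True matches the codes against the input shape from the right.
    # e.g. (2, 3, 5, 5) with shape=(0, -1) -> (2, 75); (64,) with shape=(-4, 16, -1) -> (16, 4).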
for test_case in test_cases:
test_reshape_new(*test_case)
# Test old api
net = mx.sym.Variable("data")
net = mx.sym.Reshape(net, target_shape=(2, 0))
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(data=(2, 3, 5, 5))
assert(output_shape[0] == (2, 75))
# Test for Flatten
data = mx.sym.Variable("data")
net = mx.sym.Flatten(data)
exe = net.simple_bind(ctx=default_context(), data=(5, 4, 3, 7))
data_npy = np.random.normal(size=(5, 4, 3, 7))
out_grad_npy = np.random.normal(size=(5, 4 * 3 * 7))
outputs = exe.forward(is_train=True, data=data_npy)[0].asnumpy()
assert_allclose(outputs, data_npy.reshape((5, 4 * 3 * 7)))
exe.backward(out_grads=[mx.nd.array(out_grad_npy, ctx=default_context())])
assert_allclose(exe.grad_arrays[0].asnumpy(), out_grad_npy.reshape((5, 4, 3, 7)))
@with_seed()
def test_reshape_like():
def test_reshape_like_new(lhs_shape, rhs_shape, lbeg, lend, rbeg, rend, dst_shape):
lhs = mx.sym.Variable("lhs")
rhs = mx.sym.Variable("rhs")
net = mx.sym.reshape_like(lhs, rhs, lhs_begin=lbeg, lhs_end=lend, rhs_begin=rbeg, rhs_end=rend)
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(lhs=lhs_shape, rhs=rhs_shape)
assert output_shape[0] == dst_shape, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
lhs_npy = np.random.rand(*lhs_shape)
rhs_npy = np.random.rand(*rhs_shape)
grad_npy = np.random.rand(*dst_shape)
exe = net.simple_bind(default_context(), lhs=lhs_shape, rhs=rhs_shape)
exe.arg_dict['lhs'][:] = lhs_npy
exe.arg_dict['rhs'][:] = rhs_npy
exe.forward(is_train=True)
assert np.square(exe.outputs[0].asnumpy() - lhs_npy.reshape(dst_shape)).mean() < 1E-7, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
exe.backward(out_grads=mx.nd.array(grad_npy))
assert np.square(exe.grad_dict['lhs'].asnumpy() - grad_npy.reshape(lhs_shape)).mean() < 1E-7, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
# Test new api (Using shape)
test_cases = [
[(30,), (15,2,4), 0, None, 0, 2, (15,2)],
[(30,), (15,2,4), None, 1, None, 2, (15,2)],
[(30,7), (15,2,4), 0, 1, 0, 2, (15,2,7)],
[(3,5), (1,15,4), 0, 2, 1, 2, (15,)],
[(3,5), (1,15,4), 0, None, 1, -1, (15,)],
[(30,12), (4,2,2,3), -1, None, 1, None, (30,2,2,3)],
[(1,1,7,3,1,1), (81,1,1,21), 1, -1, 1, None, (1,1,1,21,1)]
]
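    # Note (added): reshape_like with begin/end replaces lhs dims [lhs_begin, lhs_end)
    # by rhs dims [rhs_begin, rhs_end), with None extending to the corresponding boundary;
    # e.g. the first case reshapes a (30,) lhs using rhs dims 0:2 of (15, 2, 4), giving (15, 2).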
for test_case in test_cases:
test_reshape_like_new(*test_case)
# Test old api
lhs = mx.sym.Variable("lhs")
rhs = mx.sym.Variable("rhs")
net = mx.sym.reshape_like(lhs, rhs)
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(lhs=(40, 30), rhs=(30,20,2))
assert(output_shape[0] == (30,20,2))
@with_seed()
def test_reduce():
sample_num = 500
def test_reduce_inner(numpy_reduce_func, numpy_reduce_grad_func, mx_reduce_sym, nan_prob=0,
test_exclude=True, test_none_axis=False):
for i in range(sample_num):
            # Generate random data that has ndim between 1-5 and all the shape dims between 1-5
# Insert a NaN with probability equal to nan_prob
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
axis_num = np.random.randint(0, ndim, size=1)
axis_flags = np.random.randint(0, 2, size=ndim)
if test_exclude:
exclude = np.random.randint(0, 2)
else:
exclude = False
axes = []
for (axis, flag) in enumerate(axis_flags):
if flag:
axes.append(axis)
if 0 == len(axes):
axes = None
elif 1 == len(axes):
axes = axes[0]
else:
axes = tuple(axes)
keepdims = np.random.randint(0, 2)
a = mx.symbol.Variable('a')
if axes is None:
if test_none_axis:
b = mx_reduce_sym(a, keepdims=keepdims, axis=axes)
else:
b = mx_reduce_sym(a, keepdims=keepdims)
elif exclude and isinstance(axes, tuple) and len(axes) < ndim:
naxes = [i for i in range(ndim) if i not in axes]
b = mx_reduce_sym(a, axis=naxes, keepdims=keepdims, exclude=True)
else:
b = mx_reduce_sym(a, axis=axes, keepdims=keepdims)
dat_npy = np.random.rand(*shape)
# Test with both negative and positive values (randomly). Avoid having both in the same
# test, which can be problematic for error checking due to near-zero values.
if np.random.rand() > 0.5:
dat_npy = -dat_npy
if nan_prob > 0:
dat_npy[np.random.rand(*shape) < nan_prob] = np.nan
sum_groundtruth = np.array(numpy_reduce_func(dat_npy, axis=axes, keepdims=keepdims))
if sum_groundtruth.shape == ():
sum_groundtruth = np.array([sum_groundtruth])
grad_nd = mx.nd.empty(shape)
outgrad_npy = np.array(np.random.rand(*sum_groundtruth.shape))
keepdim_shape = np_reduce(dat_npy, axes, 1, np.sum).shape
grad_groundtruth = numpy_reduce_grad_func(outgrad=outgrad_npy, data=dat_npy,
outdata=sum_groundtruth,
axis=axes, keepdims=keepdims,
keepdim_shape=keepdim_shape)
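            # Note (added): each reference gradient is produced in the keepdims shape and then
            # broadcast back to the input shape below (bc_grad_groundtruth); e.g. for a sum over
            # axis=1 of a (2, 3) input, outgrad is reshaped to (2, 1) and tiled out to (2, 3).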
net = b.bind(default_context(), args={'a': mx.nd.array(dat_npy)},
args_grad={'a': grad_nd})
net.forward(is_train=True)
equal_forward = almost_equal_ignore_nan(net.outputs[0].asnumpy(), sum_groundtruth, 1E-4, 1E-4)
assert equal_forward
net.backward(out_grads=mx.nd.array(outgrad_npy))
bc_grad_groundtruth = np.broadcast_to(grad_groundtruth, grad_nd.shape)
equal_backward = almost_equal_ignore_nan(grad_nd.asnumpy(), bc_grad_groundtruth, 1E-4, 1E-4)
assert equal_backward
test_none_axis = [True, False]
for test_none in test_none_axis:
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.sum),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape),
mx.symbol.sum, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.mean),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape)/(data.size/outdata.size),
mx.symbol.mean, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.prod),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) * (outdata.reshape(keepdim_shape) / data),
mx.symbol.prod, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.nansum),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
np.where(np.isnan(data), 0, outgrad.reshape(keepdim_shape)),
mx.symbol.nansum, 0.3, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.nanprod),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
np.where(np.isnan(data), 0, outgrad.reshape(keepdim_shape) *
(outdata.reshape(keepdim_shape) / data)),
mx.symbol.nanprod, 0.3, test_none_axis=test_none)
# grad of max and min are sensitive to the precision of the calculation.
# Force numpy to match mxnet's float32.
test_reduce_inner(lambda data, axis, keepdims:np_reduce(np.float32(data), axis, keepdims, np.max),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) *
(np.equal(np.float32(data), outdata.reshape(keepdim_shape))),
mx.symbol.max)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(np.float32(data), axis, keepdims, np.min),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) *
(np.equal(np.float32(data), outdata.reshape(keepdim_shape))),
mx.symbol.min)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.linalg.norm),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) * (data / outdata.reshape(keepdim_shape)),
mx.symbol.norm, test_exclude=False, test_none_axis=test_none)
@with_seed()
def test_broadcast():
sample_num = 200
for i in range(sample_num):
        # Generate random data that has ndim between 1-5 and all the shape dims between 1-5
ndim = np.random.randint(1, 6)
target_shape = np.random.randint(1, 6, size=(ndim,))
axis = tuple(set(np.random.randint(0, ndim, np.random.randint(1, ndim + 1))))
shape = target_shape.copy()
size = tuple([shape[ele] for ele in axis])
for ele in axis:
shape[ele] = 1
a = mx.symbol.Variable('a')
sym_bcast_axis = mx.symbol.broadcast_axis(a, axis=axis, size=size)
sym_bcast_to = mx.symbol.broadcast_to(a, shape=tuple(target_shape))
sym_bcast_like = mx.symbol.broadcast_like(a, sym_bcast_to)
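        # Note (added): all three symbols should agree here -- broadcast_axis expands the
        # size-1 axes listed in `axis` up to `size`, broadcast_to expands to the explicit
        # target_shape, and broadcast_like copies the shape of the already-broadcast symbol.
        # The gradient of a broadcast is the sum over the broadcast axes, which np_reduce
        # computes as the ground truth below.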
def test_broadcasting_ele(sym_bcast):
dat_npy = np.random.rand(*shape)
groundtruth = dat_npy
grad_nd = mx.nd.empty(shape)
outgrad_npy = np.random.rand(*target_shape)
grad_groundtruth = np_reduce(outgrad_npy, axis=axis, keepdims=True,
numpy_reduce_func=np.sum)
net = sym_bcast.bind(default_context(), args={'a': mx.nd.array(dat_npy)},
args_grad={'a': grad_nd})
net.forward(is_train=True)
assert (net.outputs[0].shape == target_shape).all()
assert_almost_equal(net.outputs[0].asnumpy(), groundtruth, rtol=1e-4)
net.backward(out_grads=mx.nd.array(outgrad_npy))
assert_almost_equal(grad_nd.asnumpy(), grad_groundtruth, rtol=1e-4)
test_broadcasting_ele(sym_bcast_axis)
test_broadcasting_ele(sym_bcast_to)
test_broadcasting_ele(sym_bcast_like)
@with_seed()
def test_transpose():
for ndim in range(1, 7):
for t in range(5):
dims = list(np.random.randint(1, 10, size=ndim))
axes = list(range(ndim))
random.shuffle(axes)
axes = tuple(axes)
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.transpose(x, axes=axes)
assert_allclose(np.transpose(x.asnumpy(), axes=axes), y.asnumpy())
y = mx.nd.transpose(x)
assert_allclose(np.transpose(x.asnumpy()), y.asnumpy())
@with_seed()
def test_expand_dims():
for ndim in range(1, 6):
for axis in range(-ndim + 1, ndim):
x = np.random.normal(size=list(np.random.randint(1, 10, size=ndim)))
y = mx.nd.array(x)
x1 = np.expand_dims(x, axis=axis)
y1 = mx.nd.expand_dims(y, axis=axis)
assert_allclose(x1, y1.asnumpy())
assert_allclose(x1.shape, y1.shape)
@with_seed()
def test_crop():
for ndim in range(1, 6):
for t in range(5):
dims = []
begin = []
end = []
idx = []
for i in range(ndim):
d = random.randint(1, 5)
b = random.randint(0, d-1)
e = random.randint(b+1, d)
if b == 0 and random.randint(0, 1):
b = None
elif b != 0 and random.randint(0, 1):
b -= d
if e == d and random.randint(0, 1):
e = None
elif e != d and random.randint(0, 1):
e -= d
dims.append(d)
begin.append(b)
end.append(e)
idx.append(slice(b, e))
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.crop(x, begin=tuple(begin), end=tuple(end))
assert_allclose(x.asnumpy()[idx], y.asnumpy())
vx = mx.sym.Variable('x')
vy = mx.sym.crop(vx, begin=tuple(begin), end=tuple(end))
check_numeric_gradient(vy, [x.asnumpy()])
@with_seed()
def test_slice_axis():
for ndim in range(1, 6):
shape = np.random.randint(1, 11, size=(ndim,))
for t in range(ndim):
d = shape[t]
b = random.randint(0, d-1)
e = random.randint(b+1, d)
if np.random.rand() > 0.6:
e = None
else:
if e < d and np.random.rand() > 0.5:
e = e - d
if np.random.rand() > 0.5:
b = b - d
idx = []
for i in range(ndim):
idx.append(slice(0, shape[i]))
idx[t] = slice(b, e)
X = mx.symbol.Variable('X')
x = mx.nd.array(np.random.normal(size=shape))
Y = mx.symbol.slice_axis(data=X, axis=t, begin=b, end=e)
xgrad = mx.nd.empty(x.shape)
exec1 = Y.bind(default_context(), args = [x], args_grad = {'X': xgrad})
exec1.forward(is_train=True)
y = exec1.outputs[0]
assert_allclose(x.asnumpy()[idx], y.asnumpy())
exec1.backward([y])
xx = x.asnumpy()
xx[:] = 0.0
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx, xgrad.asnumpy())
x_grad_npy = np.random.normal(size=x.shape)
xgrad = mx.nd.array(x_grad_npy)
exec2 = Y.bind(default_context(), args=[x], args_grad={'X': xgrad}, grad_req="add")
exec2.forward(is_train=True)
exec2.backward([exec2.outputs[0]])
xx = np.zeros(shape=x.shape, dtype=np.float32)
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx + x_grad_npy, xgrad.asnumpy(), atol=1E-5)
@with_seed()
def test_slice_like():
for ndim in range(1, 6):
from_shape = np.random.randint(1, 11, size=(ndim,))
shape = [s + np.random.randint(0, 3) for s in from_shape]
for t in range(ndim):
if t > 0:
axes = np.random.randint(0, ndim, size=t).tolist()
else:
axes = []
idx = []
for i in range(ndim):
idx.append(slice(0, shape[i]))
if i in axes or not axes:
idx[i] = slice(0, from_shape[i])
if axes:
pos = np.random.randint(0, t)
if axes[pos] > 0:
axes[pos] -= ndim # negative index
X = mx.symbol.Variable('X')
X_1 = mx.symbol.Variable('X1')
x = mx.nd.array(np.random.normal(size=shape))
x1 = mx.nd.array(np.random.normal(size=from_shape))
Y = mx.symbol.slice_like(data=X, shape_like=X_1, axes=axes)
xgrad = mx.nd.empty(x.shape)
xgrad1 = mx.nd.empty(x1.shape)
exec1 = Y.bind(default_context(), args = [x, x1],
args_grad = {'X': xgrad, 'X1': xgrad1})
exec1.forward(is_train=True)
y = exec1.outputs[0]
assert_allclose(x.asnumpy()[idx], y.asnumpy())
exec1.backward([y])
xx = x.asnumpy()
xx[:] = 0.0
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx, xgrad.asnumpy())
assert_allclose(xgrad1.asnumpy(), mx.nd.zeros_like(xgrad1).asnumpy())
@with_seed()
def test_slice_like_different_types():
x = [[ 1., 2., 3., 4.],
[ 5., 6., 7., 8.],
[ 9., 10., 11., 12.]]
y = [[ 0., 0., 0.],
[ 0., 0., 0.]]
x = mx.nd.array(x)
y = mx.nd.array(y).astype('int32')
z = mx.nd.slice_like(x, y)
assert_allclose(z.asnumpy(), [[1,2,3],[5,6,7]])
@with_seed()
def test_reshape_like_different_types():
x = mx.nd.zeros((2, 3))
y = mx.nd.array([[1, 2], [3, 4], [5, 6]])
y = mx.nd.array(y).astype('int32')
z = mx.nd.reshape_like(x, y)
assert_allclose(z.asnumpy(), [[0,0],[0,0],[0,0]])
@with_seed()
def test_flip():
for ndim in range(1, 6):
for t in range(5):
dims = [random.randint(1,10) for i in range(ndim)]
axis = random.randint(0, ndim-1)
idx = [slice(None, None, -1) if i == axis else slice(None, None) for i in range(ndim)]
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.flip(x, axis=axis)
assert_allclose(x.asnumpy()[idx], y.asnumpy())
@with_seed()
def test_stn():
import sys
np.set_printoptions(threshold=sys.maxsize)
num_filter = 2 # conv of loc net
kernel = (3, 3) # conv of loc net
num_hidden = 6 # fc of loc net
for n in [1, 2, 3, 4]:
for c in [1, 2, 3, 4]:
            for h in [5, 9, 13, 17]:  # for convenience of testing, the third and fourth input dims should be 4x + 1
for w in [5, 9, 13, 17]:
data_shape = (n, c, h, w)
target_shape = (int((data_shape[2]+1)/2), int((data_shape[3]+1)/2))
data = mx.sym.Variable(name="data")
loc = mx.sym.Convolution(data=data, kernel=kernel, pad=(1, 1), num_filter=num_filter, name="loc_conv")
loc = mx.sym.Flatten(data=loc)
loc = mx.sym.FullyConnected(data=loc, num_hidden=num_hidden, name="loc_fc")
stn = mx.sym.SpatialTransformer(data=data, loc=loc, target_shape=target_shape,
transform_type="affine", sampler_type="bilinear")
arg_names = stn.list_arguments()
arg_shapes, out_shapes, _ = stn.infer_shape(data=data_shape)
# check shape
assert out_shapes[0] == (data_shape[0], data_shape[1], target_shape[0], target_shape[1])
dev = default_context()
#dev = mx.gpu(0)
args = {}
args['data'] = mx.random.normal(0, 1, data_shape, ctx=mx.cpu()).copyto(dev)
args['loc_conv_weight'] = mx.nd.zeros((num_filter, data_shape[1], kernel[0], kernel[1]), ctx=dev)
args['loc_conv_bias'] = mx.nd.zeros((num_filter,), ctx=dev)
args['loc_fc_weight'] = mx.nd.zeros((6, num_filter*data_shape[2]*data_shape[3]), ctx=dev)
args['loc_fc_bias'] = mx.nd.array([0.5, 0, 0, 0, 0.5, 0], ctx=dev)
grad_grad = [mx.nd.zeros(shape, ctx=dev) for shape in arg_shapes]
exe = stn.bind(dev, args=args, args_grad=grad_grad)
exe.forward(is_train=True)
out = exe.outputs[0].asnumpy()
# check forward
assert_almost_equal(out, args['data'].asnumpy()[:, :, h//4:h-h//4, w//4:w-w//4], rtol=1e-2, atol=1e-4)
out_grad = mx.nd.ones(out.shape, ctx=dev)
exe.backward([out_grad])
# check backward
assert_almost_equal(out_grad.asnumpy(), grad_grad[0].asnumpy()[:, :, h//4:h-h//4, w//4:w-w//4], rtol=1e-2, atol=1e-4)
def test_stn_valid_sampling():
target_shape = (
28,
28,
)
src_shape = (
42,
42,
)
data = mx.sym.Variable(name="data")
loc = mx.sym.Variable(name="loc")
data_array = np.zeros((
1,
1,
) + src_shape)
# Have an ever so slight rotation.
loc_array = np.array(
[[9.03887e-05, 1.00015, 0.00174931, 1.0003, 0.000311901,
-0.000919065]])
stn = mx.sym.SpatialTransformer(
data=data,
loc=loc,
target_shape=target_shape,
transform_type="affine",
sampler_type="bilinear")
grad_req = {k: 'write' for k in stn.list_arguments()}
grads = {
'data': mx.nd.array(np.zeros_like(data_array)),
'loc': mx.nd.array(np.zeros_like(loc_array))
}
executor = stn.bind(
ctx=default_context(),
args={'data': mx.nd.array(data_array),
'loc': mx.nd.array(loc_array)},
grad_req=grad_req,
args_grad=grads)
executor.forward(is_train=True)
executor.backward(mx.nd.ones((
1,
1,
) + target_shape))
@with_seed()
def test_dot():
ctx = default_context()
dtypes = ['float32', 'float64']
ndims = [2]
if ctx.device_type == 'gpu':
dtypes += ['float16']
ndims += [1]
# Test normal dot.
for ndim in ndims:
for data_type in dtypes:
for m in range(1, 5):
for k in range(1, 5):
if ndim == 1 and k != 1:
                        continue
for n in range(1, 5):
a_shape = (m, k) if ndim == 2 else (m,)
b_shape = (k, n) if ndim == 2 else (n,)
a_npy = np.random.normal(0, 1, (m, k))
a_npy = a_npy.astype(data_type)
b_npy = np.random.normal(0, 1, (k, n))
b_npy = b_npy.astype(data_type)
c_npy = np.empty((m, n), dtype=data_type)
ograd_npy = np.random.normal(0, 1, (m, n))
ograd_npy = ograd_npy.astype(data_type)
agrad_npy = np.empty((m, k), dtype=data_type)
bgrad_npy = np.empty((k, n), dtype=data_type)
c_npy[:, :] = np.dot(a_npy[:, :], b_npy[:, :])
bgrad_npy[:, :] = np.dot(a_npy[:, :].T, ograd_npy[:, :])
agrad_npy[:, :] = np.dot(ograd_npy[:, :], b_npy[:, :].T)
a = mx.sym.Variable('a', dtype=data_type)
b = mx.sym.Variable('b', dtype=data_type)
c = mx.sym.dot(a, b)
exe = c.simple_bind(ctx=ctx, a=a_npy.shape, b=b_npy.shape)
outputs = exe.forward(is_train=True, a=a_npy, b=b_npy)
assert_almost_equal(outputs[0].asnumpy(), c_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-3)
exe.backward(out_grads=[mx.nd.array(ograd_npy, mx.cpu()).astype(data_type)])
assert_almost_equal(exe.grad_dict['a'].asnumpy(), agrad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-3)
assert_almost_equal(exe.grad_dict['b'].asnumpy(), bgrad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-3)
# Test dot with transpose flag using gradient checker.
def dot_sym(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y)
def dot_sym_xT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_a=True)
def dot_sym_yT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_b=True)
def dot_sym_xT_yT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_a=True, transpose_b=True)
for data_type in dtypes:
for ashape, bshape in [((3, 4), (4, 5)), ((2, 3, 4), (4, 5, 6))]:
m1_npy = np.random.uniform(-1, 1, ashape)
m1_npy = m1_npy.astype(data_type)
m2_npy = np.random.uniform(-1, 1, bshape)
m2_npy = m2_npy.astype(data_type)
check_numeric_gradient(dot_sym(data_type), [m1_npy, m2_npy], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_xT(data_type), [m1_npy.T, m2_npy], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_yT(data_type), [m1_npy, m2_npy.T], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_xT_yT(data_type), [m1_npy.T, m2_npy.T], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
@with_seed()
def test_batch_dot():
dtypes = ['float32', 'float64']
if default_context().device_type == 'gpu':
dtypes += ['float16']
for data_type in dtypes:
for batch_size in range(1, 5):
for m in range(1, 5):
for k in range(1, 5):
for n in range(1, 5):
transpose_a = (np.random.rand() > 0.5)
transpose_b = (np.random.rand() > 0.5)
a_npy = np.random.normal(0, 1, (batch_size, m, k))
a_npy = a_npy.astype(data_type)
b_npy = np.random.normal(0, 1, (batch_size, k, n))
b_npy = b_npy.astype(data_type)
c_npy = np.empty((batch_size, m, n), dtype=data_type)
ograd_npy = np.random.normal(0, 1, (batch_size, m, n))
ograd_npy = ograd_npy.astype(data_type)
agrad_npy = np.empty((batch_size, m, k), dtype=data_type)
bgrad_npy = np.empty((batch_size, k, n), dtype=data_type)
                        a_init_grad_npy = np.random.normal(size=(batch_size, m, k)).astype(data_type)
                        b_init_grad_npy = np.random.normal(size=(batch_size, k, n)).astype(data_type)
for i in range(batch_size):
c_npy[i, :, :] = np.dot(a_npy[i, :, :], b_npy[i, :, :])
bgrad_npy[i, :, :] = np.dot(a_npy[i, :, :].T, ograd_npy[i, :, :])
agrad_npy[i, :, :] = np.dot(ograd_npy[i, :, :], b_npy[i, :, :].T)
a = mx.sym.Variable('a', dtype=data_type)
b = mx.sym.Variable('b', dtype=data_type)
c = mx.sym.batch_dot(a, b, transpose_a=transpose_a, transpose_b=transpose_b)
if transpose_a:
a_npy = np.transpose(a_npy, axes=(0, 2, 1))
agrad_npy = np.transpose(agrad_npy, axes=(0, 2, 1))
a_init_grad_npy = np.transpose(a_init_grad_npy, axes=(0, 2, 1))
if transpose_b:
b_npy = np.transpose(b_npy, axes=(0, 2, 1))
bgrad_npy = np.transpose(bgrad_npy, axes=(0, 2, 1))
b_init_grad_npy = np.transpose(b_init_grad_npy, axes=(0, 2, 1))
exe = c.simple_bind(ctx=default_context(),
a=a_npy.shape, b=b_npy.shape, grad_req='write')
exe_add = c.simple_bind(ctx=default_context(),
a=a_npy.shape, b=b_npy.shape, grad_req='add')
exe_add.grad_dict['a'][:] = a_init_grad_npy
exe_add.grad_dict['b'][:] = b_init_grad_npy
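                        # Note (added): with grad_req='add' the executor accumulates into these
                        # preset buffers instead of overwriting them, so the expected gradients
                        # below are agrad_npy + a_init_grad_npy and bgrad_npy + b_init_grad_npy.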
outputs = exe.forward(is_train=True, a=a_npy, b=b_npy)
assert_almost_equal(outputs[0].asnumpy(), c_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
exe.backward(out_grads=[mx.nd.array(ograd_npy, ctx=exe._ctx)])
assert_almost_equal(exe.grad_dict['a'].asnumpy(), agrad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
assert_almost_equal(exe.grad_dict['b'].asnumpy(), bgrad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
exe_add.forward(is_train=True, a=a_npy, b=b_npy)
exe_add.backward(out_grads=[mx.nd.array(ograd_npy, ctx=exe._ctx)])
assert_almost_equal(exe_add.grad_dict['a'].asnumpy(),
agrad_npy + a_init_grad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
assert_almost_equal(exe_add.grad_dict['b'].asnumpy(),
bgrad_npy + b_init_grad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
def get_correlation(data1,data2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply):
img1 = mx.sym.Variable('img1')
img2 = mx.sym.Variable('img2')
return mx.sym.Correlation(data1=img1,data2=img2,kernel_size =kernel_size,max_displacement = max_displacement,
stride1 = stride1,stride2 = stride2,pad_size= pad_size,is_multiply = is_multiply)
def correlation_forward(data1,data2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply):
# compute output's dimension
paddedbottomheight = data1.shape[2] + 2 * pad_size
paddedbottomwidth = data1.shape[3] + 2 * pad_size
kernel_radius = (kernel_size - 1) // 2
border_size = max_displacement + kernel_radius
top_width = (paddedbottomwidth - border_size * 2) // stride1
top_height = (paddedbottomheight - border_size * 2) // stride1
neighborhood_grid_radius = max_displacement // stride2
neighborhood_grid_width = neighborhood_grid_radius * 2 + 1
top_channels = neighborhood_grid_width * neighborhood_grid_width
out = np.zeros((data1.shape[0], top_channels, top_height, top_width))
tmp1 = np.zeros((data1.shape[0],data1.shape[1],paddedbottomheight, paddedbottomwidth))
tmp2 = np.zeros((data1.shape[0],data1.shape[1],paddedbottomheight, paddedbottomwidth))
tmp1[:, :, pad_size:pad_size + data1.shape[2], pad_size:pad_size + data1.shape[3]] = data1[:,:,:,:]
tmp2[:, :, pad_size:pad_size + data2.shape[2], pad_size:pad_size + data2.shape[3]] = data2[:,:,:,:]
for i in range(top_height):
for j in range(top_width):
for nbatch in range(data1.shape[0]):
# x1,y1 is the location in data1 , i,j is the location in output
x1 = j * stride1 + max_displacement
y1 = i * stride1 + max_displacement
for top_channel in range(top_channels):
s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2
s2p = (top_channel // neighborhood_grid_width - neighborhood_grid_radius) * stride2
# location in data2
x2 = x1 + s2o
y2 = y1 + s2p
for h in range(kernel_size):
for w in range(kernel_size):
for channel in range(data1.shape[1]):
if is_multiply:
out[nbatch, top_channel, i, j] += tmp1[nbatch, channel,y1 + h, x1 + w] * tmp2[nbatch, channel, y2 + h,x2 + w]
else:
out[nbatch, top_channel, i, j] += abs(tmp1[nbatch, channel, y1 + h, x1 + w] - tmp2[nbatch, channel, y2 + h, x2 + w])
out /= float(kernel_size**2*data1.shape[1])
return out,tmp1,tmp2
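# Note (added): correlation_forward above is a numpy reference for the Correlation layer
# used in optical-flow networks (e.g. FlowNet-style models): for each output location it
# compares a kernel_size x kernel_size patch of data1 with patches of data2 shifted by
# multiples of stride2 inside a neighborhood of radius max_displacement, and each shift
# becomes one output channel, i.e. top_channels = (2*(max_displacement//stride2) + 1)**2
# (81 channels for max_displacement=4, stride2=1). is_multiply selects dot-product
# matching versus summed absolute differences.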
def correlation_backward(out_grad,tmp1,tmp2,data1,data2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply):
# compute output's dimension
paddedbottomheight = data1.shape[2] + 2 * pad_size
paddedbottomwidth = data1.shape[3] + 2 * pad_size
kernel_radius = (kernel_size - 1) // 2
border_size = max_displacement + kernel_radius
top_width = (paddedbottomwidth - border_size * 2) // stride1
top_height = (paddedbottomheight - border_size * 2) // stride1
neighborhood_grid_radius = max_displacement // stride2
neighborhood_grid_width = neighborhood_grid_radius * 2 + 1
top_channels = neighborhood_grid_width * neighborhood_grid_width
out = np.zeros((data1.shape[0], top_channels, top_height, top_width))
tmp1_grad = np.zeros(tmp1.shape)
tmp2_grad = np.zeros(tmp2.shape)
for i in range(top_height):
for j in range(top_width):
for nbatch in range(data1.shape[0]):
# x1,y1 is the location in data1 , i,j is the location in output
x1 = j * stride1 + max_displacement
y1 = i * stride1 + max_displacement
for top_channel in range(top_channels):
s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2
s2p = (top_channel // neighborhood_grid_width - neighborhood_grid_radius) * stride2
# location in data2
x2 = x1 + s2o
y2 = y1 + s2p
for h in range(kernel_size):
for w in range(kernel_size):
for channel in range(data1.shape[1]):
if is_multiply:
tmp1_grad[nbatch,channel,y1+h,x1+w]+= out_grad[nbatch,top_channel,i,j]*tmp2[nbatch, channel, y2 + h,x2 + w]
tmp2_grad[nbatch,channel,y2+h,x2+w]+= out_grad[nbatch,top_channel,i,j]*tmp1[nbatch, channel, y1 + h,x1 + w]
else:
sgn = 1 if (tmp1[nbatch, channel, y1 + h,x1 + w]>=tmp2[nbatch, channel, y2 + h,x2 + w]) else -1
tmp1_grad[nbatch,channel,y1+h,x1+w]+= out_grad[nbatch,top_channel,i,j]*sgn
tmp2_grad[nbatch,channel,y2+h,x2+w]+= out_grad[nbatch,top_channel,i,j]*(-sgn)
tmp1_grad = tmp1_grad / float(kernel_size**2*data1.shape[1])
tmp2_grad = tmp2_grad / float(kernel_size**2*data1.shape[1])
return tmp1_grad[:,:,pad_size:pad_size+data1.shape[2],pad_size:pad_size+data1.shape[3]],tmp2_grad[:,:,pad_size:pad_size+data1.shape[2],pad_size:pad_size+data1.shape[3]],
def unittest_correlation(data_shape,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply,dtype):
img1 = np.random.random(data_shape)
img1 = img1.astype(dtype)
img2 = np.random.random(data_shape)
img2 = img2.astype(dtype)
net1 = get_correlation(img1,img2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply)
net2 = get_correlation(img1,img2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply )
exe1 = net1.simple_bind(default_context(),img1=img1.shape,img2=img1.shape)
exe1.arg_dict['img1'][:] = img1
exe1.arg_dict['img2'][:] = img2
#cpu forward
exe1.forward(is_train=True)
# python forward
forward_result,tmp1,tmp2 = correlation_forward(img1,img2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply)
# forward error
assert_almost_equal(exe1.outputs[0].asnumpy(), forward_result, rtol=1e-4, atol=1e-4)
# out_grad
a = np.ones(forward_result.shape)
out_grad1 = mx.nd.array(a,default_context())
# cpu backward
exe1.backward(out_grads=out_grad1)
# python backward
grad1,grad2 = correlation_backward(a,tmp1,tmp2,img1,img2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply)
# backward error
assert_almost_equal(exe1.grad_dict['img1'].asnumpy(), grad1, rtol=1e-3, atol=1e-4)
assert_almost_equal(exe1.grad_dict['img2'].asnumpy(), grad2, rtol=1e-3, atol=1e-4)
@with_seed()
def test_correlation():
def test_infer_type(dtype):
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
corr = mx.sym.Correlation(data1=a, data2=b)
arg_type1, out_type1, _ = corr.infer_type(a=dtype)
if arg_type1[0] != np.dtype(dtype) and arg_type1[1] != np.dtype(dtype) and out_type1[0] != np.dtype(dtype):
            msg = npt.build_err_msg([a, b],
err_msg="Inferred type from a is not as expected, "
"Expected :%s %s %s, Got: %s %s %s"
% (dtype, dtype, dtype, arg_type1[0], arg_type1[1], out_type1[0]),
names=['a', 'b'])
raise AssertionError(msg)
arg_type2, out_type2, _ = corr.infer_type(b=dtype)
if arg_type2[0] != np.dtype(dtype) and arg_type2[1] != np.dtype(dtype) and out_type2[0] != np.dtype(dtype):
            msg = npt.build_err_msg([a, b],
                                    err_msg="Inferred type from b is not as expected, "
                                            "Expected :%s %s %s, Got: %s %s %s"
                                            % (dtype, dtype, dtype, arg_type2[0], arg_type2[1], out_type2[0]),
names=['a', 'b'])
raise AssertionError(msg)
for dtype in ['float16', 'float32']:
test_infer_type(dtype)
unittest_correlation((1,3,10,10), kernel_size = 1,max_displacement = 4,stride1 = 1,stride2 = 1,pad_size = 4,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 1,pad_size = 5,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 1,pad_size = 5,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 10,stride1 = 1,stride2 = 2,pad_size = 10,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 1,stride2 = 1,pad_size = 2,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,6,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,11,11), kernel_size = 5,max_displacement = 1,stride1 = 1,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
@with_seed()
def test_support_vector_machine_l1_svm():
xpu = default_context()
shape = (20, 10)
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L')
Y = mx.symbol.SVMOutput(data=X, label=L, use_linear=True)
x = mx.nd.empty(shape, ctx = xpu)
l = mx.nd.empty((shape[0],), ctx = xpu)
x_np = np.random.rand(*shape)
l_np = np.random.randint(0, shape[1], (shape[0],))
x[:] = x_np
l[:] = l_np
grad = mx.nd.empty(shape, ctx = xpu)
exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
exec1.forward(is_train=True)
assert_almost_equal(x_np, exec1.outputs[0].asnumpy())
exec1.backward()
l_mask = np.equal(l_np.reshape(shape[0],1),range(shape[1]))
l_mask = np.array(l_mask, dtype=np.float32)*2 -1
grad_np = (-1) * l_mask * np.greater(1 - l_mask * x_np, 0)
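    # Note (added): this is the subgradient of the linear hinge loss sum_j max(0, 1 - y_j*x_j)
    # with labels encoded as y_j = +/-1 via l_mask: -y_j wherever the margin term
    # 1 - y_j*x_j is still positive, and 0 otherwise.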
assert_almost_equal(grad_np, grad.asnumpy())
@with_seed()
def test_support_vector_machine_l2_svm():
xpu = default_context()
shape = (20, 10)
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L')
Y = mx.symbol.SVMOutput(data=X, label=L)
x = mx.nd.empty(shape, ctx = xpu)
l = mx.nd.empty((shape[0],), ctx = xpu)
x_np = np.random.rand(*shape)
x_np = x_np.astype(np.float32)
l_np = np.random.randint(0, shape[1], (shape[0],))
x[:] = x_np
l[:] = l_np
grad = mx.nd.empty(shape, ctx = xpu)
exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
exec1.forward(is_train=True)
assert_almost_equal(x_np, exec1.outputs[0].asnumpy())
exec1.backward()
l_mask = np.equal(l_np.reshape(shape[0],1),range(shape[1]))
l_mask = np.array(l_mask, dtype=np.float32)*2 -1
grad_np = (-2)*l_mask*np.maximum(1-l_mask*x_np,0)
grad_np = grad_np.astype(np.float32)
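    # Note (added): the squared hinge (L2-SVM) loss sum_j max(0, 1 - y_j*x_j)**2 has
    # gradient -2*y_j*max(0, 1 - y_j*x_j), which is what grad_np encodes above.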
assert_almost_equal(grad_np, grad.asnumpy())
# Seed set because the test is not robust enough to operate on random data
@with_seed(1234)
def test_roipooling():
data = mx.symbol.Variable(name='data')
rois = mx.symbol.Variable(name='rois')
test = mx.symbol.ROIPooling(data=data, rois=rois, pooled_size=(4, 4), spatial_scale=1)
x1 = np.random.rand(4, 3, 12, 8).astype('float32')
x2 = np.array([[0, 1.1, 1.1, 6.2, 6.2], [2, 6.1, 2.1, 8.2, 11.2], [1, 3.1, 1.1, 5.2, 10.2], [0, 3, 3, 3, 3]], dtype='float32')
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data':'write', 'rois':'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4)
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data':'add', 'rois':'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1E-4)
def check_pad_with_shape(shape, xpu, pad_width, mode, dtype="float64"):
# bind with label
X = mx.symbol.Variable('X', dtype=dtype)
Y = mx.symbol.Pad(data=X, mode=mode, pad_width=pad_width)
x = mx.random.uniform(-1, 1, shape, ctx=mx.cpu(), dtype=dtype).copyto(xpu)
# numpy result
pad_grouped = list(zip(*[iter(list(pad_width))] * 2))
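    # Note (added): zip(*[iter(pad_width)] * 2) pairs the flat MXNet pad_width tuple into
    # numpy's per-axis ((before, after), ...) format, e.g.
    # (0, 0, 0, 0, 1, 2, 3, 4) -> [(0, 0), (0, 0), (1, 2), (3, 4)].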
np_out = np.pad(x.asnumpy(), pad_grouped, mode)
# mxnet result
grad = mx.nd.empty(shape, ctx = xpu, dtype=dtype)
exec1 = Y.bind(xpu, args = [x], args_grad = {'X': grad})
exec1.forward(is_train=True)
out = exec1.outputs[0].asnumpy()
# compare numpy + mxnet
assert_almost_equal(out, np_out)
# grad check
check_numeric_gradient(Y, [x.asnumpy()], numeric_eps=1e-2, rtol=1e-2)
@with_seed()
def test_pad():
ctx = default_context()
shape1 = (2, 3, 3, 5)
pad1 = (0, 0, 0, 0, 1, 2, 3, 4)
shape2 = (2, 3, 3, 5, 4)
pad2 = (0, 0, 0, 0, 1, 2, 3, 4, 3, 1)
# note: this op doesn't support ints yet. Add tests when supported
dtypes = ["float16", "float32", "float64"]
for dtype in dtypes:
check_pad_with_shape(shape1, ctx, pad1, 'constant', dtype)
check_pad_with_shape(shape1, ctx, pad1, 'edge', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'constant', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'edge', dtype)
check_pad_with_shape(shape1, ctx, pad1, 'reflect', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'reflect', dtype)
def np_instance_norm(data, weight, bias, eps):
spatial_dims = data.shape[2::]
num_spatial_vals = np.prod(np.array(spatial_dims))
scale = 1/float(num_spatial_vals)
sum_axis = tuple(range(2, data.ndim))
mean = scale * np.sum(data, axis = sum_axis)
mean = np.reshape(np.repeat(mean, num_spatial_vals), data.shape)
var = scale * np.sum((data - mean)**2, axis = sum_axis)
var = np.reshape(np.repeat(var, num_spatial_vals), data.shape)
weightBatch = np.tile(weight, (data.shape[0], 1))
weightBatch = np.reshape(np.repeat(weightBatch, num_spatial_vals), data.shape)
biasBatch = np.tile(bias, (data.shape[0], 1))
biasBatch = np.reshape(np.repeat(biasBatch, num_spatial_vals), data.shape)
return weightBatch * (data - mean)/np.sqrt(var + eps) + biasBatch
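# Note (added): np_instance_norm is a reference for InstanceNorm, normalizing each
# (sample, channel) slice over its spatial positions only:
#   out[n, c, ...] = gamma[c] * (x[n, c, ...] - mean[n, c]) / sqrt(var[n, c] + eps) + beta[c]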
def check_instance_norm_with_shape(shape, xpu):
# bind with label
eps = 0.001
X = mx.symbol.Variable('X')
G = mx.symbol.Variable('G')
B = mx.symbol.Variable('B')
Y = mx.symbol.InstanceNorm(data=X, beta=B, gamma=G, eps=eps)
x = mx.random.normal(0, 1, shape, ctx=mx.cpu()).copyto(xpu)
gamma = mx.random.normal(0, 1, shape[1], ctx=mx.cpu()).copyto(xpu)
beta = mx.random.normal(0, 1, shape[1], ctx=mx.cpu()).copyto(xpu)
np_out = np_instance_norm(x.asnumpy(), gamma.asnumpy(), beta.asnumpy(), eps)
exec1 = Y.bind(xpu, args = {'X':x, 'G':gamma, 'B':beta})
exec1.forward(is_train=False)
out = exec1.outputs[0].asnumpy()
assert_almost_equal(out, np_out, rtol=1e-4, atol=1e-4)
check_numeric_gradient(Y, {'X':x.asnumpy(), 'G':gamma.asnumpy(), 'B':beta.asnumpy()},
numeric_eps=1e-2, rtol=1e-2, atol=1e-2)
@with_seed()
def test_instance_normalization():
check_instance_norm_with_shape((1, 1, 1), default_context())
check_instance_norm_with_shape((2, 1, 2), default_context())
check_instance_norm_with_shape((2,4,5,6), default_context())
check_instance_norm_with_shape((3,3,2,3,2,1,1), default_context())
def check_l2_normalization(in_shape, mode, dtype, norm_eps=1e-10):
ctx = default_context()
data = mx.symbol.Variable('data')
out = mx.symbol.L2Normalization(data=data, mode=mode, eps=norm_eps)
in_data = np.random.uniform(-1, 1, in_shape).astype(dtype)
# calculate numpy results
if mode == 'channel':
assert in_data.ndim > 2
np_norm = np.linalg.norm(in_data, axis=1) + norm_eps
np_norm = np.repeat(1. / np.expand_dims(np_norm, axis=1), in_data.shape[1], axis=1)
np_out = np.multiply(in_data, np_norm)
elif mode == 'spatial':
assert in_data.ndim > 2
s = in_data.shape
np_norm = np.linalg.norm(in_data.reshape((s[0], s[1], -1)), axis=2) + norm_eps
        np_norm = np.repeat(1. / np_norm[:, np.newaxis], in_data.size // (s[0] * s[1]), axis=2)
np_out = np.multiply(in_data, np_norm.reshape(s))
elif mode == 'instance':
assert in_data.ndim > 1
s = in_data.shape
np_norm = np.linalg.norm(in_data.reshape((s[0], -1)), axis=1) + norm_eps
        np_norm = np.repeat(1. / np_norm[:, np.newaxis], in_data.size // s[0], axis=1)
np_out = np.multiply(in_data, np_norm.reshape(s))
else:
raise RuntimeError('Unknown l2 normalization mode')
exe = out.simple_bind(ctx=ctx, data=in_data.shape)
output = exe.forward(is_train=True, data=in_data)
# compare numpy + mxnet
    assert_almost_equal(exe.outputs[0].asnumpy(), np_out, rtol=1e-2 if dtype == 'float16' else 1e-5, atol=1e-5)
# check gradient
check_numeric_gradient(out, [in_data], numeric_eps=1e-3, rtol=1e-2, atol=5e-3)
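# Note (added): the numpy reference above mirrors the three L2Normalization modes --
# 'instance' normalizes each sample over all of its elements, 'channel' normalizes
# across channels (axis 1) at every spatial position, and 'spatial' normalizes each
# (sample, channel) slice over its spatial positions.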
@with_seed()
def test_l2_normalization():
for dtype in ['float16', 'float32', 'float64']:
for mode in ['channel', 'spatial', 'instance']:
nbatch = random.randint(1, 4)
nchannel = random.randint(3, 5)
height = random.randint(4, 6)
check_l2_normalization((nbatch, nchannel, height), mode, dtype)
width = random.randint(5, 7)
check_l2_normalization((nbatch, nchannel, height, width), mode, dtype)
def check_layer_normalization(in_shape, axis, eps, dtype=np.float32, forward_check_eps=1E-3):
def npy_layer_norm(data, gamma, beta, axis=1, eps=1E-5):
if axis < 0:
axis += data.ndim
broadcast_shape = [1 for _ in range(data.ndim)]
broadcast_shape[axis] = data.shape[axis]
mean = data.mean(axis=axis, keepdims=True).astype(dtype)
var = data.var(axis=axis, keepdims=True).astype(dtype)
std = np.sqrt(var + dtype(eps)).astype(dtype)
out = np.reshape(gamma, broadcast_shape) * (data - mean) / std + \
np.reshape(beta, broadcast_shape)
return out
ctx = default_context()
data = np.random.normal(0, 1, in_shape).astype(dtype)
gamma = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
beta = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
data_s = mx.symbol.Variable('data')
gamma_s = mx.symbol.Variable('gamma')
beta_s = mx.symbol.Variable('beta')
out_s = mx.symbol.LayerNorm(data=data_s, gamma=gamma_s, beta=beta_s, axis=axis, eps=eps)
exe = out_s.simple_bind(ctx, data=in_shape)
exe.arg_dict['data'][:] = data
exe.arg_dict['gamma'][:] = gamma
exe.arg_dict['beta'][:] = beta
out_nd = exe.forward()[0]
out = npy_layer_norm(data, gamma, beta, axis, eps)
assert_almost_equal(out, out_nd.asnumpy(), forward_check_eps, forward_check_eps)
for req in ['write', 'add']:
check_numeric_gradient(out_s, {'data': data, 'gamma': gamma, 'beta': beta},
grad_nodes={'data': req, 'gamma': req, 'beta': req},
numeric_eps=1e-2, rtol=1e-2, atol=1e-2)
@with_seed()
def test_norm():
try:
import scipy
assert LooseVersion(scipy.__version__) >= LooseVersion('0.1')
from scipy.linalg import norm as sp_norm
except (AssertionError, ImportError):
print("Could not import scipy.linalg.norm or scipy is too old. "
"Falling back to numpy.linalg.norm which is not numerically stable.")
from numpy.linalg import norm as sp_norm
def l1norm(input_data, axis=0, keepdims=True):
return np.sum(abs(input_data), axis=axis, keepdims=keepdims)
def l2norm(input_data, axis=0, keepdims=True):
return sp_norm(input_data, axis=axis, keepdims=keepdims)
ctx = default_context()
data = mx.symbol.Variable('data')
in_data_dim = random_sample([4,5,6], 1)[0]
in_shape = rand_shape_nd(in_data_dim)
epsilon = 1e-3
for order in [1, 2]:
for dtype in [np.float16, np.float32, np.float64]:
in_data = np.random.uniform(-1, 1, in_shape).astype(dtype)
in_data[abs(in_data) < epsilon] = 2 * epsilon
for i in range(in_data_dim):
norm_sym = mx.symbol.norm(data=data, ord=order, axis=i, keepdims=True)
                npy_out = l1norm(in_data, i) if order == 1 else l2norm(in_data, i)
                npy_out_backward = np.sign(in_data) if order == 1 else in_data/npy_out
check_symbolic_forward(norm_sym, [in_data], [npy_out],
rtol=1e-2 if dtype is np.float16 else 1e-5,
atol=1e-2 if dtype is np.float16 else 1e-5, ctx=ctx)
check_symbolic_backward(norm_sym, [in_data], [np.ones(npy_out.shape)],
[npy_out_backward],
rtol=1e-2 if dtype is np.float16 else 1e-5,
atol=1e-2 if dtype is np.float16 else 1e-5, ctx=ctx)
# Disable numeric gradient https://github.com/apache/incubator-mxnet/issues/11509
# # check gradient
# if dtype is not np.float16:
# check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon, rtol=1e-1, atol=1e-3)
if i < in_data_dim-1:
norm_sym = mx.symbol.norm(data=data, ord=order, axis=(i, i+1), keepdims=True)
                    npy_out = l1norm(in_data, (i, i+1)) if order == 1 else l2norm(in_data, (i, i+1))
                    npy_out_backward = np.sign(in_data) if order == 1 else in_data/npy_out
check_symbolic_forward(norm_sym, [in_data], [npy_out],
rtol=1e-2 if dtype is np.float16 else 1e-5,
atol=1e-2 if dtype is np.float16 else 1e-5, ctx=ctx)
check_symbolic_backward(norm_sym, [in_data], [np.ones(npy_out.shape)],
[npy_out_backward],
rtol=1e-2 if dtype is np.float16 else 1e-5,
atol=1e-2 if dtype is np.float16 else 1e-5, ctx=ctx)
# # check gradient
# if dtype is not np.float16:
# check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon, rtol=1e-1, atol=1e-3)
def test_layer_norm():
for dtype, forward_check_eps in zip([np.float16, np.float32, np.float64],
[1E-2, 1E-3, 1E-4]):
for in_shape in [(10, 6, 5), (10, 10)]:
for axis in range(-len(in_shape), len(in_shape)):
for eps in [1E-2, 1E-3]:
check_layer_normalization(in_shape, axis, eps, dtype=dtype,
forward_check_eps=forward_check_eps)
# Numpy Implementation of Sequence Ops
def sequence_last_numpy(array, lengths, axis):
# create new array of dims [batch, seqlen, ...]
array2 = np.moveaxis(array, axis, 1)
dims = array2.shape
if lengths is None:
return array2[:, -1]
lengths = list(lengths)
return np.array([array2[i, int(lengths[i]) - 1] for i in range(dims[0])])
def sequence_mask_numpy(array, lengths, axis, value):
if lengths is None:
return array
arrayMask = array.copy()
# conform to [batch, seqlen, ...]
arrayMask = np.moveaxis(arrayMask, axis, 1)
shape = arrayMask.shape
lengths = list(lengths)
for i in range(shape[0]):
arrayMask[i, int(lengths[i]):] = value
return np.moveaxis(arrayMask, 1, axis)
def sequence_reverse_numpy(array, lengths, axis):
rarray = array.copy()
# conform to [batch, seqlen, ...]
rarray = np.moveaxis(rarray, axis, 1)
shape = rarray.shape
if lengths is None:
lengths = [shape[1]] * shape[0]
lengths = list(lengths)
for i in range(shape[0]):
j = int(lengths[i])
rarray[i,:j] = rarray[i,:j][::-1]
return np.moveaxis(rarray, 1, axis)
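# Note (added): the three numpy references above first move the sequence axis to
# position 1 so data is laid out as [batch, seqlen, ...] (via np.moveaxis), apply the
# per-sequence operation using `lengths`, then move the axis back; e.g. with
# lengths=[2, 3], sequence_mask_numpy overwrites time steps >= 2 of the first sequence
# and >= 3 of the second with `value`.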
def check_sequence_func(ftype, mask_value=0, axis=0):
# bind with label
xpu = default_context()
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L') # lengths
shapes = [(3, 4), (1, 1), (3, 4, 3, 1, 1)]
for seqlenQ in [True, False]:
for ary_dtype in [np.float32]:
for idx_dtype in [np.int32, np.float32]:
for s in shapes:
x = mx.random.uniform(-1, 1, s, ctx=mx.cpu()).astype(ary_dtype).copyto(xpu)
batch = s[1] if (axis == 0) else s[0]
seqlen = s[axis]
l_np = np.random.randint(1, seqlen + 1, batch)
l = mx.nd.array(l_np, ctx=mx.cpu(), dtype=idx_dtype).copyto(xpu)
if not seqlenQ:
l_np = None
args = {'data':X, 'use_sequence_length':seqlenQ, "axis":axis}
if seqlenQ:
args['sequence_length'] = L
if ftype == "last":
Y = mx.symbol.SequenceLast(**args)
np_out = sequence_last_numpy(x.asnumpy(), l_np, axis)
elif ftype == "mask":
args['value'] = mask_value
Y = mx.symbol.SequenceMask(**args)
np_out = sequence_mask_numpy(x.asnumpy(), l_np, axis, mask_value)
elif ftype == "reverse":
Y = mx.symbol.SequenceReverse(**args)
np_out = sequence_reverse_numpy(x.asnumpy(), l_np, axis)
fargs = [x, l] if seqlenQ else [x]
gargs = [x.asnumpy(), l_np] if seqlenQ else [x.asnumpy()]
check_symbolic_forward(Y, fargs, [np_out], dtype="asnumpy")
check_numeric_gradient(Y, gargs, grad_nodes={'X':'write'},
numeric_eps=1e-2, rtol=1e-2)
check_numeric_gradient(Y, gargs, grad_nodes={'X':'add'},
numeric_eps=1e-3, rtol=1e-2, atol=1E-4)
check_numeric_gradient(Y, gargs, grad_nodes={'X':'null'},
numeric_eps=1e-3, rtol=1e-2, atol=1E-4)
@with_seed()
@unittest.skip("Flaky test: https://github.com/apache/incubator-mxnet/issues/11395")
def test_sequence_last():
check_sequence_func("last", axis=0)
check_sequence_func("last", axis=1)
@with_seed()
def test_sequence_mask():
check_sequence_func("mask", axis = 0, mask_value=-2.3)
check_sequence_func("mask", axis = 1, mask_value=0.3)
def check_sequence_reverse(xpu):
# sample data
arr = np.array(
[[[ 1., 2., 3.],
[ 4., 5., 6.]],
[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 13., 14., 15.],
[ 16., 17., 18.]]])
arr1 = np.array(
[[[ 13., 14., 15.],
[ 16., 17., 18.]],
[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 1., 2., 3.],
[ 4., 5., 6.]]])
arr2 = np.array(
[[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 1., 2., 3.],
[ 4., 5., 6.]],
[[ 13., 14., 15.],
[ 16., 17., 18.]]])
arr3 = np.array(
[[[ 7., 8., 9.],
[ 16., 17., 18.]],
[[ 1., 2., 3.],
[ 10., 11., 12.]],
[[ 13., 14., 15.],
[ 4., 5., 6.]]])
# test for matrix case
seq_len_1 = [1, 2, 2]
arr_4 = np.array([[7., 8., 9.], [16., 17., 5.4]], dtype=np.float32)
arr_5 = np.array([[7., 17., 5.4], [16., 8., 9.]], dtype=np.float32)
def test_wrapper(arr, xpu, sequence_length=None, use_sequence_length=False):
# MxNet symbol creation
seq = mx.sym.Variable('seq')
if sequence_length and use_sequence_length:
seq_len = mx.sym.Variable('seq_len')
else:
# ensure that both are disabled, not just one
seq_len=None
use_sequence_length=False
rev = mx.sym.SequenceReverse(data=seq, sequence_length=seq_len, use_sequence_length=use_sequence_length)
# MxNet symbol execution
if sequence_length:
bound = rev.bind(xpu, {'seq': mx.nd.array(arr), 'seq_len': mx.nd.array(sequence_length)})
else:
bound = rev.bind(xpu, {'seq': mx.nd.array(arr)})
fwd = bound.forward()
return fwd[0].asnumpy()
# test cases
assert_array_equal(test_wrapper(arr, xpu, use_sequence_length=False), arr1)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[3, 3], use_sequence_length=True), arr1)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[2, 2], use_sequence_length=True), arr2)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[2, 3], use_sequence_length=True), arr3)
assert_array_equal(test_wrapper(arr_4, xpu, sequence_length=seq_len_1, use_sequence_length=True), arr_5)
@with_seed()
def test_sequence_reverse():
check_sequence_func("reverse", axis=0)
check_sequence_reverse(mx.cpu())
def mathematical_core_binary(name,
forward_mxnet_call,
forward_numpy_call,
backward_numpy_call1,
backward_numpy_call2,
data1_init=2.,
data2_init=3.,
grad_init=2.):
data1 = mx.symbol.Variable('data')
data2 = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp1 = np.random.rand(3, 4)
data_tmp2 = np.random.rand(3, 4)
data_tmp1[:] = data1_init
data_tmp2[:] = data2_init
arr_data1 = mx.nd.array(data_tmp1)
arr_data2 = mx.nd.array(data_tmp2)
arr_grad1 = mx.nd.empty(shape)
arr_grad2 = mx.nd.empty(shape)
test = forward_mxnet_call(data1, data2)
exe_test = test.bind(default_context(), args=[arr_data1, arr_data2], args_grad=[arr_grad1, arr_grad2])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = forward_numpy_call(data_tmp1, data_tmp2)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = grad_init
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = grad_init
npout_grad1 = npout_grad * backward_numpy_call1(data_tmp1, data_tmp2)
npout_grad2 = npout_grad * backward_numpy_call2(data_tmp1, data_tmp2)
arr_grad1 = arr_grad1.asnumpy()
arr_grad2 = arr_grad2.asnumpy()
assert_almost_equal(arr_grad1, npout_grad1)
assert_almost_equal(arr_grad2, npout_grad2)
def mathematical_core(name, forward_mxnet_call, forward_numpy_call, backward_numpy_call, data_init=5., grad_init=2.):
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = data_init
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:] = 3
test = forward_mxnet_call(data)
exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = forward_numpy_call(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = grad_init
npout_grad = out_grad.asnumpy()
temp = backward_numpy_call(data_tmp)
npout_grad = npout_grad * temp
exe_test.backward(out_grad)
arr_grad = arr_grad.asnumpy()
# print(name)
# print(arr_grad)
# print(npout_grad)
assert_almost_equal(arr_grad, npout_grad)
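# Note (added): mathematical_core checks an elementwise op against its numpy forward
# and the chain rule for backward: with y = f(x) and upstream gradient g, the expected
# input gradient is g * f'(x); e.g. for tanh below, f'(x) = 1 - tanh(x)**2.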
@with_seed()
def test_special_functions_using_scipy():
try:
from scipy import special as scipy_special
    except ImportError:
print("Could not import scipy. Skipping unit tests for special functions")
return
# gamma
mathematical_core("gamma", lambda x: mx.sym.gamma(x), lambda x: scipy_special.gamma(x),
lambda x: scipy_special.gamma(x) * scipy_special.psi(x), 0.5, 0.5)
# gammaln
mathematical_core("gammaln", lambda x: mx.sym.gammaln(x), lambda x: scipy_special.gammaln(x),
lambda x: scipy_special.psi(x), 0.5, 0.5)
# erf
mathematical_core("erf", lambda x: mx.sym.erf(x), lambda x: scipy_special.erf(x),
lambda x: 2.0 / math.sqrt(math.pi) * np.exp(-(x ** 2)), 0.5, 0.5)
# erfinv
mathematical_core("erfinv", lambda x: mx.sym.erfinv(x), lambda x: scipy_special.erfinv(x),
lambda x: 0.5 * math.sqrt(math.pi) * np.exp(scipy_special.erfinv(x) ** 2), 0.5, 0.5)
def rounding(name, forward_mxnet_call, forward_numpy_call, data_init=5., grad_init=2.):
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = data_init
arr_data = mx.nd.array(data_tmp)
test = forward_mxnet_call(data)
exe_test = test.bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = forward_numpy_call(data_tmp)
assert_almost_equal(out, npout)
@with_seed()
def test_mathematical():
# rsqrt
mathematical_core("rsqrt",
lambda x: mx.sym.rsqrt(x),
lambda x: 1 / np.sqrt(x),
lambda x: -(1.0 / (2.0 * x * np.sqrt(x))))
# tan
mathematical_core("tan", lambda x: mx.sym.tan(x), lambda x: np.tan(x), lambda x: np.tan(x) ** 2 + 1)
# arcsin
mathematical_core("arcsin", lambda x: mx.sym.arcsin(x), lambda x: np.arcsin(x),
lambda x: 1. / (1. - x ** 2) ** (1. / 2.), 0.5, 0.5)
# arccos
mathematical_core("arccos", lambda x: mx.sym.arccos(x), lambda x: np.arccos(x),
lambda x: -1. / (1. - x ** 2.) ** (1. / 2.), 0.5, 0.5)
# arctan
mathematical_core("arctan", lambda x: mx.sym.arctan(x), lambda x: np.arctan(x),
lambda x: 1. / (x ** 2. + 1.), 0.5, 0.5)
# hypot
mathematical_core_binary("hypot",
lambda x, y: mx.sym.hypot(x, y),
lambda x, y: np.hypot(x, y),
lambda x, y: x / np.hypot(x, y),
lambda x, y: y / np.hypot(x, y),
0.5, 0.5, 0.5)
# hypot scalar
mathematical_core("hypot scalar",
lambda x: mx.sym.hypot(x, 3),
lambda x: np.hypot(x, 3),
lambda x: x / np.hypot(x, 3),
0.5, 0.5)
# degrees
mathematical_core("degrees",
lambda x: mx.sym.degrees(x),
lambda x: np.degrees(x),
lambda x: 180./np.pi,
0.5, 0.5)
# radians
mathematical_core("radians",
lambda x: mx.sym.radians(x),
lambda x: np.radians(x),
lambda x: np.pi / 180.,
0.6, 1)
# sinh
mathematical_core("sinh", lambda x: mx.sym.sinh(x), lambda x: np.sinh(x), lambda x: np.cosh(x))
# cosh
mathematical_core("cosh", lambda x: mx.sym.cosh(x), lambda x: np.cosh(x), lambda x: np.sinh(x), 5, 5)
# tanh
mathematical_core("tanh", lambda x: mx.sym.tanh(x), lambda x: np.tanh(x), lambda x: 1. - np.tanh(x) ** 2, 0.5, 1)
# arcsinh
mathematical_core("arcsinh", lambda x: mx.sym.arcsinh(x), lambda x: np.arcsinh(x),
lambda x: 1./(x**2 + 1.)**(1./2.))
# arccosh
mathematical_core("arccosh", lambda x: mx.sym.arccosh(x), lambda x: np.arccosh(x),
lambda x: 1./(x**2 - 1.)**(1./2.))
# arctanh
mathematical_core("arctanh", lambda x: mx.sym.arctanh(x), lambda x: np.arctanh(x),
lambda x: -1./(x**2 - 1.), 0.5)
# log1p
mathematical_core("log1p", lambda x: mx.sym.log1p(x), lambda x: np.log1p(x),
lambda x: 1. / (1.0 + x), 0.5, 0.5)
# expm1
mathematical_core("expm1", lambda x: mx.sym.expm1(x), lambda x: np.expm1(x),
lambda x: np.exp(x), 0.5, 0.5)
# log10
mathematical_core("log10", lambda x: mx.sym.log10(x), lambda x: np.log10(x),
lambda x: 1. / (x * np.log(10.)))
# log2
mathematical_core("log2", lambda x: mx.sym.log2(x), lambda x: np.log2(x),
lambda x: 1. / (x * np.log(2.)))
# rint
rounding("rint", lambda x: mx.sym.rint(x), lambda x: np.rint(x))
# fix
rounding("fix", lambda x: mx.sym.fix(x), lambda x: np.fix(x))
@with_seed()
@unittest.skip("Flaky test, tracked at https://github.com/apache/incubator-mxnet/issues/12901")
def test_clip():
data = mx.symbol.Variable('data')
shape = (30, 30)
data_tmp = np.random.uniform(-1, 1, shape)
test = mx.sym.clip(data, a_max=0.6, a_min=-0.6)
check_symbolic_forward(test, [data_tmp], [np.clip(data_tmp, -0.6, 0.6)])
check_symbolic_backward(test, [data_tmp], [np.ones(shape)],
[np.where(data_tmp < 0.6, [1], [0]) * np.where(data_tmp > -0.6, [1], [0])])
@with_seed()
def test_init():
def test_basic_val_init(sym_func, np_func, shape, dtype):
x = sym_func(shape=shape, dtype=dtype)
exe = x.bind(default_context(), args=[], args_grad=[])
exe.forward(is_train=True)
assert_almost_equal(exe.outputs[0].asnumpy(), np_func(shape=shape, dtype=dtype))
assert exe.outputs[0].asnumpy().dtype == dtype
def test_arange():
# General Random Tests
dtype_list = [np.float32, np.float64, np.int32, np.uint8]
config_list = [(10,),
(0, 10),
(5, 100, 4),
(50, -50, -2),
(-100, 100, 1),
(1.3, 456.6, 1.3)]
for dtype in dtype_list:
for config in config_list:
repeats = random.choice([1, 3])
np_out = np.repeat(np.arange(*config, dtype=dtype), repeats)
nd_out = mx.nd.arange(*config, repeat=repeats, dtype=dtype)
assert_almost_equal(np_out, nd_out.asnumpy())
def test_arange_inferstop():
s = mx.sym.arange(start=0, stop=None, infer_range=True)
s = mx.sym.elemwise_add(s, mx.sym.zeros(shape=[5]))
exe = s.bind(ctx=mx.cpu(), args={})
exe.forward()
assert_almost_equal(exe.outputs[0].asnumpy(), np.array([0,1,2,3,4]))
test_basic_val_init(mx.sym.zeros, np.zeros, (3, 4), np.float32)
test_basic_val_init(mx.sym.ones, np.ones, 3, np.int32)
test_basic_val_init(mx.sym.ones, np.ones, (2, 2, 3), np.float16)
test_arange()
test_arange_inferstop()
@with_seed()
def test_order():
ctx = default_context()
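    # NumPy reference for topk. ret_typ selects the output:
    #   "indices" -> indices of the k largest (or smallest, if is_ascend) entries
    #   "value"   -> the selected values themselves
    #   otherwise -> a 0/1 mask over the input marking the selected entries
    #                (only implemented for the fixed (5, 5, 5, 5) test shape).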
def gt_topk(dat, axis, ret_typ, k, is_ascend):
if ret_typ == "indices":
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
ret = np.take(dat.argsort(axis=axis), axis=axis, indices=indices, mode='wrap')
elif ret_typ == "value":
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
ret = np.take(np.sort(dat, axis=axis), axis=axis, indices=indices, mode='wrap')
else:
assert dat.shape == (5, 5, 5, 5)
assert axis is None or axis == 1
ret = np.zeros(dat.shape)
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
gt_argsort = np.take(dat.argsort(axis=axis), axis=axis, indices=indices, mode='wrap')
if axis is None:
ret.ravel()[gt_argsort] = 1
else:
for i in range(5):
for j in range(5):
for k in range(5):
ret[i, gt_argsort[i, :, j, k], j, k] = 1
return ret
dshape = (5, 5, 5, 5)
a_npy = np.arange(np.prod(dshape)).astype(np.float32)
np.random.shuffle(a_npy)
a_npy = a_npy.reshape(dshape)
a = mx.sym.Variable('a')
def get_large_matrix():
data = np.array([np.arange(300096).astype(np.float32)])
data = np.repeat(data, 100, axis=0)
np.apply_along_axis(np.random.shuffle, 1, data)
return data
large_matrix_npy = get_large_matrix()
for axis in [1, 3, None]:
K = [1, 3, 5, 7] if axis is None else [1, 3, 5]
for k in K:
for is_ascend in [True, False]:
b = mx.sym.topk(a, axis=axis, is_ascend=is_ascend, ret_typ="value", k=k)
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=k, is_ascend=is_ascend)
check_numeric_gradient(b, location={'a': a_npy}, numeric_eps=1e-2, ctx=ctx)
check_symbolic_forward(b, location={'a': a_npy}, expected=[out_npy])
for axis in [1, 3, None]:
for is_ascend in [True, False]:
b = mx.sym.sort(a, axis=axis, is_ascend=is_ascend)
if axis is None:
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=a_npy.size, is_ascend=is_ascend)
else:
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=5, is_ascend=is_ascend)
check_numeric_gradient(b, location={'a': a_npy}, numeric_eps=1e-2, ctx=ctx)
check_symbolic_forward(b, location={'a': a_npy}, expected=[out_npy])
b = mx.sym.topk(a, axis=1, is_ascend=is_ascend, ret_typ="indices", k=5)
check_symbolic_backward(sym=b, location={'a': large_matrix_npy},
out_grads=[np.random.normal(size=(100, 5))],
expected=[np.zeros((100, 300096))])
check_symbolic_forward(b, location={'a': large_matrix_npy},
expected=[gt_topk(dat=large_matrix_npy, axis=1,
ret_typ="indices", k=5,
is_ascend=is_ascend)])
b = mx.sym.topk(a, axis=3, is_ascend=is_ascend, ret_typ="indices", k=3)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 3))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=3, ret_typ="indices", k=3,
is_ascend=False)])
b = mx.sym.topk(a, axis=1, is_ascend=True, ret_typ="mask", k=3)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="mask", k=3,
is_ascend=True)])
b = mx.sym.argsort(a, axis=1, is_ascend=False)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=5,
is_ascend=False)])
b = mx.sym.argmax(a, axis=1, keepdims=True)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=1,
is_ascend=False)])
b = mx.sym.argmin(a, axis=1, keepdims=True)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=1,
is_ascend=True)])
@with_seed()
def test_blockgrad():
a = mx.sym.Variable('a')
b = mx.sym.BlockGrad(a)
exe = b.simple_bind(ctx=default_context(), a=(10, 10))
a_npy = np.random.rand(10, 10)
exe.forward(is_train=True, a=a_npy)
assert_almost_equal(exe.outputs[0].asnumpy(), a_npy)
exe.backward() # No error if BlockGrad works
@with_seed()
def test_take():
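    # Adds 1.0 to the slice of grad_in selected by idx along the given axis;
    # iterating this over every gathered index builds the expected gradient of
    # take() w.r.t. its data input for an all-ones output gradient.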
def grad_helper(grad_in, axis, idx):
if axis == 0:
if axis == len(grad_in.shape) - 1:
grad_in[idx] += 1.0
else:
grad_in[idx, :] += 1.0
elif axis == 1:
if axis == len(grad_in.shape) - 1:
grad_in[:, idx] += 1.0
else:
grad_in[:, idx, :] += 1.0
elif axis == 2:
if axis == len(grad_in.shape) - 1:
grad_in[:, :, idx] += 1.0
else:
grad_in[:, :, idx, :] += 1.0
elif axis == 3:
if axis == len(grad_in.shape) - 1:
grad_in[:, :, :, idx] += 1.0
else:
grad_in[:, :, :, idx, :] += 1.0
elif axis == 4:
grad_in[:, :, :, :, idx] += 1.0
else:
raise ValueError("axis %d is not supported..." % axis)
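    # Binds take() with random data and indices, checks the forward result against
    # np.take with the same mode, then backpropagates an all-ones output gradient
    # and compares the data gradient with the one accumulated via grad_helper.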
def check_output_n_grad(data_shape, idx_shape, axis, mode):
data = mx.sym.Variable('a')
idx = mx.sym.Variable('indices')
idx = mx.sym.BlockGrad(idx)
result = mx.sym.take(a=data, indices=idx, axis=axis, mode=mode)
exe = result.simple_bind(default_context(), a=data_shape,
indices=idx_shape, axis=axis, mode=mode)
data_real = np.random.normal(size=data_shape).astype('float32')
idx_real = np.random.randint(low=0, high=data_shape[axis], size=idx_shape)
if axis < 0:
axis += len(data_shape)
grad_out = np.ones((data_shape[0:axis] if axis > 0 else ()) + idx_shape + (data_shape[axis+1:] if axis < len(data_shape) - 1 else ()), dtype='float32')
grad_in = np.zeros(data_shape, dtype='float32')
exe.arg_dict['a'][:] = mx.nd.array(data_real)
exe.arg_dict['indices'][:] = mx.nd.array(idx_real)
exe.forward(is_train=True)
assert_almost_equal(exe.outputs[0].asnumpy(), np.take(data_real, idx_real, axis=axis, mode=mode))
for i in np.nditer(idx_real):
grad_helper(grad_in, axis, i)
exe.backward([mx.nd.array(grad_out)])
assert_almost_equal(exe.grad_dict['a'].asnumpy(), grad_in)
def check_autograd_req():
row_len = 2
col_len = 8
shape = (row_len, col_len)
sc = mx.nd.random.uniform(-1.0, 1.0, shape=shape, dtype="float32")
sc.attach_grad()
i = mx.nd.array([0], dtype="int64")
j = mx.nd.array([0], dtype="int64")
with mx.autograd.record(train_mode=True):
xs = []
for _ in range(row_len):
x_i = []
for _ in range(col_len):
x_ij = sc.take(i).squeeze(axis=0).take(j).squeeze(axis=0)
x_i.append(x_ij)
j = j + 1
i = i + 1
j = j - col_len # reset j
xs.append(mx.nd.stack(*x_i))
x = mx.nd.stack(*xs)
x = x.sum()
x.backward()
assert_almost_equal(np.ones(sc.grad.shape), sc.grad.asnumpy())
for mode in ['clip', 'wrap']:
for data_ndim in range(1, 5):
for idx_ndim in range(1, 4):
for axis in range(-data_ndim, data_ndim):
data_shape = ()
for _ in range(data_ndim):
data_shape += (np.random.randint(low=1, high=5), )
idx_shape = ()
for _ in range(idx_ndim):
idx_shape += (np.random.randint(low=1, high=5), )
check_output_n_grad(data_shape, idx_shape, axis, mode)
check_autograd_req()
@with_seed()
def test_grid_generator():
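    # GridGenerator emits sampling coordinates normalized to [-1, 1]. For the
    # affine case the test feeds an identity transform, rescales the output back
    # to pixel coordinates and compares it with a meshgrid of the target shape;
    # the backward pass is checked against a hand-derived Jacobian (grad_est)
    # for both grad_req='write' and grad_req='add'. The warp case below repeats
    # the same checks with a constant flow field of ones.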
# transform_type = affine
test_case = [(20,21),(4,3),(6,12),(15,17)]
for target_shape in test_case:
affine_matrix = mx.sym.Variable('affine')
grid = mx.sym.GridGenerator(data=affine_matrix,transform_type='affine', target_shape=target_shape)
exe = grid.simple_bind(ctx=default_context(), affine=(1,6), grad_req='write')
# check forward
exe.arg_dict['affine'][:] = np.array([[1.0,0,0,0,1.0,0]])
exe.forward(is_train=True)
output = exe.outputs[0].asnumpy()
output[0,0,:,:] = (output[0,0,:,:] + 1) * (target_shape[1] - 1) / 2.0
output[0,1,:,:] = (output[0,1,:,:] + 1) * (target_shape[0] - 1) / 2.0
xv, yv = np.meshgrid(np.arange(target_shape[0]), np.arange(target_shape[1]))
assert_almost_equal(output[0,0], yv.T)
assert_almost_equal(output[0,1], xv.T)
# check backward
out_grad = np.random.normal(size=(1,2)+target_shape)
exe.backward(mx.nd.array(out_grad))
tmp = np.zeros((3,target_shape[0]*target_shape[1]))
tmp[0] = -1.0 + (np.arange(target_shape[0]*target_shape[1]) % target_shape[1]) * (2.0 / (target_shape[1]-1))
tmp[1] = -1.0 + (np.arange(target_shape[0]*target_shape[1]) // target_shape[1]) * (2.0 / (target_shape[0]-1))
tmp[2] = 1
grad_est = np.dot(out_grad[0].reshape(2,target_shape[0]*target_shape[1]),tmp.T).reshape(1,6)
assert_almost_equal(exe.grad_dict['affine'].asnumpy(), grad_est, rtol=1e-3, atol=1e-5)
# check addto
exe = grid.simple_bind(ctx=default_context(), affine=(1,6), grad_req='add')
grid_grad_npy = np.random.normal(size=exe.grad_dict['affine'].shape)
exe.grad_dict['affine'][:] = grid_grad_npy
exe.arg_dict['affine'][:] = np.array([[1.0, 0, 0, 0, 1.0, 0]])
exe.forward(is_train=True)
exe.backward(mx.nd.array(out_grad))
assert_almost_equal(exe.grad_dict['affine'].asnumpy(), grad_est + grid_grad_npy, rtol=1e-2, atol=1e-5)
# transform_type = warp
test_case = [(12,21),(4,3),(6,12)]
for target_shape in test_case:
flow = mx.sym.Variable('flow')
grid = mx.sym.GridGenerator(data=flow,transform_type='warp', target_shape=target_shape)
exe = grid.simple_bind(ctx=default_context(), flow=(1,2)+target_shape, grad_req='write')
# check forward
exe.arg_dict['flow'][:] = np.ones((1,2)+target_shape)
exe.forward(is_train=True)
output = exe.outputs[0].asnumpy()
output[0,0,:,:] = (output[0,0,:,:] + 1) * (target_shape[1] - 1) / 2.0
output[0,1,:,:] = (output[0,1,:,:] + 1) * (target_shape[0] - 1) / 2.0
xv, yv = np.meshgrid(np.arange(target_shape[0])+1, np.arange(target_shape[1])+1)
assert_almost_equal(output[0,0], yv.T)
assert_almost_equal(output[0,1], xv.T)
# check backward
out_grad = np.random.normal(size=(1,2)+target_shape)
exe.backward(mx.nd.array(out_grad))
grad_est = np.zeros((1,2)+target_shape)
grad_est[0,0] = out_grad[0,0] / ((target_shape[1]-1.0) / 2.0)
grad_est[0,1] = out_grad[0,1] / ((target_shape[0]-1.0) / 2.0)
assert_almost_equal(exe.grad_dict['flow'].asnumpy(), grad_est, rtol=1e-3)
# check addto
exe_add = grid.simple_bind(ctx=default_context(), flow=(1, 2) + target_shape, grad_req='add')
flow_grad_npy = np.random.normal(size=exe_add.grad_dict['flow'].shape)
exe_add.arg_dict['flow'][:] = np.ones((1, 2) + target_shape)
exe_add.grad_dict['flow'][:] = flow_grad_npy
exe_add.forward(is_train=True)
exe_add.backward(mx.nd.array(out_grad))
assert_almost_equal(exe_add.grad_dict['flow'].asnumpy(), grad_est + flow_grad_npy, rtol=1e-3, atol=1e-5)
@with_seed()
def test_index2d():
for _ in range(30):
n = np.random.randint(1, 100)
m = np.random.randint(1, 500)
data = mx.random.uniform(-1, 1, shape=(n, m), ctx=default_context())
x = mx.nd.array(np.random.randint(0, m, size=n), ctx=default_context(), dtype='int32')
r = mx.nd.batch_take(data, x)
assert_almost_equal(r.asnumpy(), data.asnumpy()[np.arange(n), x.asnumpy()])
@with_seed()
def test_cast():
for srctype in [np.int32, np.float32, np.float16]:
for dsttype in [np.float32, np.int32, np.float16]:
x = mx.sym.Variable('x', dtype=srctype)
y = mx.sym.Cast(x, dtype=dsttype)
exe = y.simple_bind(ctx=default_context(), x=(10, 10))
assert exe.arg_arrays[0].dtype == srctype
assert exe.outputs[0].dtype == dsttype
X = np.random.uniform(-10, 10, size=(10, 10))
exe.arg_arrays[0][:] = X
exe.forward(is_train=True)
exe.backward(mx.nd.array(X, dtype=dsttype, ctx=default_context()))
assert_almost_equal(exe.outputs[0].asnumpy(), X.astype(srctype).astype(dsttype), rtol=1e-3, atol=1e-5)
assert_almost_equal(exe.grad_arrays[0].asnumpy(), X.astype(dsttype).astype(srctype), rtol=1e-3, atol=1e-5)
# Test requires all platforms to round float32->float16 with same round-to-nearest-even policy.
@with_seed()
def test_cast_float32_to_float16():
FP16_FRACTION_BITS = 10
FP32_FRACTION_BITS = 23
FP32_EXP_MIN = -126
FP32_EXP_MAX = 127
# generate test cases in the vicinity of representable float16 mantissas
# and mid-way between them, but over the full range of float32 exponents.
def get_data():
for sign_bit in [0, 1]:
for exponent in range(FP32_EXP_MIN - FP32_FRACTION_BITS - 1, FP32_EXP_MAX + 2):
denominator = 2**(FP16_FRACTION_BITS + 1)
for numerator in range(0, denominator):
fraction = numerator / float(denominator)
for y in [-1.0, 0.0, 1.0]:
small_delta = y / 2**FP32_FRACTION_BITS
val = (-1.0)**sign_bit * 2.0**exponent * (1.0 + fraction + small_delta)
yield val
# Add np.nan as a final data value to process
yield np.nan
input_np = np.array(list(get_data())).astype(np.float32)
# The intermediate cast to np.float64 below gets around a numpy rounding bug that is fixed
# as of numpy 1.17 by PR https://github.com/numpy/numpy/pull/12722
expected_output = input_np.astype(np.float64).astype(np.float16)
x = mx.sym.Variable('x', dtype=np.float32)
sym = mx.sym.Cast(x, dtype=np.float16)
ctx = default_context()
exe = sym.bind(ctx, {'x' : mx.nd.array(input_np, dtype=np.float32, ctx=ctx)})
assert exe.arg_arrays[0].dtype == np.float32
assert exe.outputs[0].dtype == np.float16
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
for fp32_val, model_fp16_val, np_fp16_val in zip(input_np, sym_output, expected_output):
assert (model_fp16_val == np_fp16_val) or \
(np.isnan(model_fp16_val) and np.isnan(np_fp16_val)), \
'fp32->fp16 cast mismatch: with fp32 value {}, model_fp16 = {}, numpy_fp16 = {}'.format(
fp32_val, model_fp16_val, np_fp16_val)
@with_seed()
def test_repeat():
def test_repeat_forward():
ndim_max = 6 # max number of dims of the ndarray
size_max = 10 # max number of elements in each dim
repeats = 3
for ndim in range(1, ndim_max+1):
shape = ()
for i in range(0, ndim):
shape += (np.random.randint(1, size_max+1), )
a = np.random.random_sample(size=shape)
aa = np.repeat(a, repeats)
b = mx.nd.array(a, ctx=default_context())
bb = mx.nd.repeat(b, repeats).asnumpy()
assert_almost_equal(aa, bb)
for axis in range(0, ndim):
aa = np.repeat(a, repeats, axis)
bb = mx.nd.repeat(b, repeats, axis).asnumpy()
assert_almost_equal(aa, bb)
def test_repeat_backward(axis):
data = mx.sym.Variable('data')
n1 = 3
n2 = 4
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
repeats = 2
test = mx.sym.repeat(data, repeats=repeats, axis=axis)
exe = test.bind(ctx=default_context(), args=[arr_data], args_grad=[arr_grad])
npout_grad = np.random.randint(0, 10, n1 * n2 * repeats)
if axis == 0:
npout_grad = npout_grad.reshape(n1 * repeats, n2)
elif axis == 1:
npout_grad = npout_grad.reshape(n1, n2 * repeats)
else:
raise RuntimeError("Invalid axis value")
out_grad = mx.nd.array(npout_grad)
exe.backward(out_grad)
expected_grad = np.zeros(shape)
if axis == 0:
for i in range(shape[0]):
for j in range(shape[1]):
k = i * repeats
expected_grad[i][j] = sum(npout_grad[k:k + repeats, j])
elif axis == 1:
for j in range(shape[1]):
for i in range(shape[0]):
k = j * repeats
expected_grad[i][j] = sum(npout_grad[i, k:k + repeats])
else:
raise RuntimeError("Invalid axis value")
assert_almost_equal(expected_grad, arr_grad.asnumpy(), rtol=1e-3)
def test_repeat_numeric_gradient():
data = mx.sym.Variable('data')
n1 = 3
n2 = 4
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
repeats = 2
test = mx.sym.repeat(data, repeats=repeats, axis=0)
check_numeric_gradient(test, [data_tmp], numeric_eps=1e-3, rtol=1e-2)
test_repeat_forward()
test_repeat_backward(axis=0)
test_repeat_backward(axis=1)
test_repeat_numeric_gradient()
@with_seed()
def test_reverse():
data = mx.symbol.Variable('data')
shape = (5, 5, 5)
data_tmp = np.random.uniform(-1, 1, shape)
test = mx.sym.reverse(data, axis=[1, 2])
grad = np.random.uniform(-1, 1, shape)
check_numeric_gradient(test, [data_tmp], numeric_eps=2E-2)
check_symbolic_forward(test, [data_tmp], [data_tmp[:, ::-1, ::-1]])
check_symbolic_backward(test, [data_tmp], [grad], [grad[:, ::-1, ::-1]])
@with_seed()
def test_tile():
def test_normal_case():
ndim_min = 1
ndim_max = 5 # max number of dims of the ndarray
size_max = 10 # max number of elements in each dim
length_max = 3 # max length of reps
rep_max = 10 # max number of tiling in each dim
for ndim in range(ndim_min, ndim_max+1):
shape = []
for i in range(1, ndim+1):
shape.append(np.random.randint(1, size_max+1))
shape = tuple(shape)
a = np.random.randint(0, 100, shape)
b = mx.nd.array(a, dtype=a.dtype)
reps_len = np.random.randint(1, length_max+1)
reps_tuple = ()
for i in range(1, reps_len):
reps_tuple += (np.random.randint(1, rep_max), )
reps_array = np.asarray(reps_tuple)
a_tiled = np.tile(a, reps_array)
b_tiled = mx.nd.tile(b, reps_tuple).asnumpy()
assert same(a_tiled, b_tiled)
def test_empty_tensor():
shape = (2, 3, 0, 4)
a = np.array([], dtype=np.int32).reshape(shape)
b = mx.nd.array(a, ctx=default_context(), dtype=a.dtype)
reps = (2, 4, 6)
a_tiled = np.tile(a, reps)
b_tiled = mx.nd.tile(b, reps).asnumpy()
assert same(a_tiled, b_tiled)
def test_empty_reps():
a = np.array([[2, 3, 4], [5, 6, 7]], dtype=np.int32)
b = mx.nd.array(a, ctx=default_context(), dtype=a.dtype)
a_tiled = np.tile(a, ())
b_tiled = mx.nd.tile(b, ()).asnumpy()
assert same(a_tiled, b_tiled)
def test_tile_backward():
data = mx.sym.Variable('data')
n1 = 2
n2 = 2
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
reps1 = 2
reps2 = 2
reps = (reps1, reps2)
test = mx.sym.tile(data, reps=reps)
exe = test.bind(ctx=default_context(), args=[arr_data], args_grad=[arr_grad])
npout_grad = np.random.randint(0, 10, n1 * n2 * reps1 * reps2).reshape(n1 * reps1, n2 * reps2)
out_grad = mx.nd.array(npout_grad)
exe.backward(out_grad)
expected_grad = np.zeros(shape)
for i in range(shape[0]):
for j in range(shape[1]):
expected_grad[i][j] += sum(sum(npout_grad[i:(n1 * reps1):reps1, j:(n2 * reps2):reps2]))
assert_almost_equal(expected_grad, arr_grad.asnumpy(), rtol=1e-3)
def test_tile_numeric_gradient():
data = mx.sym.Variable('data')
n1 = 2
n2 = 2
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
reps1 = 2
reps2 = 2
reps = (reps1, reps2)
test = mx.sym.tile(data, reps=reps)
check_numeric_gradient(test, [data_tmp], numeric_eps=1e-2, rtol=1e-2)
def test_invalid_reps():
data = mx.nd.arange(16).reshape((4, 4))
assert_exception(mx.nd.tile, MXNetError, data, (1, 2, -3))
assert_exception(mx.nd.tile, MXNetError, data, (1, 0, 3))
test_normal_case()
test_empty_tensor()
test_empty_reps()
test_tile_backward()
test_tile_numeric_gradient()
test_invalid_reps()
@with_seed()
def test_one_hot():
def test_normal_case(index_type=np.int32):
ndim_max = 6
dim_size_max = 20
depth = int(dim_size_max / 2)
on_value = 1
off_value = 0
for ndim in range(1, ndim_max+1):
shape = ()
for i in range(1, ndim+1):
shape += (np.random.randint(1, dim_size_max+1), )
indices = np.random.randint(-dim_size_max, dim_size_max+1,
size=np.prod(shape)).reshape(shape)
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=index_type),
depth=depth, dtype=np.int32)
expected_array = np.zeros((np.prod(shape), depth), dtype=np.int32)
expected_array[:] = off_value
indices_1d = indices.flatten()
row = 0
for idx in indices_1d:
if 0 <= idx < depth:
expected_array[row, idx] = on_value
row += 1
expected_array = expected_array.reshape(shape + (depth, ))
one_hot_array = mx_one_hot_array.asnumpy()
assert same(expected_array, one_hot_array)
def test_empty_indices():
shape = (2, 0, 9, 3)
indices = np.array([]).reshape(shape)
depth = 10
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=np.int32),
depth=depth, dtype=np.int32).asnumpy()
expected_array = np.array([], dtype=np.int32).reshape(shape + (depth, ))
assert same(expected_array, mx_one_hot_array)
def test_zero_depth():
shape = (2, 4, 9, 3)
indices = np.ones(shape)
depth = 0
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=np.int32),
depth=depth, dtype=np.int32).asnumpy()
expected_array = np.array([], dtype=np.int32).reshape(shape + (depth, ))
assert same(expected_array, mx_one_hot_array)
test_normal_case(index_type=np.int32)
test_normal_case(index_type=np.float64)
test_normal_case(index_type=np.float32)
test_normal_case(index_type=np.float16)
test_empty_indices()
test_zero_depth()
@with_seed()
def test_where():
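    # NumPy reference for where(): the condition either matches the shape of x/y
    # (elementwise selection) or is a vector over the first axis (row-wise
    # selection); any other condition shape raises an error.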
def get_forward_expected_output(condition, x, y):
original_shape = x.shape
out = np.zeros(original_shape)
if condition.shape == x.shape:
for index, c in np.ndenumerate(condition):
if c != 0:
out[index] = x[index]
else:
out[index] = y[index]
elif condition.shape == (x.shape[0], ):
s = x.shape
m = s[0]
n = int(np.prod(s)/s[0])
x2d = x.reshape((m, n))
y2d = y.reshape((m, n))
out = out.reshape((m, n))
for i in range(0, m):
if condition[i] != 0:
for j in range(0, n):
out[i, j] = x2d[i, j]
else:
for j in range(0, n):
out[i, j] = y2d[i, j]
else:
raise RuntimeError("Invalid condition shape for where op")
out = out.reshape(original_shape)
return out
def get_forward_inputs_same_shape(shape):
condition_np = np.random.randint(0, 2, np.prod(shape)).reshape(shape)
x_np = np.random.randint(1, 6, np.prod(shape)).reshape(shape)
y_np = np.random.randint(7, 11, np.prod(shape)).reshape(shape)
return condition_np, x_np, y_np
def get_forward_inputs_condition_vector(shape):
condition_np = np.random.randint(0, 2, shape[0])
x_np = np.random.randint(1, 6, np.prod(shape)).reshape(shape)
y_np = np.random.randint(7, 11, np.prod(shape)).reshape(shape)
return condition_np, x_np, y_np
def get_backward_input(shape):
return np.random.randint(20, 30, np.prod(shape)).reshape(shape)
def get_backward_expected_outputs(grad_in, condition):
shape = grad_in.shape
grad_cond = np.zeros(condition.shape)
grad_x = np.empty(shape)
grad_y = np.empty(shape)
for index, c in np.ndenumerate(condition):
if 0 != c:
grad_x[index] = grad_in[index]
grad_y[index] = 0
else:
grad_x[index] = 0
grad_y[index] = grad_in[index]
return grad_cond, grad_x, grad_y
def test_where_helper(shape, same_shape):
if same_shape:
condition_np, x_np, y_np = get_forward_inputs_same_shape(shape)
else:
condition_np, x_np, y_np = get_forward_inputs_condition_vector(shape)
out_expected = get_forward_expected_output(condition_np, x_np, y_np)
grad_in_np = get_backward_input(shape)
grad_expected_cond, grad_expected_x, grad_expected_y\
= get_backward_expected_outputs(grad_in_np, condition_np)
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
grad_in_mx = mx.nd.array(grad_in_np, dtype=np.int32)
where_sym = mx.sym.where(condition, x, y)
# test req='write'
where_exe_write = where_sym.simple_bind(ctx=default_context(),
condition=condition_np.shape,
x=x_np.shape, y=y_np.shape,
grad_req='write')
# test forward req='write'
outputs = where_exe_write.forward(is_train=True, condition=condition_np,
x=x_np, y=y_np)
assert same(outputs[0].asnumpy(), out_expected)
# test backward req='write'
where_exe_write.backward(grad_in_mx)
assert same(where_exe_write.grad_dict['x'].asnumpy(), grad_expected_x)
assert same(where_exe_write.grad_dict['y'].asnumpy(), grad_expected_y)
assert same(where_exe_write.grad_dict['condition'].asnumpy(), grad_expected_cond)
# test req='add'
x_grad_init = np.random.randint(30, 40, np.prod(shape)).reshape(shape)
y_grad_init = np.random.randint(40, 50, np.prod(shape)).reshape(shape)
where_exe_add = where_sym.simple_bind(ctx=default_context(),
condition=condition_np.shape,
x=x_np.shape, y=y_np.shape,
grad_req='add')
where_exe_add.grad_dict['x'][:] = x_grad_init
where_exe_add.grad_dict['y'][:] = y_grad_init
# test forward req='add'
outputs = where_exe_add.forward(is_train=True, condition=condition_np, x=x_np, y=y_np)
assert same(outputs[0].asnumpy(), out_expected)
# test backward req='add'
where_exe_add.backward(grad_in_mx)
x_ograd = where_exe_add.grad_dict['x'].asnumpy()
y_ograd = where_exe_add.grad_dict['y'].asnumpy()
assert same(x_ograd, grad_expected_x+x_grad_init)
assert same(y_ograd, grad_expected_y+y_grad_init)
def test_where_numeric_gradient(shape, same_shape):
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
where_sym = mx.sym.where(condition, x, y)
if same_shape:
condition_np, x_np, y_np = get_forward_inputs_same_shape(shape)
else:
condition_np, x_np, y_np = get_forward_inputs_condition_vector(shape)
check_numeric_gradient(where_sym, [condition_np, x_np, y_np], grad_nodes=['x', 'y'])
def test_invalid_shape():
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
where_sym = mx.sym.where(condition, x, y)
assert_exception(lambda: where_sym.eval(x=mx.nd.array([[2,3],[4,5],[6,7]]),
y=mx.nd.array([[8,9],[10,11],[12,13]]),
condition=mx.nd.array([1,0])), MXNetError)
assert_exception(lambda: mx.nd.where(x=mx.nd.array([[2,3],[4,5],[6,7]]),
y=mx.nd.array([[8,9],[10,11],[12,13]]),
condition=mx.nd.array([1,0])), MXNetError)
def test_1d_cond():
cond = mx.nd.array([1, 0, 1])
x = mx.nd.array([[2, 3], [4, 5], [6, 7]])
y = mx.nd.array([[7, 8], [9, 10], [10, 11]])
expect_out = np.array([[2, 3], [9, 10], [6, 7]])
out = mx.nd.where(cond, x, y).asnumpy()
        assert same(expect_out, out)
test_where_helper((5, 9), True)
test_where_helper((5, 9), False)
test_where_helper((5, 7, 9), True)
test_where_helper((5, 7, 9), False)
test_where_helper((10, 8, 15, 3), True)
test_where_helper((10, 8, 15, 3), False)
test_where_numeric_gradient((5, 9), True)
test_where_numeric_gradient((5, 9), False)
test_where_numeric_gradient((5, 7, 9), True)
test_where_numeric_gradient((5, 7, 9), False)
test_invalid_shape()
test_1d_cond()
@unittest.skip("Flaky test. Tracked in https://github.com/apache/incubator-mxnet/issues/13600")
@with_seed()
def test_softmin():
for ndim in range(1, 5):
for dtype in [np.float16, np.float32, np.float64]:
rtol, atol = (1e-2, 5e-3) if dtype is np.float16 else (1e-3, 1e-3)
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(-ndim, ndim)
data = np.random.uniform(-2, 2, size=shape).astype(dtype)
data = data / 10 if dtype is np.float16 else data
sym = mx.sym.softmin(axis=axis)
expected_fwd = np_softmax(-data, axis=axis)
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd], atol=atol, dtype=dtype)
for req in ['null', 'add', 'write']:
check_symbolic_backward(sym, [data], [np.ones(expected_fwd.shape)], [expected_bwd],
rtol=rtol, atol=atol, grad_req=req, dtype=dtype)
if dtype is not np.float16:
check_numeric_gradient(sym, [data], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_new_softmax():
for ndim in range(1, 5):
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(-ndim, ndim)
data = np.random.uniform(-2, 2, size=shape)
sym = mx.sym.softmax(axis=axis)
expected_fwd = np_softmax(data, axis=axis)
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd])
for req in ['null', 'add', 'write']:
check_symbolic_backward(sym, [data], [np.ones(expected_fwd.shape)], [expected_bwd],
rtol=1e-2, atol=1e-3, grad_req=req)
check_numeric_gradient(sym, [data], rtol=1e-2, atol=1e-3)
@with_seed()
def test_softmax_with_temperature():
for ndim in range(1, 5):
shape = np.random.randint(1, 5, size=ndim)
data = np.random.uniform(-2, 2, size=shape)
for temp in range(1, 11):
sym = mx.sym.softmax(axis=0, temperature=temp)
expected_fwd = np_softmax(data, axis=0, temperature=temp)
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd], rtol=0.05, atol=1e-3)
check_symbolic_backward(sym, [data], [np.ones(shape)], [expected_bwd], rtol=0.05, atol=1e-3)
check_numeric_gradient(sym, [data], rtol=0.05, atol=1e-3)
@with_seed()
def test_log_softmax():
for ndim in range(1, 5):
for _ in range(5):
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(0, ndim)
data = np.random.uniform(-2, 2, size=shape)
sym = mx.sym.log_softmax(axis=axis-ndim)
check_symbolic_forward(sym, [data], [np.log(np_softmax(data, axis=axis)+1e-20)])
check_numeric_gradient(sym, [data], rtol=0.05, atol=1e-3)
def test_softmax_with_large_inputs():
def softmax_forward(input_data, true_output):
data = mx.sym.Variable('data')
out1 = data.softmax(axis=1)
exec1 = out1.bind(default_context(), args={'data': input_data})
exec1.forward()[0].wait_to_read()
ndarr = exec1.outputs[0][0][0][0]
nparr = ndarr.asnumpy()
assert_almost_equal(nparr, true_output, rtol=1e-5, atol=1e-5)
softmax_forward(mx.nd.array([[[[-1e30,-1e30]]]]), np.array([1.0,1.0]))
softmax_forward(mx.nd.array([[[[1e30,1e30]]]]), np.array([1.0,1.0]))
softmax_forward(mx.nd.array([[[[-3.4e38,-3.4e38]]]]), np.array([1.0,1.0]))
softmax_forward(mx.nd.array([[[[3.4e38,3.4e38]]]]), np.array([1.0,1.0]))
@with_seed()
def test_softmax_dtype():
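    # Runs the same softmax-family operator on a low-precision copy (idtype) and a
    # higher-precision reference copy (ref_dtype) of one input, optionally forcing
    # an output dtype, and compares both the forward outputs and the autograd
    # input gradients within the given tolerances.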
def check_dtypes_almost_equal(op_name,
atol, rtol,
grad_atol, grad_rtol,
idtype, ref_dtype, odtype=None):
op = getattr(mx.nd, op_name)
input_data = mx.random.uniform(shape=(100, 500))
dtype_input = input_data.astype(idtype)
ref_input = input_data.astype(ref_dtype)
dtype_input.attach_grad()
ref_input.attach_grad()
with mx.autograd.record():
dtype_softmax = op(dtype_input, axis=-1, dtype=odtype)
ref_softmax = op(ref_input, axis=-1, dtype=odtype)
dtype_softmax_np = dtype_softmax.asnumpy()
ref_softmax_np = ref_softmax.asnumpy()
assert_almost_equal(dtype_softmax_np, ref_softmax_np, rtol=rtol, atol=atol)
dtype_softmax.backward()
ref_softmax.backward()
dtype_grad_np = dtype_input.grad.asnumpy()
ref_grad_np = ref_input.grad.asnumpy()
assert_almost_equal(dtype_grad_np, ref_grad_np, rtol=grad_rtol, atol=grad_atol)
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32', 'float32')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64', 'float64')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32', 'float32')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64', 'float64')
check_dtypes_almost_equal('log_softmax', 1e-2, 1e-2, 1e-2, 1e-2,
'float16', 'float32')
check_dtypes_almost_equal('log_softmax', 1e-2, 1e-2, 1e-2, 1e-2,
'float16', 'float32', 'float32')
check_dtypes_almost_equal('log_softmax', 1e-3, 1e-3, 1e-3, 1e-3,
'float32', 'float64')
check_dtypes_almost_equal('log_softmax', 1e-3, 1e-3, 1e-3, 1e-3,
'float32', 'float64', 'float64')
@with_seed()
def test_pick():
def test_pick_helper(index_type=np.int32):
for _ in range(100):
for mode in ['clip', 'wrap']:
ndim = np.random.randint(1, 5)
bshape = np.random.randint(1, 10, size=ndim)
axis = np.random.randint(0, ndim)
sshape = bshape.copy()
sshape[axis] = 1
data = np.random.uniform(-1, 1, size=bshape)
if mode == 'wrap':
index = np.random.randint(-2*bshape[axis], 2*bshape[axis], size=sshape)
else:
index = np.random.randint(0, bshape[axis], size=sshape)
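                # Build the expected result with advanced indexing: along `axis`
                # use the (possibly wrapped) picked indices, along every other
                # dimension use a broadcastable arange.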
exp = []
for i in range(ndim):
if i == axis:
if mode == 'wrap':
exp.append(index % bshape[axis])
else:
exp.append(index)
else:
ishape = [1 for _ in range(ndim)]
ishape[i] = bshape[i]
exp.append(np.arange(bshape[i]).reshape(ishape))
expected = data[exp]
data = mx.nd.array(data, dtype='float32')
index = mx.nd.array(index, dtype=index_type)
out = mx.nd.pick(data, index, axis=axis, keepdims=True, mode=mode)
assert_almost_equal(out.asnumpy(), expected)
data_holder = data
index_holder = index
data = mx.sym.Variable('data')
index = mx.sym.Variable('index')
sym = mx.sym.pick(data, index, axis=axis, keepdims=True, mode=mode)
check_numeric_gradient(sym, [data_holder, index_holder], grad_nodes=['data'])
test_pick_helper(np.int32)
test_pick_helper(np.float32)
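# Checks ctc_loss: the loss must be identical with and without gradient
# computation (is_train=True vs. False), must match the supplied ground truth
# when one is given, and its gradient w.r.t. the activations must pass a
# numeric gradient check.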
def check_ctc_loss(acts, labels, loss_truth):
in_var = mx.sym.Variable('input')
labels_var = mx.sym.Variable('labels')
ctc = mx.sym.ctc_loss(in_var, labels_var)
acts_nd = mx.nd.array(acts, ctx=default_context())
labels_nd = mx.nd.array(labels, ctx=default_context())
exe = ctc.bind(ctx=default_context(), args=[acts_nd, labels_nd])
    # test forward with grad calc; snapshot the result, since the executor reuses
    # its output buffer on the next forward call
    exe.forward(is_train=True)
    out_train = exe.outputs[0].asnumpy()
    # test forward without grad calc
    exe.forward(is_train=False)
    out_eval = exe.outputs[0].asnumpy()
    # make sure losses calculated with both modes are the same
    assert_almost_equal(out_train, out_eval)
    # test against ground truth, if available
    if loss_truth is not None:
        assert_almost_equal(out_eval, loss_truth)
# test grad
check_numeric_gradient(ctc, [acts, labels], grad_nodes=['input'], rtol=0.05, atol=1e-3)
# check contrib operator for backward compatibility
def check_contrib_ctc_loss(acts, labels, loss_truth):
in_var = mx.sym.Variable('input')
labels_var = mx.sym.Variable('labels')
ctc = mx.sym.contrib.ctc_loss(in_var, labels_var)
acts_nd = mx.nd.array(acts, ctx=default_context())
labels_nd = mx.nd.array(labels, ctx=default_context())
exe = ctc.bind(ctx=default_context(), args=[acts_nd, labels_nd])
    # test forward with grad calc; snapshot the result, since the executor reuses
    # its output buffer on the next forward call
    exe.forward(is_train=True)
    out_train = exe.outputs[0].asnumpy()
    # test forward without grad calc
    exe.forward(is_train=False)
    out_eval = exe.outputs[0].asnumpy()
    # make sure losses calculated with both modes are the same
    assert_almost_equal(out_train, out_eval)
    # test against ground truth, if available
    if loss_truth is not None:
        assert_almost_equal(out_eval, loss_truth)
# test grad
check_numeric_gradient(ctc, [acts, labels], grad_nodes=['input'], rtol=0.05, atol=1e-3)
@with_seed()
def test_ctc_loss():
# Test 1: check that batches are same + check against Torch WarpCTC
acts = np.array([
[[1.2, 3.4, 1.2, -0.1, -2.34], [1.2, 3.4, 1.2, -0.1, -2.34]],
[[0.1, 0.2, 0.3, 0.22, 0.123], [0.1, 0.2, 0.3, 0.22, 0.123]],
[[-15, -14, -13, -12, -11], [-15, -14, -13, -12, -11]]],
dtype=np.float32)
labels = np.array([[2, 3, 0], [2, 3, 0]])
true_loss = np.array([4.04789, 4.04789], dtype=np.float32) # from Torch
check_ctc_loss(acts, labels, true_loss)
check_contrib_ctc_loss(acts, labels, true_loss)
# Test 2:
acts2 = np.array([
[[-5, -4, -3, -2, -1], [1.2, 3.4, 1.2, -0.1, -2.34]],
[[-10, -9, -8, -7, -6], [0.1, 0.2, 0.3, 0.22, 0.123]],
[[-15, -14, -13, -12, -11], [-15, -14.2, -13.5, -12.2, -11.22]]], dtype=np.float32)
labels2 = np.array([[2, 3, 1], [2, 0, 0]], dtype=np.float32)
true_loss = np.array([7.3557, 5.4091], dtype=np.float32) # from Torch
check_ctc_loss(acts2, labels2, true_loss)
check_contrib_ctc_loss(acts2, labels2, true_loss)
# Test 3: check use integer type as label
labels3 = np.array([[2, 3, 1], [2, 0, 0]], dtype=np.int32)
true_loss = np.array([7.3557, 5.4091], dtype=np.float32) # from Torch
check_ctc_loss(acts2, labels3, true_loss)
check_contrib_ctc_loss(acts2, labels3, true_loss)
@with_seed()
def test_ctc_loss_with_large_classes():
ctx = default_context()
num_classes = 6000
seq_len = 8
batch_size = 2
data = np.empty((num_classes, 0))
    for i in range(seq_len * batch_size):
row = np.roll(np.arange(num_classes, dtype=np.float32), i).reshape(num_classes, 1)
data = np.append(data, row/13, axis=1)
data = data.reshape(seq_len, batch_size, num_classes)
label = np.array([
[100, 200, 300, 400, 500, 0, 0, 0],
[1000, 2000, 3000, 4000, 0, 5000, 0, 0]], dtype=np.int32)
nd_data = mx.nd.array(data)
nd_label = mx.nd.array(label)
loss = mx.nd.ctc_loss(data=nd_data, label=nd_label)
expected_loss = np.array([688.02826, 145.34462])
assert_almost_equal(loss.asnumpy(), expected_loss)
@with_seed()
def test_ctc_loss_grad():
def check_ctc_loss_grad(blank_label): # from tf
vocab_size = 5
max_label_len = 5
        padding_mask = -1 + (blank_label == 'first')
targets_0 = [0, 1, 2, 1, 0]
loss_log_prob_0 = -3.34211
input_prob_matrix_0 = np.asarray(
[[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
gradient_log_prob_0 = np.asarray(
[[-0.366234, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, -0.411608, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, -0.678582, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, -0.356151, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[-0.541765, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
targets_1 = [0, 1, 1, 0]
loss_log_prob_1 = -5.42262
input_prob_matrix_1 = np.asarray(
[[0.30176, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, 0.397533, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, 0.202456],
[0.280884, 0.429522, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[0.423286, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
gradient_log_prob_1 = np.asarray(
[[-0.69824, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, -0.602467, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, -0.797544],
[0.280884, -0.570478, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[-0.576714, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
inputs = [
np.vstack(
[input_prob_matrix_0[t, :], input_prob_matrix_1[t, :]])
for t in range(5)
] + 2 * [np.nan * np.ones((2, vocab_size+1), np.float32)]
inputs = np.log(np.asarray(inputs, dtype=np.float32))
grad_truth = np.array([
np.vstack(
[gradient_log_prob_0[t, :], gradient_log_prob_1[t, :]])
for t in range(5)
] + 2 * [np.zeros((2, vocab_size+1), np.float32)])
if blank_label == 'first':
inputs = np.roll(inputs, 1, axis=2)
grad_truth = np.roll(grad_truth, 1, axis=2)
labels = (np.asarray([x + [padding_mask]*(max_label_len-len(x))
for x in [targets_0, targets_1]])+(blank_label == 'first'))
seq_lens = np.array([5, 5], dtype=np.int32)
label_lens = np.array([5, 4], dtype=np.int32)
loss_truth = np.array([-loss_log_prob_0, -loss_log_prob_1], np.float32)
with default_context():
data = mx.nd.array(inputs)
label = mx.nd.array(labels)
data.attach_grad()
with mx.autograd.record():
l = mx.ndarray.CTCLoss(data, label,
use_data_lengths=True,
use_label_lengths=True,
data_lengths=mx.nd.array(seq_lens),
label_lengths=mx.nd.array(label_lens),
blank_label=blank_label)
l.backward()
assert_almost_equal(l.asnumpy(), loss_truth, atol=1e-5, rtol=1e-5)
assert_almost_equal(data.grad.asnumpy(), grad_truth, atol=1e-5, rtol=1e-5)
# check contrib operator for backward compatibility
def check_contrib_ctc_loss_grad(blank_label): # from tf
vocab_size = 5
max_label_len = 5
        padding_mask = -1 + (blank_label == 'first')
targets_0 = [0, 1, 2, 1, 0]
loss_log_prob_0 = -3.34211
input_prob_matrix_0 = np.asarray(
[[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
gradient_log_prob_0 = np.asarray(
[[-0.366234, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, -0.411608, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, -0.678582, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, -0.356151, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[-0.541765, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
targets_1 = [0, 1, 1, 0]
loss_log_prob_1 = -5.42262
input_prob_matrix_1 = np.asarray(
[[0.30176, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, 0.397533, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, 0.202456],
[0.280884, 0.429522, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[0.423286, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
gradient_log_prob_1 = np.asarray(
[[-0.69824, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, -0.602467, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, -0.797544],
[0.280884, -0.570478, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[-0.576714, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
inputs = [
np.vstack(
[input_prob_matrix_0[t, :], input_prob_matrix_1[t, :]])
for t in range(5)
] + 2 * [np.nan * np.ones((2, vocab_size+1), np.float32)]
inputs = np.log(np.asarray(inputs, dtype=np.float32))
grad_truth = np.array([
np.vstack(
[gradient_log_prob_0[t, :], gradient_log_prob_1[t, :]])
for t in range(5)
] + 2 * [np.zeros((2, vocab_size+1), np.float32)])
if blank_label == 'first':
inputs = np.roll(inputs, 1, axis=2)
grad_truth = np.roll(grad_truth, 1, axis=2)
labels = (np.asarray([x + [padding_mask]*(max_label_len-len(x))
for x in [targets_0, targets_1]])+(blank_label == 'first'))
seq_lens = np.array([5, 5], dtype=np.int32)
label_lens = np.array([5, 4], dtype=np.int32)
loss_truth = np.array([-loss_log_prob_0, -loss_log_prob_1], np.float32)
with default_context():
data = mx.nd.array(inputs)
label = mx.nd.array(labels)
data.attach_grad()
with mx.autograd.record():
l = mx.contrib.ndarray.CTCLoss(data, label,
use_data_lengths=True,
use_label_lengths=True,
data_lengths=mx.nd.array(seq_lens),
label_lengths=mx.nd.array(label_lens),
blank_label=blank_label)
l.backward()
assert_almost_equal(l.asnumpy(), loss_truth, atol=1e-5, rtol=1e-5)
assert_almost_equal(data.grad.asnumpy(), grad_truth, atol=1e-5, rtol=1e-5)
check_ctc_loss_grad('first')
check_ctc_loss_grad('last')
check_contrib_ctc_loss_grad('first')
check_contrib_ctc_loss_grad('last')
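# Round-trips a small float32 matrix through int8 quantization: quantize() maps
# values using the given real-value range [min0, max0], dequantize() maps the
# int8 codes back, and both the codes and the reconstructed (slightly lossy)
# values are compared against precomputed expectations.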
@with_seed()
def test_quantization_op():
min0 = mx.nd.array([0.0])
max0 = mx.nd.array([1.0])
a = mx.nd.array([[0.1392, 0.5928], [0.6027, 0.8579]])
qa, min1, max1 = mx.nd.contrib.quantize(a, min0, max0, out_type='int8')
a_ = mx.nd.contrib.dequantize(qa, min1, max1, out_type='float32')
qa_real = mx.nd.array([[18, 75], [77, 109]])
a_real = mx.nd.array([[0.14173228, 0.5905512], [0.6062992, 0.8582677]])
assert same(qa.asnumpy(), qa_real.asnumpy())
assert same(a_.asnumpy(), a_real.asnumpy())
@with_seed()
def test_index_copy():
x = mx.nd.zeros((5,3))
t = mx.nd.array([[1,2,3],[4,5,6],[7,8,9]])
index = mx.nd.array([0,4,2], dtype=np.int64)
tensor = mx.nd.array([[1,2,3],[0,0,0],[7,8,9],[0,0,0],[4,5,6]])
x_grad = mx.nd.array([[0,0,0],[1,1,1],[0,0,0],[1,1,1],[0,0,0]])
t_grad = mx.nd.array([[1,1,1],[1,1,1],[1,1,1]])
t.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.index_copy(x, index, t)
out.backward()
assert same(out.asnumpy(), tensor.asnumpy())
assert same(t.grad.asnumpy(), t_grad.asnumpy())
x.attach_grad()
t.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.index_copy(x, index, t)
out.backward()
assert same(out.asnumpy(), tensor.asnumpy())
assert same(x.grad.asnumpy(), x_grad.asnumpy())
assert same(t.grad.asnumpy(), t_grad.asnumpy())
@with_seed()
def test_boolean_mask():
data = mx.nd.array([[1, 2, 3],[4, 5, 6],[7, 8, 9]])
index = mx.nd.array([0, 1, 0])
data.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.boolean_mask(data, index)
out.backward()
data.grad.wait_to_read()
expected = np.array([[4, 5, 6]])
expected_grad = np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]])
assert same(out.asnumpy(), expected)
assert same(data.grad.asnumpy(), expected_grad)
@with_seed()
def test_div_sqrt_dim():
data_tmp = np.random.normal(0, 1, (5, 10, 8))
data = mx.symbol.Variable('data')
test = mx.sym.contrib.div_sqrt_dim(data)
check_numeric_gradient(test, [data_tmp], numeric_eps=1E-2)
check_symbolic_forward(test, [data_tmp], [data_tmp / np.sqrt(data_tmp.shape[-1])])
@with_seed()
def test_reciprocal_op():
eps = 2**(-11)
data_tmp = np.random.rand(3, 4) * 10 - 5
# Avoid possible division by 0 errors and finite difference method inaccuracies.
# Factor of 6 below set empirically, depends on eps.
# Issue exposed by seed 879579887.
# Replace problematic inputs with 1.0.
data_tmp[abs(data_tmp) < 6*eps] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.reciprocal(data)
check_numeric_gradient(test, [data_tmp], numeric_eps = eps)
check_symbolic_forward(test, [data_tmp], [np.reciprocal(data_tmp)])
@with_seed()
def test_cbrt_op():
eps = 2**(-11)
data_tmp = np.random.rand(3, 4) * 10 - 5
# Avoid finite difference method inaccuracies due to infinite gradient at the origin.
# Factor of 4 below set empirically, depends on eps.
# Issue exposed by seed 553872106.
# Replace problematic inputs with 1.0.
data_tmp[abs(data_tmp) < 4*eps] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.cbrt(data)
check_numeric_gradient(test, [data_tmp], numeric_eps=eps)
check_symbolic_forward(test, [data_tmp], [np.cbrt(data_tmp)])
@with_seed()
def test_rcbrt_op():
eps = 2**(-11)
data_tmp = np.random.rand(3, 4) * 10 - 5
# Avoid possible division by 0 errors and finite difference method inaccuracies.
# Factor of 4 below set empirically, depends on eps.
# Issue exposed by seed 788174893.
# Replace problematic inputs with 1.0.
data_tmp[abs(data_tmp) < 4*eps] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.rcbrt(data)
check_numeric_gradient(test, [data_tmp], numeric_eps = eps)
check_symbolic_forward(test, [data_tmp], [1/np.cbrt(data_tmp)])
@with_seed()
def test_custom_op():
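    # Custom operator that squares its input. It supports both default (dense) and
    # CSR storage: the infer_storage_type* hooks route CSR inputs through the sparse
    # path, and an auxiliary state is written in the dense forward pass and verified
    # in backward.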
class Sqr(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
if in_data[0].stype == 'default':
aux[0][:] = 1
self.assign(out_data[0], req[0], in_data[0]*in_data[0])
else:
inp = in_data[0]
csr_m = inp.data * inp.data
out = mx.nd.sparse.csr_matrix((csr_m, inp.indices, inp.indptr), shape=inp.shape)
self.assign(out_data[0], req[0], out)
if (in_data[0].stype == 'csr'):
assert(isinstance(out_data[0], mx.nd.sparse.CSRNDArray))
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], 2 * mx.nd.sparse.elemwise_mul(in_data[0], out_grad[0]))
if in_data[0].stype == 'default':
assert (aux[0].asnumpy() == 1).all()
@mx.operator.register("sqr")
class SqrProp(mx.operator.CustomOpProp):
def __init__(self):
super(SqrProp, self).__init__(need_top_grad=True)
def list_arguments(self):
return ['data']
def list_outputs(self):
return ['output']
def list_auxiliary_states(self):
return ['aux']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], [in_shape[0]]
def infer_type(self, in_type):
return in_type, [in_type[0]], [in_type[0]]
def infer_storage_type(self, in_stype):
if in_stype[0] == 'default':
return ['default'], ['default'], ['default']
return ['csr'], ['csr'], ['csr']
def infer_storage_type_backward(self, ograd_stype, in_stype,
out_stype, igrad_stype, aux_stype):
if in_stype[0] == 'default':
return ['default'], ['default'], ['default'], ['default'], ['default']
return ['default'], ['csr'], ['csr'], ['csr'], ['csr']
def create_operator(self, ctx, shapes, dtypes):
return Sqr()
data = mx.symbol.Variable('data')
aux = mx.symbol.Variable('aux')
op = mx.symbol.Custom(data=data, aux=aux, name='sqr', op_type='sqr')
x = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
aux = mx.nd.zeros_like(x)
check_numeric_gradient(op, [x], [aux])
data = mx.symbol.cast(data, dtype='float64')
op = mx.symbol.cast(op, dtype='float32')
check_numeric_gradient(op, [x], [aux])
data = mx.symbol.Variable('data', stype='csr')
aux = mx.symbol.Variable('aux')
op2 = mx.symbol.Custom(data=data, aux=aux, name='sqr', op_type='sqr')
x = x.tostype('csr')
aux = mx.nd.zeros_like(x)
check_numeric_gradient(op2, [x], [aux], grad_stype_dict={"data": "csr"})
x2 = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
x2 = x2.tostype('csr')
aux2 = mx.nd.zeros_like(x2)
x2.attach_grad()
with mx.autograd.record():
output = mx.nd.Custom(x2, aux2, name='sqr', op_type='sqr')
output.backward()
expected_output = mx.nd.sparse.square(x2)
expected_grad = 2 * x2
rtol = 1e-4
atol = 1e-6
assert_almost_equal(output.asnumpy(), expected_output.asnumpy(), rtol=rtol, atol=atol)
assert_almost_equal(x2.grad.asnumpy(), expected_grad.asnumpy(), rtol=rtol, atol=atol)
# test for backward compatibility, i.e. the correctness of default implementation of
# infer storage in custom operator
class Mult(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], in_data[0]*in_data[1])
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], in_data[1])
self.assign(in_grad[1], req[1], in_data[0])
@mx.operator.register("mult")
class MultProp(mx.operator.CustomOpProp):
def __init__(self):
super(MultProp, self).__init__(need_top_grad=True)
def list_arguments(self):
return ['lhs', 'rhs']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], []
def create_operator(self, ctx, shapes, dtypes):
return Mult()
lhs = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
rhs = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
lhs.attach_grad()
rhs.attach_grad()
with mx.autograd.record():
y = mx.nd.Custom(lhs, rhs, name='mult', op_type='mult')
y.backward()
assert_almost_equal(rhs.asnumpy(), lhs.grad.asnumpy(), rtol=rtol, atol=atol)
assert_almost_equal(lhs.asnumpy(), rhs.grad.asnumpy(), rtol=rtol, atol=atol)
class MultNoGrad(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], in_data[0]*in_data[1])
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], in_data[1])
self.assign(in_grad[1], req[1], in_data[0])
@mx.operator.register("mult_no_grad")
class MultNoGradProp(mx.operator.CustomOpProp):
def __init__(self):
super(MultNoGradProp, self).__init__(need_top_grad=False)
def list_arguments(self):
return ['lhs', 'rhs']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], []
def create_operator(self, ctx, shapes, dtypes):
return MultNoGrad()
def infer_storage_type_backward(self, ograd_stype, in_stype, out_stype, igrad_stype, aux_stype):
return ograd_stype, in_stype, out_stype, igrad_stype, aux_stype
with mx.autograd.record():
y2 = mx.nd.Custom(lhs, rhs, name="mult_no_grad", op_type="mult_no_grad")
y2.backward()
assert_almost_equal(rhs.asnumpy(), lhs.grad.asnumpy(), rtol=rtol, atol=atol)
assert_almost_equal(lhs.asnumpy(), rhs.grad.asnumpy(), rtol=rtol, atol=atol)
class NoInputOp(mx.operator.CustomOp):
def __init__(self, length, depth):
super(NoInputOp, self).__init__()
self.output = np.ones(shape=(length, depth), dtype=np.float32)
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], self.output)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
pass
@mx.operator.register("no_input_op")
class NoInputOpProp(mx.operator.CustomOpProp):
def __init__(self, length, depth):
super(NoInputOpProp, self).__init__()
self.length = int(length)
self.depth = int(depth)
def list_arguments(self):
return []
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return [], [(self.length, self.depth)], []
def infer_type(self, in_type):
return [], [np.float32], []
def create_operator(self, ctx, shapes, dtypes):
return NoInputOp(length=self.length, depth=self.depth)
with mx.autograd.record():
x = mx.nd.Custom(length=10, depth=10, op_type="no_input_op")
assert_almost_equal(x.asnumpy(), np.ones(shape=(10, 10), dtype=np.float32))
# test custom operator fork
# see https://github.com/apache/incubator-mxnet/issues/14396
if not sys.platform.startswith('win'): # no fork in windows
class AdditionOP(mx.operator.CustomOp):
def __init__(self):
super(AdditionOP, self).__init__()
def forward(self, is_train, req, in_data, out_data, aux):
out_data[0][:] = in_data[0] + in_data[1]
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
in_grad[0][:] = out_grad[0]
in_grad[1][:] = out_grad[0]
@mx.operator.register("AdditionOP")
class AdditionOPProp(mx.operator.CustomOpProp):
def __init__(self):
super(AdditionOPProp, self).__init__()
def list_arguments(self):
return ['a', 'b']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]]
def create_operator(self, ctx, shapes, dtypes):
return AdditionOP()
def custom_add():
a = mx.nd.array([1, 2, 3])
b = mx.nd.array([4, 5, 6])
c = mx.nd.Custom(a, b, op_type='AdditionOP')
assert_almost_equal((a + b).asnumpy(), c.asnumpy())
custom_add()
from multiprocessing import Process
p = Process(target=custom_add)
p.daemon = True
p.start()
p.join(5)
assert not p.is_alive(), "deadlock may exist in custom operator"
@with_seed()
def test_psroipooling():
for num_rois in [1, 2]:
for num_classes, num_group in itertools.product([2, 3], [2, 3]):
for image_height, image_width in itertools.product([168, 224], [168, 224]):
for grad_nodes in [['im_data']]:
spatial_scale = 0.0625
feat_height = int(image_height * spatial_scale)
feat_width = int(image_width * spatial_scale)
im_data = np.random.rand(1, num_classes*num_group*num_group, feat_height, feat_width)
rois_data = np.zeros([num_rois, 5])
rois_data[:, [1,3]] = np.sort(np.random.rand(num_rois, 2)*(image_width-1))
rois_data[:, [2,4]] = np.sort(np.random.rand(num_rois, 2)*(image_height-1))
im_data_var = mx.symbol.Variable(name="im_data")
rois_data_var = mx.symbol.Variable(name="rois_data")
op = mx.sym.contrib.PSROIPooling(data=im_data_var, rois=rois_data_var, spatial_scale=spatial_scale,
group_size=num_group, pooled_size=num_group,
output_dim=num_classes, name='test_op')
rtol, atol = 1e-2, 1e-3
check_numeric_gradient(op, [im_data, rois_data], rtol=rtol, atol=atol,
grad_nodes=grad_nodes)
@with_seed()
def test_psroipooling_with_type():
arg_params = {
'psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}
# plain psroipooling
sym = mx.sym.contrib.PSROIPooling(spatial_scale=0.0625, output_dim=2, pooled_size=3, name='psroipool')
ctx_list = [{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float64, 'psroipool_rois': np.float64}},
{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float32, 'psroipool_rois': np.float32}},
{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float16, 'psroipool_rois': np.float16}},
]
check_consistency(sym, ctx_list, grad_req={'psroipool_data': 'write',
'psroipool_rois': 'null'}, arg_params=arg_params)
@with_seed()
def test_deformable_convolution():
for num_batch in [1, 2]:
for num_channel_data, num_deformable_group in itertools.product([4, 8], [1, 2]):
for input_height, input_width in itertools.product([5, 6], [5, 6]):
for dilate in [(1, 1), (2, 2)]:
for grad_nodes in [['im_data'], ['offset_data'], ['weight']]:
output_height = input_height
output_width = input_width
im_data = np.random.rand(num_batch, num_channel_data, input_height, input_width)
offset_data = \
np.random.rand(num_batch, num_deformable_group * 3 * 3 * 2, output_height, output_width)\
* 0.8 + 0.1
weight = np.random.normal(0, 0.001, (num_channel_data, num_channel_data, 3, 3))
bias = np.zeros(num_channel_data)
im_data_var = mx.symbol.Variable(name="im_data")
offset_data_var = mx.symbol.Variable(name="offset_data")
weight_var = mx.symbol.Variable(name="weight")
bias_var = mx.symbol.Variable(name="bias")
op = mx.sym.contrib.DeformableConvolution(name='test_op', data=im_data_var,
offset=offset_data_var,
weight=weight_var, bias=bias_var,
num_filter=num_channel_data, pad=dilate,
kernel=(3, 3), stride=(1, 1), dilate=dilate,
num_deformable_group=num_deformable_group)
if grad_nodes[0] == 'offset_data':
# wider tolerance needed for coordinate differential
rtol, atol = 1.0, 1e-2
else:
rtol, atol = 0.05, 1e-3
# For now, only the GPU implementation exists
if default_context().device_type == 'gpu':
check_numeric_gradient(op, [im_data, offset_data, weight, bias], rtol=rtol, atol=atol,
grad_nodes=grad_nodes, ctx=mx.gpu(0))
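# Re-sample the per-ROI offsets so that every bilinear sampling location used by
# deformable psroipooling stays away from the integer grid, where bilinear
# interpolation is not differentiable; this keeps the numeric gradient check below reliable.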
def _validate_sample_location(input_rois, input_offset, spatial_scale, pooled_w, pooled_h, sample_per_part, part_size, output_dim, num_classes, trans_std, feat_h, feat_w):
num_rois = input_rois.shape[0]
output_offset = input_offset.copy()
# simulate deformable psroipooling forward function
for roi_idx in range(num_rois):
sub_rois = input_rois[roi_idx, :].astype(np.float32)
img_idx, x0, y0, x1, y1 = int(sub_rois[0]), sub_rois[1], sub_rois[2], sub_rois[3], sub_rois[4]
roi_start_w = round(x0) * spatial_scale - 0.5
roi_start_h = round(y0) * spatial_scale - 0.5
roi_end_w = round(x1 + 1) * spatial_scale - 0.5
roi_end_h = round(y1 + 1) * spatial_scale - 0.5
roi_w, roi_h = roi_end_w - roi_start_w, roi_end_h - roi_start_h
bin_size_w, bin_size_h = roi_w / pooled_w, roi_h / pooled_h
sub_bin_size_w, sub_bin_size_h = bin_size_w / sample_per_part, bin_size_h / sample_per_part
for c_top in range(output_dim):
channel_each_cls = output_dim / num_classes
class_id = int(c_top / channel_each_cls)
for ph in range(pooled_h):
for pw in range(pooled_w):
part_h = int(math.floor(float(ph) / pooled_h * part_size))
part_w = int(math.floor(float(pw) / pooled_w * part_size))
trans_x = input_offset[roi_idx, class_id * 2, part_h, part_w] * trans_std
trans_y = input_offset[roi_idx, class_id * 2 + 1, part_h, part_w] * trans_std
bin_h_start, bin_w_start = ph * bin_size_h + roi_start_h, pw * bin_size_w + roi_start_w
need_check = True
while need_check:
pass_check = True
for ih in range(sample_per_part):
for iw in range(sample_per_part):
h = bin_h_start + trans_y * roi_h + ih * sub_bin_size_h
w = bin_w_start + trans_x * roi_w + iw * sub_bin_size_w
if w < -0.5 or w > feat_w - 0.5 or h < -0.5 or h > feat_h - 0.5:
continue
w = min(max(w, 0.1), feat_w - 1.1)
h = min(max(h, 0.1), feat_h - 1.1)
# if the following condition holds, the sampling location is not differentiable,
# therefore we need to re-do the sampling process
if h - math.floor(h) < 1e-3 or math.ceil(h) - h < 1e-3 or w - math.floor(w) < 1e-3 or math.ceil(w) - w < 1e-3:
trans_x, trans_y = random.random() * trans_std, random.random() * trans_std
pass_check = False
break
if not pass_check:
break
if pass_check:
output_offset[roi_idx, class_id * 2 + 1, part_h, part_w] = trans_y / trans_std
output_offset[roi_idx, class_id * 2, part_h, part_w] = trans_x / trans_std
need_check = False
return output_offset
@unittest.skip("Flaky test, tracked at https://github.com/apache/incubator-mxnet/issues/11713")
@with_seed()
def test_deformable_psroipooling():
sample_per_part = 4
trans_std = 0.1
for num_rois in [1, 2]:
for num_classes, num_group in itertools.product([2, 3], [2, 3]):
for image_height, image_width in itertools.product([160, 224], [160, 224]):
for grad_nodes in [['im_data'], ['offset_data']]:
spatial_scale = 0.0625
stride = int(1 / spatial_scale)
feat_height = int(image_height * spatial_scale)
feat_width = int(image_width * spatial_scale)
im_data = np.random.rand(1, num_classes*num_group*num_group, feat_height, feat_width)
rois_data = np.zeros([num_rois, 5])
rois_data[:, [1,3]] = np.sort(np.random.rand(num_rois, 2)*(image_width-1 - 2 * stride)) + stride
rois_data[:, [2,4]] = np.sort(np.random.rand(num_rois, 2)*(image_height-1 - 2 * stride)) + stride
offset_data = np.random.rand(num_rois, 2*num_classes, num_group, num_group)
# at certain points the bilinear interpolation function may be non-differentiable
# to avoid this, we check whether the sampling locations fall on valid (differentiable) points
offset_data = _validate_sample_location(rois_data, offset_data, spatial_scale, num_group, num_group,
sample_per_part, num_group, num_classes, num_classes, trans_std, feat_height, feat_width)
im_data_var = mx.symbol.Variable(name="im_data")
rois_data_var = mx.symbol.Variable(name="rois_data")
offset_data_var = mx.symbol.Variable(name="offset_data")
op = mx.sym.contrib.DeformablePSROIPooling(data=im_data_var, rois=rois_data_var,
trans=offset_data_var, spatial_scale=spatial_scale,
sample_per_part=4, group_size=num_group,
pooled_size=num_group, output_dim=num_classes,
trans_std=0.1, no_trans=False, name='test_op')
rtol, atol = 1e-2, 1e-3
# For now, only the GPU implementation exists
if default_context().device_type == 'gpu':
check_numeric_gradient(op, [im_data, rois_data, offset_data], rtol=rtol, atol=atol,
grad_nodes=grad_nodes, ctx=mx.gpu(0))
def _gemm_test_helper(dtype, grad_check, rtol_fw = 1e-7, atol_fw = 1e-9):
num_eps = 1e-6
rtol_bw = 1e-5
atol_bw = 1e-6
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
data3 = mx.symbol.Variable('data3')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
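# rep_3x tiles a single (m, n) matrix three times into a batch of shape (3, 1, m, n),
# which is used below to exercise the batched code paths of the gemm operators.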
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
shape1 = (2, 3)
shape2 = (3, 2)
shape3 = (3, 3)
shape4 = (2, 2)
data_in1 = np.random.uniform(1, 10, shape1).astype(dtype)
data_in2 = np.random.uniform(1, 10, shape2).astype(dtype)
data_in3 = np.random.uniform(1, 10, shape3).astype(dtype)
data_in4 = np.random.uniform(1, 10, shape4).astype(dtype)
# Check all transpositions of gemm operator.
data_in1_t = np.transpose(data_in1)
data_in2_t = np.transpose(data_in2)
res_gemm = 4. * np.dot(data_in1, data_in2) + 7. * data_in4
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.)
check_fw(test_gemm, [data_in1, data_in2, data_in4], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2, data_in4])
res_gemm = 4. * np.dot(data_in1_t, data_in2_t) + 7. * data_in3
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_a=True, transpose_b=True)
check_fw(test_gemm, [data_in1, data_in2, data_in3], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2, data_in3])
res_gemm = 4. * np.dot(data_in1_t, data_in1) + 7. * data_in3
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_a=True)
check_fw(test_gemm, [data_in1, data_in1, data_in3], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1, data_in3])
res_gemm = 4. * np.dot(data_in1, data_in1_t) + 7. * data_in4
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_b=True)
check_fw(test_gemm, [data_in1, data_in1, data_in4], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1, data_in4])
# Check batch of gemm.
a = rep_3x(data_in1, 2, 3)
b = rep_3x(data_in2, 3, 2)
c = rep_3x(data_in4, 2, 2)
r = 4. * np.dot(data_in1, data_in2) + 7. * data_in4
r = rep_3x(r, 2, 2)
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.)
check_fw(test_gemm, [a, b, c], [r])
if grad_check == 1:
check_grad(test_gemm, [a, b, c])
# Check for different axis that describes matrix rows.
a2 = np.copy(np.swapaxes(a, 0, 2))
b2 = np.copy(np.swapaxes(b, 0, 2))
c2 = np.copy(np.swapaxes(c, 0, 2))
r2 = np.copy(np.swapaxes(r, 0, 2))
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7., axis = 0)
check_fw(test_gemm, [a2, b2, c2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2, c2])
a2 = np.copy(np.swapaxes(a, 1, 2))
b2 = np.copy(np.swapaxes(b, 1, 2))
c2 = np.copy(np.swapaxes(c, 1, 2))
r2 = np.copy(np.swapaxes(r, 1, 2))
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7., axis = -3)
check_fw(test_gemm, [a2, b2, c2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2, c2])
# Check gemm2 operator same way as gemm.
res_gemm = 4. * np.dot(data_in1, data_in2)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4.)
check_fw(test_gemm, [data_in1, data_in2], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2])
res_gemm = 4. * np.dot(data_in1_t, data_in2_t)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_a=True,
transpose_b=True)
check_fw(test_gemm, [data_in1, data_in2], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2])
res_gemm = 4. * np.dot(data_in1_t, data_in1)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_a=True)
check_fw(test_gemm, [data_in1, data_in1], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1])
res_gemm = 4. * np.dot(data_in1, data_in1_t)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_b=True)
check_fw(test_gemm, [data_in1, data_in1], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1])
# Check batch of gemm2.
a = rep_3x(data_in1, 2, 3)
b = rep_3x(data_in2, 3, 2)
r = rep_3x(4. * np.dot(data_in1, data_in2), 2, 2)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4.)
check_fw(test_gemm, [a, b], [r])
if grad_check == 1:
check_grad(test_gemm, [a, b])
a2 = np.copy(np.swapaxes(a, 0, 2))
b2 = np.copy(np.swapaxes(b, 0, 2))
r2 = np.copy(np.swapaxes(r, 0, 2))
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., axis = 0)
check_fw(test_gemm, [a2, b2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2])
a2 = np.copy(np.swapaxes(a, 1, 2))
b2 = np.copy(np.swapaxes(b, 1, 2))
r2 = np.copy(np.swapaxes(r, 1, 2))
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., axis = -3)
check_fw(test_gemm, [a2, b2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2])
# Test gemm separately from other la-operators.
@with_seed()
def test_gemm():
_gemm_test_helper(np.float64, True)
os.environ["MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION"] = "0"
_gemm_test_helper(np.float32, False, rtol_fw = 1e-5, atol_fw = 1e-7)
if default_context().device_type == 'gpu':
os.environ["MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION"] = "1"
_gemm_test_helper(np.float32, False, rtol_fw = 2e-5, atol_fw = 2e-7)
os.environ["MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION"] = "0"
# Helper functions for test_laop
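# _make_symm_symbol symmetrizes a over its trailing matrix dimensions, i.e. it returns
# 0.5 * (a + a^T) with the transpose taken over the last two axes.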
def _make_symm_symbol(a, ndims):
assert ndims >= 2
tr_shape = list(range(ndims))
tr_shape[-1] = ndims-2
tr_shape[-2] = ndims-1
tr_shape = tuple(tr_shape)
return 0.5 * (a + mx.sym.transpose(a, axes=tr_shape))
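# _make_triangle_symm builds an (m, m) lower- (or upper-) triangular 0/1 mask out of
# stacked one_hot rows and multiplies it elementwise onto a, zeroing out the other triangle.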
def _make_triangle_symm(a, ndims, m, lower, dtype=np.float32):
assert ndims >= 2
# The last two dimensions must both be m
# Create mask for lower triangle and diagonal
index = mx.sym.arange(start=0, stop=m, step=1, dtype=np.int32)
lt_mask = mx.sym.one_hot(index, depth=m, dtype=dtype)
for j in range(1, m):
part1 = mx.sym.zeros(shape=(j, m), dtype=dtype)
index = mx.sym.arange(start=0, stop=m-j, step=1, dtype=np.int32)
part2 = mx.sym.one_hot(index, depth=m, dtype=dtype)
lt_mask = lt_mask + mx.sym.concat(*[part1, part2], dim=0)
if not lower:
lt_mask = mx.sym.reshape(lt_mask, shape=(m, m))
lt_mask = mx.sym.transpose(lt_mask, axes=(1, 0))
shp = tuple([1]*(ndims-2) + [m, m])
lt_mask = mx.sym.reshape(lt_mask, shape=shp)
return mx.sym.broadcast_mul(a, lt_mask)
# @ankkhedia: Getting rid of fixed seed as flakiness could not be reproduced
# tracked at https://github.com/apache/incubator-mxnet/issues/11718
@with_seed()
def test_laop():
dtype = np.float64
rtol_fw = 1e-7
atol_fw = 1e-9
num_eps = 1e-6
rtol_bw = 1e-5
atol_bw = 1e-6
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
data3 = mx.symbol.Variable('data3')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
for lower in [True, False]:
upper = not lower
# Tests with trivial 1x1 matrices.
shape = (4, 4, 1, 1)
data_in = np.random.uniform(1, 10, shape)
# test potrf
# Note: Have to symmetrize input, for gradient test to work
res_potrf = np.sqrt(data_in)
test_potrf = mx.sym.linalg.potrf(data1, lower=lower)
check_fw(test_potrf, [data_in], [res_potrf])
if grad_check == 1:
check_grad(test_potrf, [data_in])
# test potri
ones = mx.nd.ones(shape).asnumpy()
res_potri = np.divide(ones, data_in * data_in)
test_potri = mx.sym.linalg.potri(data1, lower=lower)
check_fw(test_potri, [data_in], [res_potri])
if grad_check == 1:
check_grad(test_potri, [data_in])
# test trsm
trian_in = data_in * 7.
test_trsm = mx.sym.linalg.trsm(data1, data2, alpha=7., lower=lower)
check_fw(test_trsm, [trian_in, data_in], [ones])
if grad_check == 1:
check_grad(test_trsm, [trian_in,data_in])
# test trmm
trian_in = np.divide(ones, trian_in)
test_trmm = mx.sym.linalg.trmm(data1, data2, alpha=7., transpose=True,
rightside=True, lower=lower)
check_fw(test_trmm, [trian_in, data_in], [ones])
if grad_check == 1:
check_grad(test_trmm, [trian_in, data_in])
# test sumlogdiag
res_sumlogdiag = np.reshape(np.log(data_in), (4, 4))
test_sumlogdiag = mx.sym.linalg.sumlogdiag(data1)
check_fw(test_sumlogdiag, [data_in], [res_sumlogdiag])
if grad_check == 1:
check_grad(test_sumlogdiag, [data_in])
# more elaborate example of Cholesky factorization
matrix = np.array([[9., 3., -6., 12.],
[3., 26., -7., -11.],
[-6., -7., 9., 7.],
[12., -11., 7., 65.]])
trian = np.array([[3., 0., 0., 0.],
[1., 5., 0., 0.],
[-2., -1., 2., 0.],
[4., -3., 6., 2.]])
pow = np.array([[2., 1., 1., 1.],
[1., 4., 1., 1.],
[1., 1., 8., 1.],
[1., 1., 1., 16.]])
inv = np.array([[8.95/3., 0.05/3., 2.65, -2.5/3.],
[0.05/3., 0.05, 0.05, 0.],
[2.65, 0.05, 2.5, -0.75],
[-2.5/3., 0., -0.75, 0.25]])
ident = np.eye(4)
low_trian = trian
if not lower:
trian = np.transpose(trian)
# test potrf
test_potrf = mx.sym.linalg.potrf(_make_symm_symbol(data1, ndims=4), lower=lower)
a = rep_3x(matrix, 4, 4)
r = rep_3x(trian, 4, 4)
check_fw(test_potrf, [a], [r])
if grad_check == 1:
check_grad(test_potrf, [a])
#test potri
data1_ltri = _make_triangle_symm(
data1, ndims=4, m=4, lower=lower, dtype=dtype)
test_potri = mx.sym.linalg.potri(data1_ltri, lower=lower)
a = rep_3x(trian, 4, 4)
r = rep_3x(inv, 4, 4)
check_fw(test_potri, [a], [r])
if grad_check == 1:
check_grad(test_potri, [a])
# test trsm
test_trsm = mx.sym.linalg.trsm(data1_ltri, data2, alpha=7., transpose=upper, lower=lower)
a = rep_3x(trian, 4, 4)
b = rep_3x(matrix, 4, 4)
r = rep_3x(7. * np.transpose(low_trian), 4, 4)
check_fw(test_trsm, [a, b], [r])
if grad_check == 1:
check_grad(test_trsm, [a, b])
test_trsm2 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=-2., rightside=True, transpose=lower, lower=lower)
r = rep_3x(-2. * low_trian, 4, 4)
check_fw(test_trsm2, [a, b], [r])
if grad_check == 1:
check_grad(test_trsm2, [a, b])
test_trsm3 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=0.5, transpose=lower, lower=lower)
b = rep_3x(np.transpose(low_trian), 4, 4)
r = rep_3x(0.5 * ident, 4, 4)
check_fw(test_trsm3, [a, b], [r])
if grad_check == 1:
check_grad(test_trsm3, [a, b])
test_trsm4 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=-0.5, rightside=True, transpose=upper, lower=lower)
b = rep_3x(low_trian, 4, 4)
r = rep_3x(-0.5 * ident, 4, 4)
check_fw(test_trsm4, [a, b], [r])
if grad_check == 1:
check_grad(test_trsm4, [a, b])
# test trmm
test_trmm = mx.sym.linalg.trmm(
data1_ltri, data2, alpha=7., transpose=True, rightside=True, lower=lower)
a = rep_3x(trian, 4, 4)
b = rep_3x(matrix, 4, 4)
r = rep_3x(7. * np.dot(matrix, trian.T), 4, 4)
check_fw(test_trmm, [a, b], [r])
if grad_check == 1:
check_grad(test_trmm, [a, b])
test_trmm2 = mx.sym.linalg.trmm(data1_ltri, data2, alpha=-2., lower=lower)
r = rep_3x(-2. * np.dot(trian, matrix), 4, 4)
check_fw(test_trmm2, [a, b], [r])
if grad_check == 1:
check_grad(test_trmm2, [a, b])
test_trmm3 = mx.sym.linalg.trmm(data1_ltri, data2, rightside=True, lower=lower)
r = rep_3x(np.dot(matrix, trian), 4, 4)
check_fw(test_trmm3, [a, b], [r])
if grad_check == 1:
check_grad(test_trmm3, [a, b])
test_trmm4 = mx.sym.linalg.trmm(
data1_ltri, data2, alpha=1.2, transpose=True, lower=lower)
r = rep_3x(1.2 * np.dot(trian.T, matrix), 4, 4)
check_fw(test_trmm4, [a, b], [r])
if grad_check == 1:
check_grad(test_trmm4, [a, b])
# test sumlogdiag
a = rep_3x(pow, 4, 4)
r = np.reshape(np.tile(10. * np.log(np.array([2.])), 3), (3,))
check_fw(test_sumlogdiag, [a], [r])
if grad_check == 1:
check_grad(test_sumlogdiag, [a])
# Tests for operators linalg.syrk, linalg.gelqf
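# _gelqf_combined_symbol groups two consistency checks for the LQ factorization A = L * Q:
# Q * Q^T (should be the identity, since Q has orthonormal rows) and L * Q (should
# reconstruct the input A).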
def _gelqf_combined_symbol(a):
q, l = mx.sym.linalg.gelqf(a)
q_qt = mx.sym.linalg.syrk(q, transpose=False, alpha=1., name='Q_times_Qt')
l_q = mx.sym.linalg.trmm(l, q, alpha=1., name='L_times_Q')
return mx.sym.Group([q_qt, l_q])
# NOTE: If we leave the unused output dangling, things break if dtype=np.float64. Namely, the
# backward gradient for the unused output is of dtype np.float32 then.
# ==> Very annoying!
def _gelqf_first_output(a):
q, l = mx.sym.linalg.gelqf(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(l), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(q, bogus_scal)
def _gelqf_second_output(a):
q, l = mx.sym.linalg.gelqf(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(q), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(l, bogus_scal)
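# _syevd_combined_symbol groups two consistency checks for the symmetric eigendecomposition
# A = U^T * diag(lam) * U: U * U^T (should be the identity) and U^T * diag(lam) * U
# (should reconstruct the symmetric input A).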
def _syevd_combined_symbol(a):
u, lam = mx.sym.linalg.syevd(a)
u_ut = mx.sym.linalg.syrk(u, transpose=False, alpha=1., name='U_times_Ut')
lam_u = mx.sym.broadcast_mul(mx.sym.reshape(lam, shape=(-2, 1)), u)
ut_lam_u = mx.sym.linalg.gemm2(u, lam_u, alpha=1., transpose_a=True,
transpose_b=False, name='Ut_L_U')
return mx.sym.Group([u_ut, ut_lam_u])
@with_seed()
def test_laop_2():
dtype = np.float64
rtol_fw = 1e-7
atol_fw = 1e-9
num_eps = 1e-6
rtol_bw = 1e-5
atol_bw = 1e-6
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
# Tests for linalg.syrk
mnalpha_lst = [(2, 3, 1.), (5, 3, -2.), (1, 6, 5.), (3, 3, 0.5), (4, 1, 10.), (1, 1, 1.)]
for m, n, alpha in mnalpha_lst:
#print('syrk: m={}, n={}, alpha={}'.format(m, n, alpha))
data_in1 = np.random.uniform(1, 10, (m, n))
res_syrk1 = alpha * np.dot(data_in1, data_in1.T)
test_syrk1 = mx.sym.linalg.syrk(data1, transpose=False, alpha=alpha)
check_fw(test_syrk1, [data_in1], [res_syrk1])
if grad_check == 1:
check_grad(test_syrk1, [data_in1])
res_syrk2 = alpha * np.dot(data_in1.T, data_in1)
test_syrk2 = mx.sym.linalg.syrk(data1, transpose=True, alpha=alpha)
check_fw(test_syrk2, [data_in1], [res_syrk2])
if grad_check == 1:
check_grad(test_syrk2, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, m, n)
r1_batch = rep_3x(res_syrk1, m, m)
check_fw(test_syrk1, [a_batch], [r1_batch])
if grad_check == 1:
check_grad(test_syrk1, [a_batch])
r2_batch = rep_3x(res_syrk2, n, n)
check_fw(test_syrk2, [a_batch], [r2_batch])
if grad_check == 1:
check_grad(test_syrk2, [a_batch])
# Tests for linalg.gelqf
# Currently disabled on GPU as these tests need CUDA 8,
# while the MXNet builds use CUDA 7.5
if not (default_context() == mx.cpu()):
return
test_gelqf2 = _gelqf_combined_symbol(data1) # Outputs (dot(Q, Q.T), dot(L, Q))
test_gelqf_q = _gelqf_first_output(data1) # Output Q (L is not dangling)
test_gelqf_l = _gelqf_second_output(data1) # Output L (Q is not dangling)
mn_lst = [(4, 4), (1, 1), (5, 20), (1, 10), (15, 50)]
for m, n in mn_lst:
#print('gelqf: m={}, n={}'.format(m, n))
data_in1 = np.random.normal(0., 10., (m, n))
res_eye = np.eye(m)
res_a = data_in1
check_fw(test_gelqf2, [data_in1], [res_eye, res_a])
if grad_check == 1:
# A => Q
check_grad(test_gelqf_q, [data_in1])
# A => L
check_grad(test_gelqf_l, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, m, n)
reye_batch = rep_3x(res_eye, m, m)
ra_batch = a_batch
check_fw(test_gelqf2, [a_batch], [reye_batch, ra_batch])
if grad_check == 1:
# A => Q
check_grad(test_gelqf_q, [a_batch])
# A => L
check_grad(test_gelqf_l, [a_batch])
# Tests for operator linalg.syevd
def _syevd_first_output(a):
u, lam = mx.sym.linalg.syevd(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(lam), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(u, bogus_scal)
def _syevd_second_output(a):
u, lam = mx.sym.linalg.syevd(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(u), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(lam, bogus_scal)
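# NumPy reference for the syevd forward pass: eigendecompose the symmetric matrix, sort the
# eigenvalues in ascending order, store the eigenvectors as the rows of u, and fix each
# eigenvector's sign so that its largest-magnitude entry is positive (making the reference
# decomposition deterministic).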
def _syevd_forward(a):
lam, ut = np.linalg.eig(a)
ind = np.argsort(lam)
lam = lam[ind]
u = ut[:, ind].T
for i in range(0, a.shape[0]):
_syevd_forw_eigvec_sign(u[i])
return u, lam
def _syevd_forw_eigvec_sign(v):
ind = np.argmax(np.abs(v))
if v[ind] < 0.:
v[:] = -v
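# Analytic backward pass for syevd with A = U^T * diag(l) * U: build the symmetric matrix
# diag(grad_l) + F, where F_ij = ((dU U^T)_ij - (dU U^T)_ji) / (2 * (l_i - l_j)) for i != j,
# and map it back via U^T * (diag(grad_l) + F) * U to obtain the gradient w.r.t. A.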
def _syevd_backward(grad_u, grad_l, u, l):
n = l.size
assert grad_l.size == n
assert grad_u.shape == (n, n)
assert u.shape == (n, n)
temp = np.dot(grad_u, u.T)
temp2 = np.diag(grad_l)
for i in range(1, n):
for j in range(0, i):
denom = 2. * (l[i] - l[j])
elem = (temp[i, j] - temp[j, i])/denom
temp2[i, j] = elem
temp2[j, i] = elem
temp3 = np.dot(u.T, temp2)
return np.dot(temp3, u)
# Seed set because the test is not robust enough to operate on random data
@with_seed(1896893923)
def test_laop_3():
# Currently disabled on GPU as syevd needs CUDA 8,
# while the MXNet builds use CUDA 7.5
if not (default_context() == mx.cpu()):
return
dtype = np.float64
rtol_fw = 1e-6
atol_fw = 1e-6
num_eps = 1e-4
rtol_bw = 1e-2
atol_bw = 1e-2
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol_fw, atol=atol_fw, dtype=dtype)
# Tests for linalg.syevd
test_syevd2 = _syevd_combined_symbol(data1) # Outputs (U U^T, U^T (diag L) U)
data1_s2 = _make_symm_symbol(data1, ndims=2)
test_syevd_u_2 = _syevd_first_output(data1_s2)
test_syevd_l_2 = _syevd_second_output(data1_s2)
data1_s4 = _make_symm_symbol(data1, ndims=4)
test_syevd_u_4 = _syevd_first_output(data1_s4)
test_syevd_l_4 = _syevd_second_output(data1_s4)
n_lst = [4, 1, 2, 10, 14]
for n in n_lst:
#print('\n** syevd: n={}'.format(n))
data_in1 = np.random.normal(0., 10., (n, n))
data_in1 = 0.5 * (data_in1 + data_in1.T)
res_eye = np.eye(n)
res_a = data_in1
check_fw(test_syevd2, [data_in1], [res_eye, res_a])
# Check backward
grad_u = np.random.normal(0., 2., (n, n))
grad_l = np.random.normal(0., 2., (n,))
bw_u, bw_l = _syevd_forward(data_in1)
grad_a = _syevd_backward(grad_u, grad_l, bw_u, bw_l)
check_bw(mx.sym.linalg.syevd(data1), [data_in1], [grad_u, grad_l], [grad_a])
if grad_check == 1:
# A => U
check_grad(test_syevd_u_2, [data_in1])
# A => L
check_grad(test_syevd_l_2, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, n, n)
reye_batch = rep_3x(res_eye, n, n)
ra_batch = a_batch
check_fw(test_syevd2, [a_batch], [reye_batch, ra_batch])
if grad_check == 1:
# A => U
check_grad(test_syevd_u_4, [a_batch])
# A => L
check_grad(test_syevd_l_4, [a_batch])
# @piyushghai - Removing the fixed seed for this test.
# Issue for flakiness is tracked at - https://github.com/apache/incubator-mxnet/issues/11721
@with_seed()
def test_laop_4():
# Currently disabled on GPU as syevd needs CUDA 8,
# while the MXNet builds use CUDA 7.5
if not (default_context() == mx.cpu()):
return
rtol_fw = 1e-6
atol_fw = 1e-6
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected, dtype :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
a_np = np.array([[1., 2.], [2., 4.]])
u_np = np.array([[0.89442718, -0.44721359], [0.44721359, 0.89442718]])
l_np = np.array([0., 5.])
test_syevd = mx.sym.linalg.syevd(data1)
# float64
#print('float64')
check_fw(test_syevd, [a_np], [u_np, l_np], np.float64)
# float32
#print('float32')
check_fw(test_syevd, [a_np], [u_np, l_np], np.float32)
@with_seed()
def test_stack():
for _ in range(100):
ndim = random.randint(1, 5)
axis = random.randint(0, ndim)
if random.randint(0, 1):
axis = axis - ndim - 1
nin = random.randint(1, 3)
dshape = [random.randint(1, 5) for _ in range(ndim)]
inputs = [np.random.uniform(size=dshape) for _ in range(nin)]
output = np.stack(inputs, axis=axis)
sym_ins = [mx.sym.var('x%d'%i) for i in range(nin)]
out = mx.sym.stack(*sym_ins, axis=axis)
check_symbolic_forward(out, inputs, [output])
check_numeric_gradient(out, inputs)
@with_seed()
def test_dropout():
def zero_count(array, ratio):
zeros = 0
for i in array:
if i == 0:
zeros += 1
elif math.isnan(i):
assert ratio == 1 # Only valid for ratio = 1
zeros += 1
return zeros
def check_correctness(executor, input, ratio):
input = input.ravel()
output = executor.outputs[0].asnumpy().ravel()
input_sum = np.sum(input)
output_sum = np.sum(output)
# Make sure the input contains no zeros (test data setup check)
assert zero_count(input, ratio) == 0
# count number of zeroes in output
output_zeroes = zero_count(output, ratio)
# The relative difference between the input and output sums should stay within ratio/2
error = abs(output_sum - input_sum) / input_sum
if ratio == 1.0:
assert output_zeroes == len(input)
elif ratio > 0.2:
assert output_zeroes > 0
assert error < (ratio/2)
elif ratio == 0:
assert output_zeroes == 0
def check_dropout_ratio(ratio, shape, cudnn_off=True):
# test dropout
x = mx.sym.var('data')
y = mx.sym.Dropout(x, p=ratio, cudnn_off=cudnn_off)
exe = y.simple_bind(ctx=default_context(), data=shape)
if ratio == 1:
max_value = float('nan')
else:
max_value = 1 if ratio == 0 else 1/ratio
if ratio == 1:
min_value = float('nan')
else:
min_value = 1 if ratio == 0 else 0
exe.arg_arrays[0][:] = 1
exe.forward(is_train=True)
if not math.isnan(max_value):
assert exe.outputs[0].asnumpy().max() > 0
else:
assert math.isnan(exe.outputs[0].asnumpy().max())
if not math.isnan(min_value):
assert exe.outputs[0].asnumpy().min() == min_value
else:
assert math.isnan(exe.outputs[0].asnumpy().min())
check_correctness(exe, exe.arg_arrays[0].asnumpy(), ratio)
if ratio == 0.5:
exe.backward([mx.nd.ones(shape)])
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
exe.forward(is_train=False)
assert (exe.outputs[0].asnumpy() == exe.arg_arrays[0].asnumpy()).all()
exe.backward([mx.nd.ones(shape)], is_train=False)
assert (exe.grad_arrays[0].asnumpy() == exe.arg_arrays[0].asnumpy()).all()
# test permanent dropout
x = mx.sym.var('data')
y = mx.sym.Dropout(x, p=ratio, mode='always', cudnn_off=cudnn_off)
exe = y.simple_bind(ctx=default_context(), data=shape)
exe.arg_arrays[0][:] = 1
exe.forward(is_train=True)
assert exe.outputs[0].asnumpy().max() == max_value
assert exe.outputs[0].asnumpy().min() == min_value
exe.backward([mx.nd.ones(shape)])
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
exe.forward(is_train=False)
assert exe.outputs[0].asnumpy().max() == max_value
assert exe.outputs[0].asnumpy().min() == min_value
exe.backward([mx.nd.ones(shape)], is_train=False)
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
def get_slice(x, axis, idx):
ix = ()
for i in range(x.ndim):
if i == axis:
ix += (idx,)
else:
ix += (slice(None, None, None),)
return x[ix]
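# With the axes argument, the dropout mask is shared (broadcast) along the given axes;
# the check below verifies this by asserting that every slice taken along such an axis
# is identical to the first one.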
def check_dropout_axes(ratio, shape, axes, cudnn_off=True):
compactshape = list(shape)
for axis in axes:
compactshape[axis] = 1
compactx = mx.random.uniform(shape=tuple(compactshape))
broadcastx = compactx.broadcast_to(shape)
dropouty = mx.nd.Dropout(broadcastx, p=ratio, axes=axes, cudnn_off=cudnn_off)
for axis in axes:
target = get_slice(dropouty, axis, 0).asnumpy()
for i in range(1, shape[axis]):
assert(get_slice(dropouty, axis, i).asnumpy() == target).all()
def check_passthrough(ratio, shape, cudnn_off=True):
# test inference_mode forward and then backward
a = mx.random.uniform(shape=shape)
a.attach_grad()
with mx.autograd.record(train_mode=False):
b = mx.nd.Dropout(a, ratio, cudnn_off=cudnn_off) # dropout acts as identity
b.backward()
assert_almost_equal(a.grad.asnumpy(), mx.nd.ones_like(b).asnumpy())
shape = (100, 100)
check_dropout_ratio(0.5, shape)
check_dropout_ratio(0.0, shape)
check_dropout_ratio(1.0, shape)
check_dropout_ratio(0.75, shape)
check_dropout_ratio(0.25, shape)
check_dropout_ratio(0.5, shape, cudnn_off=False)
check_dropout_ratio(0.0, shape, cudnn_off=False)
check_dropout_ratio(1.0, shape, cudnn_off=False)
check_dropout_ratio(0.75, shape, cudnn_off=False)
check_dropout_ratio(0.25, shape, cudnn_off=False)
check_passthrough(0.5, shape)
check_passthrough(0.0, shape)
check_passthrough(1.0, shape)
check_passthrough(0.5, shape, cudnn_off=False)
check_passthrough(0.0, shape, cudnn_off=False)
check_passthrough(1.0, shape, cudnn_off=False)
nshape = (10, 10, 10, 10)
with mx.autograd.train_mode():
check_dropout_axes(0.25, nshape, axes = (0,))
check_dropout_axes(0.25, nshape, axes = (1,))
check_dropout_axes(0.25, nshape, axes = (2,))
check_dropout_axes(0.25, nshape, axes = (3,))
check_dropout_axes(0.25, nshape, axes = (0, 1))
check_dropout_axes(0.25, nshape, axes = (0, 2))
check_dropout_axes(0.25, nshape, axes = (0, 3))
check_dropout_axes(0.25, nshape, axes = (1, 2))
check_dropout_axes(0.25, nshape, axes = (1, 3))
check_dropout_axes(0.25, nshape, axes = (2, 3))
check_dropout_axes(0.25, nshape, axes = (0, 1, 2))
check_dropout_axes(0.25, nshape, axes = (0, 2, 3))
check_dropout_axes(0.25, nshape, axes = (1, 2, 3))
check_dropout_axes(0.25, nshape, axes = (0,), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (1,), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (2,), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (3,), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 1), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 2), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 3), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (1, 2), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (1, 3), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (2, 3), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 1, 2), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 2, 3), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (1, 2, 3), cudnn_off=False)
@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/11290")
@with_seed()
def test_scatter_gather_nd():
def check(data, idx):
data.attach_grad()
with mx.autograd.record():
y = mx.nd.gather_nd(data, idx)
y.backward(y)
npidx = tuple(i.asnumpy() for i in idx)
assert (data.asnumpy()[npidx] == y.asnumpy()).all()
npdata = np.zeros_like(data.asnumpy())
npdata[npidx] = y.asnumpy()
assert (npdata == data.grad.asnumpy()).all()
assert (mx.nd._internal._backward_gather_nd(y, idx, shape=data.shape).asnumpy() == data.grad.asnumpy()).all()
for dtype in ['int32', 'int64', 'float16', 'float32', 'float64']:
data = mx.nd.arange(360, dtype=dtype).reshape((3,4,5,6))
idx = mx.nd.array([[1,1,2], [3, 3, 0], [3,2,1]], dtype='int32')
check(data, idx)
idx = mx.nd.array([[1,1,2], [3,3,0], [3,2,1], [5,2,4]], dtype='int32')
check(data, idx)
data = mx.nd.array([2, 3, 0], dtype=dtype)
idx = mx.nd.array([[1, 1, 0], [0, 1, 0]], dtype='int32')
assert (mx.nd.scatter_nd(data, idx, shape=(2, 2)).asnumpy() == [[0, 0], [2, 3]]).all()
data = mx.nd.array([2, 3, 0], dtype=dtype)
idx = mx.nd.array([[1, 1, 0], [1, 1, 0]], dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(2, 2)).asnumpy() == [[0, 0], [0, 5]]).all()
data_npy = np.random.randint(0, 10, (100,))
data = mx.nd.array(data_npy, dtype=dtype)
idx = mx.nd.zeros(shape=(1, 100), dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(1,)).asscalar() == data_npy.sum())
if dtype == 'int64':
data = mx.nd.array([2123162361283621, -31231236374787,
-112372937128970, -1378278798172378], dtype=dtype)
idx = mx.nd.array([[0, 0, 0, 0]], dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(1,)).asscalar() == data.asnumpy().sum())
def compare_forw_backw_unary_op(
name, forward_mxnet_call, forward_numpy_call,
backward_numpy_call, shape, input_low, input_high, rtol, atol,
dtype=np.float32):
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol,
atol=atol, dtype=dtype)
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol, atol=atol, dtype=dtype)
op_name = 'unary_op={}, dtype={}'.format(name, dtype)
data = mx.symbol.Variable(op_name + '_data', dtype=dtype)
# Comparison: Forward expression
data_np = np.random.uniform(input_low, input_high, shape).astype(dtype)
res_np = forward_numpy_call(data_np)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data), mx.sym.zeros_like(data),
name=op_name)
check_fw(op_ex, [data_np], [res_np])
# Comparison: Backward expression
res_grad = np.random.uniform(-2.0, 2.0, shape).astype(dtype)
data_grad = backward_numpy_call(data_np) * res_grad
check_bw(op_ex, [data_np], [res_grad], [data_grad])
def finite_diff_unary_op(
name, forward_mxnet_call, shape, input_low, input_high, rtol, atol,
num_eps):
# Finite difference tests are done in float64
dtype = np.float64
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol,
atol=atol, dtype=dtype)
data_np = np.random.uniform(input_low, input_high, shape).astype(dtype)
data = mx.symbol.Variable('data', dtype=dtype)
op_name = 'unary_op={}, dtype={}'.format(name, dtype)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data), mx.sym.zeros_like(data),
name=op_name)
check_grad(op_ex, [data_np])
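# NumPy reference for the smooth-L1 (Huber-like) function used by mx.sym.smooth_l1:
#   f(x) = 0.5 * (sigma * x)^2      if |x| < 1 / sigma^2
#   f(x) = |x| - 0.5 / sigma^2      otherwise
# with gradient sigma^2 * x in the quadratic region and sign(x) outside of it.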
def np_smooth_l1(x, sigma):
issq = 1. / sigma / sigma
absx = np.abs(x)
temp = x * sigma
return np.where(absx < issq, 0.5 * (temp ** 2), absx - 0.5 * issq)
def np_smooth_l1_grad(x, sigma):
ssq = sigma * sigma
return np.where(np.abs(x) < 1. / ssq, x * ssq, np.sign(x))
# Tests for unary operators (basic mathematical functions):
# - Forward: Comparison to NumPy (several dtype)
# - Backward: Comparison to NumPy (several dtype)
# - Finite difference tests (only dtype = float64)
# Seed set because the test is not robust enough to operate on random data
@with_seed(192837465)
def test_unary_math_operators():
have_scipy = True
try:
from scipy import special as scipy_special
except ImportError:
print("Could not import scipy. Skipping unit tests for special functions")
have_scipy = False
shape=(9, 10)
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
rtol_less_l = [1e-6, 1e-5, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
atol_less_l = [1e-6, 1e-5, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
unary_ops = {
'arccos' : [lambda x: mx.sym.arccos(x),
lambda x: np.arccos(x),
lambda x: -1. / np.sqrt(1. - x ** 2.),
-0.95, 0.95],
'arccosh': [lambda x: mx.sym.arccosh(x),
lambda x: np.arccosh(x),
lambda x: 1. / np.sqrt(x ** 2 - 1.),
1.05, 10.0],
'arcsin': [lambda x: mx.sym.arcsin(x),
lambda x: np.arcsin(x),
lambda x: 1. / np.sqrt(1. - x ** 2),
-0.95, 0.95],
'arcsinh': [lambda x: mx.sym.arcsinh(x),
lambda x: np.arcsinh(x),
lambda x: 1. / np.sqrt(x**2 + 1.),
-5.0, 5.0],
'arctan': [lambda x: mx.sym.arctan(x),
lambda x: np.arctan(x),
lambda x: 1. / (x ** 2. + 1.),
-5.0, 5.0],
'arctanh': [lambda x: mx.sym.arctanh(x),
lambda x: np.arctanh(x),
lambda x: 1. / (1. - x ** 2),
-0.95, 0.95],
'cbrt': [lambda x: mx.sym.cbrt(x),
lambda x: np.cbrt(x),
lambda x: 1. / (3. * np.cbrt(x) ** 2),
-10.0, 10.0],
'cos': [lambda x: mx.sym.cos(x),
lambda x: np.cos(x),
lambda x: -np.sin(x),
-5.0, 5.0],
'cosh': [lambda x: mx.sym.cosh(x),
lambda x: np.cosh(x),
lambda x: np.sinh(x),
-2.0, 2.0],
'exp': [lambda x: mx.sym.exp(x),
lambda x: np.exp(x),
lambda x: np.exp(x),
-4.0, 4.0],
'expm1': [lambda x: mx.sym.expm1(x),
lambda x: np.expm1(x),
lambda x: np.exp(x),
-0.1, 0.1],
'log': [lambda x: mx.sym.log(x),
lambda x: np.log(x),
lambda x: 1. / x,
0.01, 100.0],
'log10': [lambda x: mx.sym.log10(x),
lambda x: np.log10(x),
lambda x: 1. / (x * np.log(10.)),
0.01, 100.0],
'log2': [lambda x: mx.sym.log2(x),
lambda x: np.log2(x),
lambda x: 1. / (x * np.log(2.)),
0.01, 100.0],
'log1p': [lambda x: mx.sym.log1p(x),
lambda x: np.log1p(x),
lambda x: 1. / (1. + x),
-0.1, 0.1],
'rcbrt': [lambda x: mx.sym.rcbrt(x),
lambda x: 1. / np.cbrt(x),
lambda x: -1. / (3. * x * np.cbrt(x)),
0.01, 100.0],
'reciprocal': [lambda x: mx.sym.reciprocal(x),
lambda x: 1. / x,
lambda x: -1. / (x ** 2),
0.01, 100.0],
'relu': [lambda x: mx.sym.relu(x),
lambda x: np.maximum(x, 0.),
lambda x: 1. * (x > 0.),
-5.0, 5.0],
'rsqrt': [lambda x: mx.sym.rsqrt(x),
lambda x: 1. / np.sqrt(x),
lambda x: -0.5 / (x * np.sqrt(x)),
0.01, 100.0],
'sigmoid': [lambda x: mx.sym.sigmoid(x),
lambda x: 1. / (np.exp(-x) + 1.),
lambda x: 1. / (np.exp(-x) + 1.) / (np.exp(x) + 1.),
-3.0, 3.0],
'softsign': [lambda x: mx.sym.softsign(x),
lambda x: x / (1. + np.abs(x)),
lambda x: 1. / np.square(1. + np.abs(x)),
-3.0, 3.0],
'sin': [lambda x: mx.sym.sin(x),
lambda x: np.sin(x),
lambda x: np.cos(x),
-5.0, 5.0],
'sinh': [lambda x: mx.sym.sinh(x),
lambda x: np.sinh(x),
lambda x: np.cosh(x),
-2.0, 2.0],
'sqrt': [lambda x: mx.sym.sqrt(x),
lambda x: np.sqrt(x),
lambda x: 0.5 / np.sqrt(x),
0.01, 100.0],
'tan': [lambda x: mx.sym.tan(x),
lambda x: np.tan(x),
lambda x: np.tan(x) ** 2 + 1.,
-1.5, 1.5],
'tanh': [lambda x: mx.sym.tanh(x),
lambda x: np.tanh(x),
lambda x: 1. - np.tanh(x) ** 2,
-4.0, 4.0],
'smooth_l1_sig1': [lambda x: mx.sym.smooth_l1(x, scalar=1.),
lambda x: np_smooth_l1(x, 1.),
lambda x: np_smooth_l1_grad(x, 1.),
-2.0, 2.0],
'smooth_l1_sig_default': [lambda x: mx.sym.smooth_l1(x),
lambda x: np_smooth_l1(x, 1.),
lambda x: np_smooth_l1_grad(x, 1.),
-2.0, 2.0],
'smooth_l1_sig2': [lambda x: mx.sym.smooth_l1(x, scalar=2.),
lambda x: np_smooth_l1(x, 2.),
lambda x: np_smooth_l1_grad(x, 2.),
-1.0, 1.0]
}
if have_scipy:
unary_ops['gamma'] = [lambda x: mx.sym.gamma(x),
lambda x: scipy_special.gamma(x),
lambda x: scipy_special.gamma(x) * scipy_special.psi(x),
0.01, 5.0]
unary_ops['gammaln'] = [lambda x: mx.sym.gammaln(x),
lambda x: scipy_special.gammaln(x),
lambda x: scipy_special.psi(x),
0.01, 20.0]
# Loop over operators
for name, op in unary_ops.items():
# Loop over dtype's
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
if name == 'gammaln' or name == 'gamma':
rtol = rtol_less_l[ind]
atol = atol_less_l[ind]
else:
rtol = rtol_l[ind]
atol = atol_l[ind]
compare_forw_backw_unary_op(
name, op[0], op[1], op[2], shape, op[3], op[4], rtol, atol,
dtype)
# Finite difference testing
finite_diff_unary_op(
name, op[0], shape, op[3], op[4], rtol_fd, atol_fd, num_eps)
def compare_forw_backw_binary_op(
name, forward_mxnet_call, forward_numpy_call,
backward1_numpy_call, backward2_numpy_call, shape, input1_low,
input1_high, input2_low, input2_high, rtol, atol, dtype=np.float32):
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol,
atol=atol, dtype=dtype)
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol, atol=atol, dtype=dtype)
op_name = 'binary_op={}, dtype={}'.format(name, dtype)
data1 = mx.symbol.Variable(op_name + '_data1', dtype=dtype)
data2 = mx.symbol.Variable(op_name + '_data2', dtype=dtype)
# Comparison: Forward expression
data1_np = np.random.uniform(input1_low, input1_high, shape).astype(dtype)
data2_np = np.random.uniform(input2_low, input2_high, shape).astype(dtype)
res_np = forward_numpy_call(data1_np, data2_np)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data1, data2), mx.sym.zeros_like(data1),
name=op_name)
check_fw(op_ex, [data1_np, data2_np], [res_np])
# Comparison: Backward expression
res_grad = np.random.uniform(-2.0, 2.0, shape).astype(dtype)
data1_grad = backward1_numpy_call(data1_np, data2_np) * res_grad
data2_grad = backward2_numpy_call(data1_np, data2_np) * res_grad
check_bw(op_ex, [data1_np, data2_np], [res_grad], [data1_grad, data2_grad])
def finite_diff_binary_op(
name, forward_mxnet_call, shape, input1_low, input1_high, input2_low,
input2_high, rtol, atol, num_eps):
# Finite difference tests are done in float64
dtype = np.float64
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol,
atol=atol, dtype=dtype)
data1_np = np.random.uniform(input1_low, input1_high, shape).astype(dtype)
data2_np = np.random.uniform(input2_low, input2_high, shape).astype(dtype)
data1 = mx.symbol.Variable('data1', dtype=dtype)
data2 = mx.symbol.Variable('data2', dtype=dtype)
op_name = 'binary_op={}, dtype={}'.format(name, dtype)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data1, data2), mx.sym.zeros_like(data1),
name=op_name)
check_grad(op_ex, [data1_np, data2_np])
# Tests for binary operators (basic mathematical functions):
# - Forward: Comparison to NumPy (several dtype)
# - Backward: Comparison to NumPy (several dtype)
# - Finite difference tests (only dtype = float64)
@with_seed()
def test_binary_math_operators():
shape=(9, 10)
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
binary_ops = {
'hypot' : [lambda x, y: mx.sym.hypot(x, y),
lambda x, y: np.hypot(x, y),
lambda x, y: x / np.hypot(x, y),
lambda x, y: y / np.hypot(x, y),
-5.0, 5.0, -5.0, 5.0],
'pow': [lambda x, y: mx.sym.pow(x, y),
lambda x, y: np.power(x, y),
lambda x, y: np.power(x, y - 1.) * y,
lambda x, y: np.power(x, y) * np.log(x),
0.2, 5.0, -4.0, 4.0]
}
# Loop over operators
for name, op in binary_ops.items():
# Loop over dtype's
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
compare_forw_backw_binary_op(
name, op[0], op[1], op[2], op[3], shape, op[4], op[5], op[6],
op[7], rtol_l[ind], atol_l[ind], dtype)
# Finite difference testing
finite_diff_binary_op(
name, op[0], shape, op[4], op[5], op[6], op[7], rtol_fd, atol_fd,
num_eps)
@with_seed()
def test_softmax():
check_softmax_with_shape((3, 4), default_context(), preserve_shape=False)
check_softmax_with_shape((3, 4), default_context(), preserve_shape=True)
check_softmax_with_shape((3, 4, 2), default_context(), preserve_shape=True)
check_softmax_grad(default_context())
check_smoothed_softmax_grad(default_context())
@with_seed()
def test_softmax_output_normalization():
def _softmaxoutput_normalization(multi_output, use_ignore, normalization):
grad_scale = np.random.random()
batch_size = 8
num_labels = 6
H, W = 3, 3
ignore_label = np.random.randint(0, num_labels) if use_ignore else -1
if multi_output:
data_shape = (batch_size, num_labels, H, W)
label_shape = (batch_size, H, W)
else:
data_shape = (batch_size, num_labels)
label_shape = (batch_size, )
data = mx.nd.random.uniform(-1, 1, shape=data_shape)
label = mx.nd.random.randint(
0, num_labels, shape=label_shape).astype('float32')
data.attach_grad()
kwargs = dict(grad_scale=grad_scale,
normalization=normalization, multi_output=multi_output)
if use_ignore:
kwargs.update(use_ignore=True, ignore_label=ignore_label)
with mx.autograd.record():
out = mx.nd.SoftmaxOutput(data=data, label=label, **kwargs)
out.backward(mx.nd.ones_like(data))
exp_data = mx.nd.exp(data)
softmax_data = exp_data / exp_data.sum(1, keepdims=True)
argmax_data = mx.nd.argmax(data, axis=1)
assert_almost_equal(out.asnumpy(), softmax_data.asnumpy())
one_hot_label = mx.nd.one_hot(label, num_labels)
if multi_output:
one_hot_label = one_hot_label.transpose((0, 3, 1, 2))
data_grad = softmax_data - one_hot_label
if use_ignore:
if multi_output:
data_grad *= (label !=
ignore_label).reshape((batch_size, 1, H, W))
else:
data_grad *= (label != ignore_label).reshape((batch_size, 1))
valid_cnt = 1
if normalization == 'batch':
valid_cnt = batch_size
elif normalization == 'valid':
valid_cnt = mx.nd.maximum(1, (label != ignore_label).sum())
scale = grad_scale / valid_cnt
if multi_output:
if normalization != 'valid':
scale /= H * W
data_grad *= scale
assert_almost_equal(data.grad.asnumpy(), data_grad.asnumpy())
for multi_output in [False, True]:
for use_ignore in [False, True]:
for normalization in ['null', 'batch', 'valid']:
_softmaxoutput_normalization(
multi_output, use_ignore, normalization)
@with_seed()
def test_slice():
def test_slice_forward_backward(a, index):
a_np = a.asnumpy()
begin = []
end = []
step = []
for slice_i in index:
begin.append(slice_i.start)
end.append(slice_i.stop)
step.append(slice_i.step)
b = mx.nd.slice(a, begin=begin, end=end, step=step)
b_np = a_np[index]
assert same(b.asnumpy(), b_np)
data = mx.sym.Variable('data')
slice_sym = mx.sym.slice(data, begin=begin, end=end, step=step)
expected_in_grad = np.zeros_like(a_np)
expected_in_grad[index] = b_np
check_symbolic_backward(slice_sym, [a_np], [b_np], [expected_in_grad])
shape = (16, 14, 17, 20)
arr = mx.nd.arange(np.prod(shape)).reshape(shape=shape)
index_list = [(slice(None),), (slice(None), slice(None)), (slice(1, 10),), (slice(1, 10), slice(3, 9)),
(slice(1, 10), slice(2, 5), slice(3, 6), slice(7, 10)),
(slice(1, 10, 2), slice(2, 9, 3), slice(3, 6, 5), slice(7, 10, 2)),
(slice(None, None, -1), slice(None, None, -1), slice(None, None, -1)),
(slice(10, 0, -2), slice(5, 2, -1), slice(7, None, 3), slice(None, 12, 4))]
for index in index_list:
test_slice_forward_backward(arr, index)
def test_begin_equals_end(shape, begin, end, step):
in_arr = mx.nd.arange(np.prod(shape)).reshape(shape=shape)
out_arr = mx.nd.slice(in_arr, begin=begin, end=end, step=step)
assertRaises(MXNetError, test_begin_equals_end, (4,), (2,), (2,), (1,))
assertRaises(MXNetError, test_begin_equals_end, (1, 5), (None, 3), (None, 3), (-1, 1))
assertRaises(MXNetError, test_begin_equals_end, (3, 4, 5), (1, 3, 1), (3, 3, 1), (1, -3, 2))
assertRaises(MXNetError, test_begin_equals_end, (2, 4), (None, 2), (None, 2), (1, -1))
# check numeric gradient
in_data = np.arange(36).reshape(2, 2, 3, 3)
data = mx.sym.Variable('data')
slice_sym = mx.sym.slice(data, begin=[0, None], end=[1, None], step=[2, -1])
check_numeric_gradient(slice_sym, [in_data])
def test_slice_partial_infer():
def check_slice_partial_infer(data, begin, end, step, expected_out_shape):
out = mx.sym.slice(data, begin=begin, end=end, step=step)
assert (out.infer_shape_partial()[1][0] == expected_out_shape), out.infer_shape_partial()[1]
def check_slice_axis_partial_infer(data, axis, begin, end, expected_out_shape):
out = mx.sym.slice_axis(data, axis=axis, begin=begin, end=end)
assert (out.infer_shape_partial()[1][0] == expected_out_shape), out.infer_shape_partial()[1]
var1 = mx.sym.var(name="data", shape=(0, 20))
check_slice_partial_infer(var1, (None, None), (None, 10), [], (0, 10))
check_slice_partial_infer(var1, (None, None), (None, 10), (None, 2), (0, 5))
check_slice_partial_infer(var1, (None, 3), (None, 10), [], (0, 7))
check_slice_partial_infer(var1, (None, 3), (5, 10), [], (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), [], (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (None, 1), (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (3, 3), (0, 3))
var1 = mx.sym.var(name="data", shape=(10, 0))
check_slice_axis_partial_infer(var1, 0, 0, 5, (5, 0))
check_slice_axis_partial_infer(var1, 1, 0, 5, (10, 0))
@with_seed()
def test_float16_min_max():
"""Test for issue: https://github.com/apache/incubator-mxnet/issues/9007"""
a = mx.nd.array([np.finfo('float16').min, np.finfo('float16').max], dtype='float16')
assert a.dtype == np.float16
assert np.finfo('float16').min == mx.nd.min(a).asscalar()
assert np.finfo('float16').max == mx.nd.max(a).asscalar()
@with_seed()
def test_squeeze_op():
def check_squeeze_op(shape, axis=None):
data = mx.nd.random.uniform(low=-10.0, high=10.0, shape=shape)
if axis is None:
out = mx.nd.squeeze(data).asnumpy()
out_expected = np.squeeze(data.asnumpy())
else:
out = mx.nd.squeeze(data, axis=axis).asnumpy()
out_expected = np.squeeze(data.asnumpy(), axis=axis)
if out.shape == (1,): # as an exception (1, 1, 1) will be squeezed to (1,)
out_expected = np.squeeze(data.asnumpy(), axis=tuple([i for i in range(1, len(shape))]))
assert same(out, out_expected)
# check forward
check_squeeze_op((1, 5, 1, 3, 1), 0)
check_squeeze_op((1, 5, 1, 3, 1), 2)
check_squeeze_op((1, 5, 1, 3, 1), 4)
check_squeeze_op((1, 5, 1, 3, 1), (0, 4))
check_squeeze_op((1, 5, 1, 3, 1), (0, 2, 4))
check_squeeze_op((1, 5, 1, 3, 1))
check_squeeze_op((1, 1, 1, 1))
# check gradient
data = mx.symbol.Variable('data')
shape = (1, 2, 1, 3, 1)
data_tmp = np.ones(shape)
test = mx.sym.squeeze(data)
check_numeric_gradient(test, [data_tmp])
test = mx.sym.squeeze(data, axis=2)
check_numeric_gradient(test, [data_tmp])
test = mx.sym.squeeze(data, axis=(2, 4))
check_numeric_gradient(test, [data_tmp])
@with_seed()
def test_adaptive_avg_pool_op():
def py_adaptive_avg_pool(x, height, width):
# 2D per frame adaptive avg pool
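# For output cell (oh, ow) the input window spans rows [floor(oh*isizeH/osizeH),
# ceil((oh+1)*isizeH/osizeH)) and the analogous column range; the cell value is the
# plain average over that window, which is how adaptive average pooling is defined.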
def adaptive_avg_pool_frame(x, y):
isizeH, isizeW = x.shape
osizeH, osizeW = y.shape
for oh in range(osizeH):
istartH = int(np.floor(1.0 * (oh * isizeH) / osizeH))
iendH = int(np.ceil(1.0 * (oh + 1) * isizeH / osizeH))
kH = iendH - istartH
for ow in range(osizeW):
istartW = int(np.floor(1.0 * (ow * isizeW) / osizeW))
iendW = int(np.ceil(1.0 * (ow + 1) * isizeW / osizeW))
kW = iendW - istartW
xsum = 0
for ih in range(kH):
for iw in range(kW):
xsum += x[istartH+ih][istartW+iw]
y[oh][ow] = xsum / kH / kW
B,C,_,_ = x.shape
y = np.empty([B,C,height, width], dtype=x.dtype)
for b in range(B):
for c in range(C):
adaptive_avg_pool_frame(x[b][c], y[b][c])
return y
def check_adaptive_avg_pool_op(shape, output_height, output_width=None):
x = mx.nd.random.uniform(shape=shape)
if output_width is None:
y = mx.nd.contrib.AdaptiveAvgPooling2D(x, output_size=output_height)
npy = py_adaptive_avg_pool(x.asnumpy(), output_height, output_height)
else:
y = mx.nd.contrib.AdaptiveAvgPooling2D(x, output_size=(output_height, output_width))
npy = py_adaptive_avg_pool(x.asnumpy(), output_height, output_width)
assert_almost_equal(y.asnumpy(), npy)
shape = (2, 2, 10, 10)
for i in range(1, 11):
check_adaptive_avg_pool_op(shape, i)
for j in range(1, 11):
check_adaptive_avg_pool_op(shape, i, j)
@with_seed()
def test_bilinear_resize_op():
def py_bilinear_resize(x, outputHeight, outputWidth):
batch, channel, inputHeight, inputWidth = x.shape
if outputHeight == inputHeight and outputWidth == inputWidth:
return x
y = np.empty([batch, channel, outputHeight, outputWidth])
rheight = 1.0 * (inputHeight - 1) / (outputHeight - 1) if outputHeight > 1 else 0.0
rwidth = 1.0 * (inputWidth - 1) / (outputWidth - 1) if outputWidth > 1 else 0.0
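# Standard bilinear interpolation: each output pixel mixes the four neighbouring input
# pixels (h1, w1), (h1, w1 + w1p), (h1 + h1p, w1), (h1 + h1p, w1 + w1p) with weights
# derived from the fractional offsets h1lambda and w1lambda.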
for h2 in range(outputHeight):
h1r = 1.0 * h2 * rheight
h1 = int(np.floor(h1r))
h1lambda = h1r - h1
h1p = 1 if h1 < (inputHeight - 1) else 0
for w2 in range(outputWidth):
w1r = 1.0 * w2 * rwidth
w1 = int(np.floor(w1r))
w1lambda = w1r - w1
w1p = 1 if w1 < (inputWidth - 1) else 0
for b in range(batch):
for c in range(channel):
y[b][c][h2][w2] = (1-h1lambda)*((1-w1lambda)*x[b][c][h1][w1] + \
w1lambda*x[b][c][h1][w1+w1p]) + \
h1lambda*((1-w1lambda)*x[b][c][h1+h1p][w1] + \
w1lambda*x[b][c][h1+h1p][w1+w1p])
return y
def check_bilinear_resize_op(shape, height, width):
x = mx.nd.random.uniform(shape=shape)
y = mx.nd.contrib.BilinearResize2D(x, height=height, width=width)
assert_almost_equal(y.asnumpy(), py_bilinear_resize(x.asnumpy(), height, width))
x_scale = width / shape[-1]
y_scale = height / shape[-2]
y = mx.nd.contrib.BilinearResize2D(x, scale_height=y_scale, scale_width=x_scale)
assert_almost_equal(y.asnumpy(), py_bilinear_resize(x.asnumpy(), height, width))
shape = (2, 2, 10, 10)
check_bilinear_resize_op(shape, 5, 5)
check_bilinear_resize_op(shape, 10, 10)
check_bilinear_resize_op(shape, 15, 15)
check_bilinear_resize_op(shape, 3, 7)
check_bilinear_resize_op(shape, 13, 17)
def test_multi_proposal_op():
# parameters
feature_stride = 16
scales = (8, 16, 32)
ratios = (0.5, 1, 2)
rpn_pre_nms_top_n = 12000
rpn_post_nms_top_n = 2000
threshold = 0.7
rpn_min_size = 16
batch_size = 20
feat_len = (1000 + 15) // 16
H, W = feat_len, feat_len
num_anchors = len(scales) * len(ratios)
count_anchors = H * W * num_anchors
'''
cls_prob: (batch_size, 2 * num_anchors, H, W)
bbox_pred: (batch_size, 4 * num_anchors, H, W)
im_info: (batch_size, 3)
'''
cls_prob = mx.nd.empty((batch_size, 2 * num_anchors, H, W), dtype = np.float32)
bbox_pred = mx.nd.empty((batch_size, 4 * num_anchors, H, W), dtype = np.float32)
im_info = mx.nd.empty((batch_size, 3), dtype = np.float32)
cls_prob = mx.nd.array(np.random.random(cls_prob.shape))
bbox_pred = mx.nd.array(np.random.random(bbox_pred.shape))
for i in range(batch_size):
im_size = np.random.randint(100, feat_len * feature_stride, size = (2,))
im_scale = np.random.randint(70, 100) / 100.0
im_info[i, :] = [im_size[0], im_size[1], im_scale]
def get_sub(arr, i):
new_shape = list(arr.shape)
new_shape[0] = 1
res = arr[i].reshape(new_shape)
return res
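# MultiProposal applied to the whole batch should produce exactly the proposals (and scores)
# obtained by running the single-image Proposal operator on each batch element and stacking
# the results, with column 0 of each ROI carrying the batch index.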
def check_forward(rpn_pre_nms_top_n, rpn_post_nms_top_n):
single_proposal = []
single_score = []
for i in range(batch_size):
rois, score = mx.nd.contrib.Proposal(
cls_prob = get_sub(cls_prob, i),
bbox_pred = get_sub(bbox_pred, i),
im_info = get_sub(im_info, i),
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = True)
single_proposal.append(rois)
single_score.append(score)
multi_proposal, multi_score = mx.nd.contrib.MultiProposal(
cls_prob = cls_prob,
bbox_pred = bbox_pred,
im_info = im_info,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = True)
single_proposal = mx.nd.stack(*single_proposal).reshape(multi_proposal.shape)
single_score = mx.nd.stack(*single_score).reshape(multi_score.shape)
single_proposal_np = single_proposal.asnumpy()
multi_proposal_np = multi_proposal.asnumpy()
single_score_np = single_score.asnumpy()
multi_score_np = multi_score.asnumpy()
# check rois x1,y1,x2,y2
assert np.allclose(single_proposal_np[:, 1:], multi_proposal_np[:, 1:])
# check rois batch_idx
for i in range(batch_size):
start = i * rpn_post_nms_top_n
end = start + rpn_post_nms_top_n
assert (multi_proposal_np[start:end, 0] == i).all()
# check score
assert np.allclose(single_score_np, multi_score_np)
def check_backward(rpn_pre_nms_top_n, rpn_post_nms_top_n):
im_info_sym = mx.sym.Variable('im_info')
cls_prob_sym = mx.sym.Variable('cls_prob')
bbox_pred_sym = mx.sym.Variable('bbox_pred')
sym = mx.sym.contrib.MultiProposal(
cls_prob = cls_prob_sym,
bbox_pred = bbox_pred_sym,
im_info = im_info_sym,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = False)
location = [cls_prob.asnumpy(), bbox_pred.asnumpy(), im_info.asnumpy()]
expected = [np.zeros_like(e) for e in location]
out_grads = [np.ones((rpn_post_nms_top_n, 5))]
check_symbolic_backward(sym, location, out_grads, expected)
check_forward(rpn_pre_nms_top_n, rpn_post_nms_top_n)
check_forward(rpn_pre_nms_top_n, 1500)
check_forward(1000, 500)
check_backward(rpn_pre_nms_top_n, rpn_post_nms_top_n)
@with_seed()
def test_quadratic_function():
def f(x, a, b, c):
return a * x**2 + b * x + c
a = np.random.random_sample()
b = np.random.random_sample()
c = np.random.random_sample()
data = mx.symbol.Variable('data')
quad_sym = mx.sym.contrib.quadratic(data=data, a=a, b=b, c=c)
for dtype in [np.float16, np.float32, np.float64]:
for ndim in range(1, 6):
shape = rand_shape_nd(ndim, 5)
data_np = np.random.randn(*shape).astype(dtype)
expected = f(data_np, a, b, c)
backward_expected = 2 * a * data_np + b
# check imperative forward
output = mx.nd.contrib.quadratic(mx.nd.array(data_np), a=a, b=b, c=c)
assert_almost_equal(output.asnumpy(),expected,
rtol=1e-2 if dtype is np.float16 else 1e-5,
atol=1e-2 if dtype is np.float16 else 1e-5)
# check forward
check_symbolic_forward(quad_sym, [data_np], [expected],
rtol=1e-2 if dtype is np.float16 else 1e-5,
atol=1e-2 if dtype is np.float16 else 1e-5)
# check backward
check_symbolic_backward(quad_sym, [data_np], [np.ones(expected.shape)],
[backward_expected],
rtol=1e-2 if dtype is np.float16 else 1e-5,
atol=1e-2 if dtype is np.float16 else 1e-5)
# check backward using finite difference
check_numeric_gradient(quad_sym, [data_np], atol=0.001)
@with_seed()
def test_histogram():
def f(x, bins=10, range=None):
return np.histogram(x, bins, range=range)
for ndim in range(1, 6):
shape = rand_shape_nd(ndim)
x = rand_ndarray(shape, stype='default', dtype=np.float64)
mx_bins = mx.nd.array([-1.0, 0.5, 2.0, 4.5, 50.0], dtype=np.float64)
np_bins = mx_bins.asnumpy()
bin_cnt = random.randint(2, 10)
bin_range = (-2.5, 2.5)
mx_histo1, mx_bins1 = mx.nd.histogram(x, bins=bin_cnt, range=bin_range)
np_histo1, np_bins1 = f(x.asnumpy(), bins=bin_cnt, range=bin_range)
assert_almost_equal(mx_bins1.asnumpy(), np_bins1)
assert_almost_equal(mx_histo1.asnumpy(), np_histo1, rtol=1e-3, atol=1e-5)
mx_histo2, mx_bins2 = mx.nd.histogram(x, bins=mx_bins)
np_histo2, np_bins2 = f(x.asnumpy(), bins=np_bins)
assert_almost_equal(mx_histo2.asnumpy(), np_histo2, rtol=1e-3, atol=1e-5)
assert_almost_equal(mx_bins2.asnumpy(), np_bins2, rtol=1e-3, atol=1e-5)
data = mx.sym.Variable("data")
bins = mx.sym.Variable("bins")
histo1 = mx.sym.histogram(a=data, bins=bin_cnt, range=bin_range)
histo2 = mx.sym.histogram(a=data, bins=bins)
executor1 = histo1.bind(ctx=default_context(), args={"data" : x})
executor1.forward(is_train=False)
assert_almost_equal(np_histo1, executor1.outputs[0].asnumpy(), 0, 0, ("EXPECTED_histo1", "FORWARD_histo1"), equal_nan=False)
executor2 = histo2.bind(ctx=default_context(), args={"data" : x, "bins" : mx_bins})
executor2.forward(is_train=False)
assert_almost_equal(np_histo2, executor2.outputs[0].asnumpy(), 0, 0, ("EXPECTED_histo2", "FORWARD_histo2"), equal_nan=False)
def test_op_output_names_monitor():
def check_name(op_sym, expected_names):
output_names = []
def get_output_names_callback(name, arr):
output_names.append(py_str(name))
op_exe = op_sym.simple_bind(ctx=mx.current_context(), grad_req='null')
op_exe.set_monitor_callback(get_output_names_callback, monitor_all=False)
op_exe.forward()
for output_name, expected_name in zip(output_names, expected_names):
assert output_name == expected_name
data = mx.sym.Variable('data', shape=(10, 3, 10, 10))
conv_sym = mx.sym.Convolution(data, kernel=(2, 2), num_filter=1, name='conv')
check_name(conv_sym, ['conv_output'])
deconv_sym = mx.sym.Deconvolution(data, kernel=(2, 2), num_filter=1, name='deconv')
check_name(deconv_sym, ['deconv_output'])
fc_sym = mx.sym.FullyConnected(data, num_hidden=10, name='fc')
check_name(fc_sym, ['fc_output'])
lrn_sym = mx.sym.LRN(data, nsize=1, name='lrn')
check_name(lrn_sym, ['lrn_output', 'lrn_tmp_norm'])
act_sym = mx.sym.Activation(data, act_type='relu', name='act')
check_name(act_sym, ['act_output'])
cc_sym = mx.sym.concat(data, data, dim=0, name='concat')
check_name(cc_sym, ['concat_output'])
sm_sym = mx.sym.softmax(data, name='softmax')
check_name(sm_sym, ['softmax_output'])
sa_sym = mx.sym.SoftmaxActivation(data, name='softmax')
check_name(sa_sym, ['softmax_output'])
us_sym = mx.sym.UpSampling(data, scale=2, sample_type='nearest',
name='upsampling')
check_name(us_sym, ['upsampling_output'])
us_sym = mx.sym.Pooling(data, kernel=(2, 2), pool_type='avg',
name='pooling')
check_name(us_sym, ['pooling_output'])
def test_op_all_names_monitor():
def check_name(op_sym, expected_names):
output_names = []
def get_output_names_callback(name, arr):
output_names.append(py_str(name))
op_exe = op_sym.simple_bind(ctx=mx.current_context(), grad_req='null')
op_exe.set_monitor_callback(get_output_names_callback, monitor_all=True)
op_exe.forward()
for output_name, expected_name in zip(output_names, expected_names):
assert output_name == expected_name
data = mx.sym.Variable('data', shape=(10, 3, 10, 10))
conv_sym = mx.sym.Convolution(data, kernel=(2, 2), num_filter=1, name='conv')
check_name(conv_sym, ['data', 'conv_data', 'conv_weight', 'conv_weight', 'conv_bias', 'conv_bias', 'conv_output'])
deconv_sym = mx.sym.Deconvolution(data, kernel=(2, 2), num_filter=1, name='deconv')
check_name(deconv_sym, ['data', 'deconv_data', 'deconv_weight', 'deconv_weight', 'deconv_output'])
fc_sym = mx.sym.FullyConnected(data, num_hidden=10, name='fc')
check_name(fc_sym, ['data', 'fc_data', 'fc_weight', 'fc_weight', 'fc_bias', 'fc_bias', 'fc_output'])
lrn_sym = mx.sym.LRN(data, nsize=1, name='lrn')
check_name(lrn_sym, ['data', 'lrn_data', 'lrn_output', 'lrn_tmp_norm'])
act_sym = mx.sym.Activation(data, act_type='relu', name='act')
check_name(act_sym, ['data', 'act_input0', 'act_output'])
cc_sym = mx.sym.concat(data, data, dim=0, name='concat')
check_name(cc_sym, ['data', 'concat_arg0', 'data', 'concat_arg1', 'concat_output'])
sm_sym = mx.sym.softmax(data, name='softmax')
check_name(sm_sym, ['data', 'softmax_input0', 'softmax_output'])
sa_sym = mx.sym.SoftmaxActivation(data, name='softmax')
check_name(sa_sym, ['data', 'softmax_input0', 'softmax_output'])
us_sym = mx.sym.UpSampling(data, scale=2, sample_type='nearest',
name='upsampling')
check_name(us_sym, ['data', 'upsampling_arg0', 'upsampling_output'])
us_sym = mx.sym.Pooling(data, kernel=(2, 2), pool_type='avg',
name='pooling')
check_name(us_sym, ['data', 'pooling_data', 'pooling_output'])
@with_seed()
def test_activation():
shapes = [(9,), (9, 10), (9, 10, 10), (1, 9, 10, 10)]
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
unary_ops = {
'relu': [lambda x: mx.sym.Activation(x, act_type='relu'),
lambda x: np.maximum(x, 0.),
lambda x: 1. * (x > 0.),
-5.0, 5.0],
'sigmoid': [lambda x: mx.sym.Activation(x, act_type='sigmoid'),
lambda x: 1. / (np.exp(-x) + 1.),
lambda x: 1. / (np.exp(-x) + 1.) / (np.exp(x) + 1.),
-3.0, 3.0],
'tanh': [lambda x: mx.sym.Activation(x, act_type='tanh'),
lambda x: np.tanh(x),
lambda x: 1. - np.tanh(x) ** 2,
-4.0, 4.0],
'softrelu': [lambda x: mx.sym.Activation(x, act_type='softrelu'),
lambda x: np.log(1. + np.exp(x)),
lambda x: 1. - 1 / (1 + np.exp(x)),
-3.0, 3.0],
'softsign': [lambda x: mx.sym.Activation(x, act_type='softsign'),
lambda x: x / (1. + np.abs(x)),
lambda x: 1. / np.square(1. + np.abs(x)),
-3.0, 3.0],
}
# Loop over operators
for name, op in unary_ops.items():
# Loop over shapes
for shape in shapes:
# Loop over dtype's
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
rtol = rtol_l[ind]
atol = atol_l[ind]
compare_forw_backw_unary_op(
name, op[0], op[1], op[2], shape, op[3], op[4], rtol, atol,
dtype)
# Finite difference testing
finite_diff_unary_op(
name, op[0], shape, op[3], op[4], rtol_fd, atol_fd, num_eps)
@with_seed()
def test_ravel():
# be aware that check_symbolic_forward will use float type internally
# for the arrays and that limits the representable flat index range.
# Taking dim==4 and a range of [0,..,100] for the data can already
# cause precision issues and break this test.
for dim in [1, 2, 3, 4]:
data = np.random.randint(50, size=(dim, 500))
shape = tuple(np.add(np.amax(data, axis=1), [1]))
a = mx.sym.Variable('a')
ravel_npy = np.ravel_multi_index(data, shape)
b = mx.sym.ravel_multi_index(a, shape=shape)
check_symbolic_forward(b, location={'a': data}, expected=[ravel_npy])
c = mx.sym.unravel_index(a, shape=shape)
check_symbolic_forward(c, location={'a': ravel_npy}, expected=[data])
# Test with leading dimension set to -1.
shape2 = shape
shape2 = (-1,)+shape[1:]
b = mx.sym.ravel_multi_index(a, shape=shape2)
check_symbolic_forward(b, location={'a': data}, expected=[ravel_npy])
c = mx.sym.unravel_index(a, shape=shape2)
check_symbolic_forward(c, location={'a': ravel_npy}, expected=[data])
def test_context_num_gpus():
try:
# Note: the test is run on both GPU and CPU hosts, so we cannot assert
# on a specific number here.
assert mx.context.num_gpus() >= 0
except mx.MXNetError as e:
# Note: On a CPU only host CUDA sometimes is not able to determine the number
# of GPUs
if str(e).find("CUDA") == -1:
raise e
@with_seed()
def test_op_roi_align():
T = np.float32
def assert_same_dtype(dtype_a, dtype_b):
'''
Assert whether the two data type are the same
Parameters
----------
dtype_a, dtype_b: type
Input data types to compare
'''
assert dtype_a == dtype_b,\
TypeError('Unmatched data types: %s vs %s' % (dtype_a, dtype_b))
def bilinear_interpolate(bottom, height, width, y, x):
if y < -1.0 or y > height or x < -1.0 or x > width:
return T(0.0), []
x = T(max(0.0, x))
y = T(max(0.0, y))
x_low = int(x)
y_low = int(y)
if x_low >= width - 1:
x_low = x_high = width - 1
x = T(x_low)
else:
x_high = x_low + 1
if y_low >= height - 1:
y_low = y_high = height - 1
y = T(y_low)
else:
y_high = y_low + 1
ly = y - T(y_low)
lx = x - T(x_low)
hy = T(1.0) - ly
hx = T(1.0) - lx
v1 = bottom[y_low, x_low]
v2 = bottom[y_low, x_high]
v3 = bottom[y_high, x_low]
v4 = bottom[y_high, x_high]
w1 = hy * hx
w2 = hy * lx
w3 = ly * hx
w4 = ly * lx
assert_same_dtype(w1.dtype, T)
assert_same_dtype(w2.dtype, T)
assert_same_dtype(w3.dtype, T)
assert_same_dtype(w4.dtype, T)
val = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4
assert_same_dtype(val.dtype, T)
grad = [(y_low, x_low, w1), (y_low, x_high, w2),
(y_high, x_low, w3), (y_high, x_high, w4)
]
return val, grad
def roialign_forward_backward(data, rois, pooled_size, spatial_scale, sampling_ratio,
position_sensitive, dy):
N, C, H, W = data.shape
R = rois.shape[0]
PH, PW = pooled_size
assert rois.ndim == 2,\
ValueError(
'The ndim of rois should be 2 rather than %d' % rois.ndim)
assert rois.shape[1] == 5,\
ValueError(
'The length of the axis 1 of rois should be 5 rather than %d' % rois.shape[1])
assert_same_dtype(data.dtype, T)
assert_same_dtype(rois.dtype, T)
C_out = C // PH // PW if position_sensitive else C
out = np.zeros((R, C_out, PH, PW), dtype=T)
dx = np.zeros_like(data)
drois = np.zeros_like(rois)
for r in range(R):
batch_ind = int(rois[r, 0])
sw, sh, ew, eh = rois[r, 1:5] * T(spatial_scale)
roi_w = T(max(ew - sw, 1.0))
roi_h = T(max(eh - sh, 1.0))
bin_h = roi_h / T(PH)
bin_w = roi_w / T(PW)
bdata = data[batch_ind]
if sampling_ratio > 0:
roi_bin_grid_h = roi_bin_grid_w = sampling_ratio
else:
roi_bin_grid_h = int(np.ceil(roi_h / T(PH)))
roi_bin_grid_w = int(np.ceil(roi_w / T(PW)))
count = T(roi_bin_grid_h * roi_bin_grid_w)
for c in range(C_out):
for ph in range(PH):
for pw in range(PW):
val = T(0.0)
c_in = c * PH * PW + ph * PW + pw if position_sensitive else c
for iy in range(roi_bin_grid_h):
y = sh + T(ph) * bin_h + (T(iy) + T(0.5)) * \
bin_h / T(roi_bin_grid_h)
for ix in range(roi_bin_grid_w):
x = sw + T(pw) * bin_w + (T(ix) + T(0.5)) * \
bin_w / T(roi_bin_grid_w)
v, g = bilinear_interpolate(
bdata[c_in], H, W, y, x)
assert_same_dtype(v.dtype, T)
val += v
# compute grad
for qy, qx, qw in g:
assert_same_dtype(qw.dtype, T)
dx[batch_ind, c_in, qy, qx] += dy[r,
c, ph, pw] * qw / count
out[r, c, ph, pw] = val / count
assert_same_dtype(out.dtype, T)
return out, [dx, drois]
def test_roi_align_value(sampling_ratio=0, position_sensitive=False):
ctx = default_context()
dtype = np.float32
dlen = 224
N, C, H, W = 5, 3, 16, 16
R = 7
pooled_size = (3, 4)
C = C * pooled_size[0] * pooled_size[1] if position_sensitive else C
spatial_scale = H * 1.0 / dlen
data = mx.nd.array(
np.arange(N * C * W * H).reshape((N, C, H, W)), ctx=ctx, dtype=dtype)
center_xy = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
wh = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
batch_ind = mx.nd.array(np.random.randint(0, N, size=(R, 1)), ctx=ctx)
pos = mx.nd.concat(center_xy - wh / 2, center_xy + wh / 2, dim=1)
rois = mx.nd.concat(batch_ind, pos, dim=1)
data.attach_grad()
rois.attach_grad()
with mx.autograd.record():
output = mx.nd.contrib.ROIAlign(data, rois, pooled_size=pooled_size,
spatial_scale=spatial_scale, sample_ratio=sampling_ratio,
position_sensitive=position_sensitive)
C_out = C // pooled_size[0] // pooled_size[1] if position_sensitive else C
dy = mx.nd.random.uniform(-1, 1, (R, C_out) +
pooled_size, ctx=ctx, dtype=dtype)
output.backward(dy)
real_output, [dx, drois] = roialign_forward_backward(data.asnumpy(), rois.asnumpy(), pooled_size,
spatial_scale, sampling_ratio,
position_sensitive, dy.asnumpy())
assert_almost_equal(output.asnumpy(), real_output, atol=1e-3)
assert_almost_equal(data.grad.asnumpy(), dx, atol=1e-3)
assert_almost_equal(rois.grad.asnumpy(), drois, atol=1e-3)
# modified from test_roipooling()
def test_roi_align_autograd(sampling_ratio=0):
ctx = default_context()
data = mx.symbol.Variable(name='data')
rois = mx.symbol.Variable(name='rois')
test = mx.symbol.contrib.ROIAlign(data=data, rois=rois, pooled_size=(4, 4), spatial_scale=1,
sample_ratio=sampling_ratio)
x1 = np.random.rand(4, 1, 12, 12).astype('float64')
x2 = np.array([[0, 1.1, 1.1, 6.2, 6.2], [2, 6.1, 2.1, 8.2, 11.2],
[1, 3.1, 1.1, 5.2, 10.2]], dtype='float64')
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data': 'write', 'rois': 'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4, ctx=ctx)
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data': 'add', 'rois': 'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4, ctx=ctx)
test_roi_align_value()
test_roi_align_value(sampling_ratio=2)
test_roi_align_value(position_sensitive=True)
test_roi_align_autograd()
@with_seed()
def test_diag():
# Test 2d input
h = np.random.randint(2,9)
w = np.random.randint(2,9)
a_np = np.random.random((h, w)).astype(np.float32)
a = mx.nd.array(a_np).astype('float32')
# k == 0
r = mx.nd.diag(a)
assert_almost_equal(r.asnumpy(), np.diag(a_np))
# k == 1
k = 1
r = mx.nd.diag(a, k=k)
assert_almost_equal(r.asnumpy(), np.diag(a_np, k=k))
# k == -1
k = -1
r = mx.nd.diag(a, k=k)
assert_almost_equal(r.asnumpy(), np.diag(a_np, k=k))
# random k
k = np.random.randint(-min(h,w) + 1, min(h,w))
r = mx.nd.diag(a, k=k)
assert_almost_equal(r.asnumpy(), np.diag(a_np, k=k))
# invalid k
k = max(h,w) + 1
assertRaises(MXNetError, mx.nd.diag, a, k=k)
# Test 2d backward, k=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data)
check_numeric_gradient(diag_sym, [a_np])
# Test 2d backward, k=1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1)
check_numeric_gradient(diag_sym, [a_np])
# Test 2d backward, k=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1)
check_numeric_gradient(diag_sym, [a_np])
# test 1d input
d = np.random.randint(2,9)
a_np = np.random.random((d))
a = mx.nd.array(a_np)
# k is random
k = np.random.randint(-d,d)
r = mx.nd.diag(a, k=k)
assert_almost_equal(r.asnumpy(), np.diag(a_np, k=k))
# Test 2d backward, k=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data)
check_numeric_gradient(diag_sym, [a_np])
# Test 2d backward, k=1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1)
check_numeric_gradient(diag_sym, [a_np])
# Test 2d backward, k=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d input
x1 = np.random.randint(3,9)
x2 = np.random.randint(3,9)
x3 = np.random.randint(3,9)
x4 = np.random.randint(3,9)
a_np = np.random.random((x1, x2, x3, x4)).astype(np.float32)
a = mx.nd.array(a_np).astype('float32')
# k = 0, axis1=0, axis2=1
r = mx.nd.diag(data=a, k=0, axis1=0, axis2=1)
assert_almost_equal(r.asnumpy(), np.diagonal(a_np, offset=0, axis1=0, axis2=1))
# k = 1, axis1=1, axis2=0
r = mx.nd.diag(data=a, k=1, axis1=1, axis2=0)
assert_almost_equal(r.asnumpy(), np.diagonal(a_np, offset=1, axis1=1, axis2=0))
# k = -1 axis1=1, axis3=3
r = mx.nd.diag(data=a, k=-1, axis1=1, axis2=3)
assert_almost_equal(r.asnumpy(), np.diagonal(a_np, offset=-1, axis1=1, axis2=3))
# k = 2, axis1=-2, axis2=0
r = mx.nd.diag(data=a, k=2, axis1=-2, axis2=0)
assert_almost_equal(r.asnumpy(), np.diagonal(a_np, offset=2, axis1=-2, axis2=0))
# Test 4d backward, k=0, axis1=3, axis2=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=0, axis1=3, axis2=0)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=1, axis1=1, axis2=2
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1, axis1=1, axis2=2)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=-1, axis1=2, axis2=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1, axis1=2, axis2=0)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=-2, axis1=1, axis2=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-2, axis1=1, axis2=-1)
check_numeric_gradient(diag_sym, [a_np])
@with_seed()
def test_depthtospace():
def f(x, blocksize):
b, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
tmp = np.reshape(x, [b, blocksize, blocksize, c // (blocksize**2), h, w])
tmp = np.transpose(tmp, [0, 3, 4, 1, 5, 2])
y = np.reshape(tmp, [b, c // (blocksize**2), h * blocksize, w * blocksize])
return y
block = random.randint(2, 4)
rand_mul1 = random.randint(1, 4)
n = random.randint(1, 5)
c = block * block * rand_mul1
h = random.randint(1, 5)
w = random.randint(1, 5)
shape_inp = (n, c, h, w)
data = rand_ndarray(shape_inp, 'default')
data_np = data.asnumpy()
expected = f(data_np, block)
output = mx.nd.depth_to_space(data, block)
assert_almost_equal(output.asnumpy(), expected, atol=1e-3, rtol=1e-3)
shape_out = (n, c // (block ** 2), h * block, w * block)
data = mx.sym.Variable('data')
dts_sym = mx.sym.depth_to_space(data, block)
check_numeric_gradient(dts_sym, [np.ones(shape_inp)])
check_symbolic_forward(dts_sym, [data_np], [expected])
check_symbolic_backward(dts_sym, [data_np], [np.ones(shape_out)], [np.ones(shape_inp)])
def test_invalid_depth_dim():
invalid_shape_inp = (n, block - 1, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
def test_invalid_space_dim():
invalid_shape_inp = (n, block ** 2, 0, block + 1)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
def test_invalid_block_size():
block = 0
invalid_shape_inp = (n , c, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
test_invalid_depth_dim()
test_invalid_space_dim()
test_invalid_block_size()
@with_seed()
def test_spacetodepth():
def f(x, blocksize):
b, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
tmp = np.reshape(x, [b, c, h // blocksize, blocksize, w // blocksize, blocksize])
tmp = np.transpose(tmp, [0, 3, 5, 1, 2, 4])
y = np.reshape(tmp, [b, c * (blocksize**2), h // blocksize, w // blocksize])
return y
block = random.randint(2, 4)
rand_mul1 = random.randint(1, 4)
rand_mul2 = random.randint(1, 4)
n = random.randint(1, 5)
c = random.randint(1, 5)
h = block * rand_mul1
w = block * rand_mul2
shape_inp = (n, c, h, w)
data = rand_ndarray(shape_inp, 'default')
data_np = data.asnumpy()
expected = f(data_np, block)
output = mx.nd.space_to_depth(data, block)
assert_almost_equal(output.asnumpy(), expected, atol=1e-3, rtol=1e-3)
shape_out = (n, c * (block ** 2), h // block, w // block)
data = mx.sym.Variable('data')
dts_sym = mx.sym.space_to_depth(data, block)
check_numeric_gradient(dts_sym, [np.ones(shape_inp)])
check_symbolic_forward(dts_sym, [data_np], [expected])
check_symbolic_backward(dts_sym, [data_np], [np.ones(shape_out)], [np.ones(shape_inp)])
def test_invalid_space_dim():
invalid_shape_inp = (n , c, block - 1, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
def test_invalid_block_size():
block = 0
invalid_shape_inp = (n, c, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
def test_invalid_depth_dim():
invalid_shape_inp = (n, 0, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
test_invalid_space_dim()
test_invalid_block_size()
test_invalid_depth_dim()
@with_seed()
def test_softmax_cross_entropy():
def f_sm_ce(data, label):
return np.sum(-np.log(data) * label)
data = mx.sym.Variable('data')
label = mx.sym.Variable('label')
sym = mx.sym.softmax_cross_entropy(data=data, label=label)
num_labels = random.randint(100, 200)
batch_size = random.randint(100, 200)
np_data = rand_ndarray((batch_size, num_labels), stype='default').asnumpy()
np_sm = np_softmax(np_data)
np_label = np.random.randint(0, num_labels, (batch_size, ))
np_one_hot_label = np.zeros((batch_size, num_labels))
np_one_hot_label[np.arange(batch_size), np_label] = 1.
check_symbolic_forward(sym, {'data' : np_data, 'label' : np_label}, [np.array([f_sm_ce(np_sm, np_one_hot_label)])], rtol=1e-3, atol=1e-5)
@with_seed()
def test_split_v2():
dim = random.randint(2, 6)
shape = rand_shape_nd(dim)
axis = random.randint(-dim, dim-1)
axis_size = shape[axis]
samples = random.randint(0, axis_size - 1)
indices = sorted(random.sample([i for i in range(1, axis_size)], samples))
indices = tuple(indices)
mx_data = rand_ndarray(shape)
np_data = mx_data.asnumpy()
np_out = np.split(np_data, indices_or_sections=indices, axis=axis)
data = mx.sym.Variable("data")
sym = mx.sym.split_v2(data, indices_or_sections=indices, axis=axis)
check_symbolic_forward(sym, {"data": mx_data}, np_out, rtol=1e-3, atol=1e-5)
out_grad = [np.ones(arr.shape) for arr in np_out]
check_symbolic_backward(sym, {"data": mx_data}, out_grad, [np.concatenate(out_grad, axis=axis)])
@with_seed()
def test_invalid_kernel_size():
invalid_kernel_size = 28
assert_exception(
mx.nd.Correlation,
MXNetError,
mx.nd.array(np.random.rand(1, 1, 28, 28)),
mx.nd.array(np.random.rand(1, 1, 28, 28)),
kernel_size=invalid_kernel_size)
@with_seed()
def test_valid_kernel_size():
valid_kernel_size = 9
mx.nd.Correlation(
mx.nd.array(np.random.rand(1, 1, 28, 28)),
mx.nd.array(np.random.rand(1, 1, 28, 28)),
kernel_size=valid_kernel_size)
@with_seed()
def test_valid_max_pooling_pad_type_same():
import math
input_data = mx.nd.array(np.random.rand(1,1,10))
stride = 2
kernel = 2
output_data=mx.nd.Pooling(
input_data,
kernel=kernel,
stride=stride,
pad=(0,0,0),
pool_type='max',
name='pooling',
pooling_convention="same")
assert(math.ceil(input_data.shape[2]/stride) == output_data.shape[2])
@with_seed()
def test_invalid_max_pooling_pad_type_same():
import math
input_data = mx.nd.array(np.random.rand(1,1,10))
stride = 2
kernel = 2
pad = 2
assert_exception(
mx.nd.Pooling,
MXNetError,
input_data,
stride=stride,
kernel=kernel,
pad=pad,
pool_type='max',
name='pooling',
pooling_convention="same")
@with_seed()
def test_image_normalize():
# Part 1 - Test 3D Input
shape_3d = (3, 28, 28)
mean = (0, 1, 2)
std = (3, 2, 1)
data_in_3d = mx.nd.random.uniform(0, 1, shape_3d)
data_expected_3d = data_in_3d.asnumpy()
data_expected_3d[:][:][0] = data_expected_3d[:][:][0] / 3.0
data_expected_3d[:][:][1] = (data_expected_3d[:][:][1] - 1.0) / 2.0
data_expected_3d[:][:][2] = data_expected_3d[:][:][2] - 2.0
data = mx.symbol.Variable('data')
img_norm_sym = mx.sym.image.normalize(data=data, mean=mean, std=std)
# check forward
check_symbolic_forward(img_norm_sym, [data_in_3d], [data_expected_3d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_3d = np.ones(shape_3d)
grad_expected_3d[:][:][0] = 1 / 3.0
grad_expected_3d[:][:][1] = 1 / 2.0
grad_expected_3d[:][:][2] = 1 / 1.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_3d], out_grads=[mx.nd.ones(shape_3d)],
expected=[grad_expected_3d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_3d], atol=0.001)
# Part 2 - Test 4D Input
shape_4d = (2, 3, 28, 28)
data_in_4d = mx.nd.random.uniform(0, 1, shape_4d)
data_expected_4d = data_in_4d.asnumpy()
data_expected_4d[0][:][:][0] = data_expected_4d[0][:][:][0] / 3.0
data_expected_4d[0][:][:][1] = (data_expected_4d[0][:][:][1] - 1.0) / 2.0
data_expected_4d[0][:][:][2] = data_expected_4d[0][:][:][2] - 2.0
data_expected_4d[1][:][:][0] = data_expected_4d[1][:][:][0] / 3.0
data_expected_4d[1][:][:][1] = (data_expected_4d[1][:][:][1] - 1.0) / 2.0
data_expected_4d[1][:][:][2] = data_expected_4d[1][:][:][2] - 2.0
# check forward
check_symbolic_forward(img_norm_sym, [data_in_4d], [data_expected_4d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_4d = np.ones(shape_4d)
grad_expected_4d[0][:][:][0] = 1 / 3.0
grad_expected_4d[0][:][:][1] = 1 / 2.0
grad_expected_4d[0][:][:][2] = 1 / 1.0
grad_expected_4d[1][:][:][0] = 1 / 3.0
grad_expected_4d[1][:][:][1] = 1 / 2.0
grad_expected_4d[1][:][:][2] = 1 / 1.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_4d], out_grads=[mx.nd.ones(shape_4d)],
expected=[grad_expected_4d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_4d], atol=0.001)
if __name__ == '__main__':
import nose
nose.runmodule()
|
dma_timer.py
|
__author__ = "David Northcote (Modified from Craig Ramsay)"
__organisation__ = "The Univeristy of Strathclyde"
__support__ = "https://github.com/strath-sdr/rfsoc_radio"
import time
import threading
import ipywidgets as ipw
class DmaTimer():
"""Class for scheduling periodic callbacks.
Timer class for periodically passing new data from a generator to a
callback function. Useful for passing data from DMA transfers back to a
visualisation function.
"""
def __init__(self, callback, gen, t):
"""Create new dma-based data timer.
callback: function to call with data chunk
gen: function to call to return data chunk
(usually a dma channel's transfer function)
t: time between each generated data chunk
"""
self.callback = callback
self.gen = gen
self.t = t
self.stopping = True
self._start_button = ipw.Button(description=u'\u25B6',
layout=ipw.Layout(margin='auto'))
self._start_button.on_click(lambda _: self.start())
self._stop_button = ipw.Button(description=u'\u25A0',
layout=ipw.Layout(margin='auto'))
self._stop_button.on_click(lambda _: self.stop())
self._stop_button.style.button_color = 'tomato'
self._start_button.style.button_color = 'lightgray'
def _do(self):
"""Generate new data and restart timer thread.
Should never be run directly; use `start()` instead.
"""
while not self.stopping:
next_timer = time.time() + self.t
self.callback(self.gen())
sleep_time = next_timer - time.time()
if sleep_time > 0:
time.sleep(sleep_time)
def start(self):
"""Start the data generator thread."""
if self.stopping:
self._start_button.style.button_color = 'lightgreen'
self._stop_button.style.button_color = 'lightgray'
self.stopping = False
thread = threading.Thread(target=self._do)
thread.start()
def stop(self):
"""Stop a running data generator thread.
Does not need a lock, since the spawned timer thread will only read `self.stopping`.
"""
self._start_button.style.button_color = 'lightgray'
self._stop_button.style.button_color = 'tomato'
self.stopping = True
def get_widget(self):
"""Get ipywidget controls to stop and start the generator thread."""
return [self._start_button, self._stop_button]
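# --- Usage sketch (illustrative addition, not part of the original module) ---
# A minimal example of how DmaTimer might be wired up, assuming a hypothetical
# data source and display callback; in the intended design the generator would
# be a DMA channel's transfer function and the callback a plot-update routine.
if __name__ == '__main__':
    def fake_dma_transfer():
        # Stand-in for a DMA read: return a fresh chunk of samples.
        return list(range(1024))

    def update_display(data):
        # Stand-in for a visualisation callback.
        print('received chunk of %d samples' % len(data))

    timer = DmaTimer(update_display, fake_dma_transfer, t=0.5)
    timer.start()      # spawns the generator thread
    time.sleep(2)
    timer.stop()       # thread exits after the current iteration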
|
websocket_client.py
|
import json
import ssl
import sys
import traceback
import socket
from datetime import datetime
from threading import Lock, Thread
from time import sleep
import time
import websocket
class WebsocketClient(object):
"""
Websocket API
After creating the client object, use start() to run worker and ping threads.
The worker thread connects the websocket automatically.
Use stop() to stop the threads and disconnect the websocket before destroying the client
object (especially when exiting the programme).
Default serialization format is json.
Callbacks to override:
* unpack_data
* on_connected
* on_disconnected
* on_packet
* on_error
After start() is called, the ping thread will ping the server every 60 seconds.
If you want to send anything other than JSON, override send_packet.
"""
def __init__(self):
"""Constructor"""
self.host = None
self._ws_lock = Lock()
self._ws = None
self._worker_thread = None
self._ping_thread = None
self._active = False
self.proxy_host = None
self.proxy_port = None
self.ping_interval = 60 # seconds
self.header = {}
# For debugging
self._last_sent_text = None
self._last_received_text = None
def init(self, host: str, proxy_host: str = "", proxy_port: int = 0, ping_interval: int = 60, header: dict = None):
"""
:param ping_interval: unit: seconds, type: int
"""
self.host = host
self.ping_interval = ping_interval # seconds
if header:
self.header = header
if proxy_host and proxy_port:
self.proxy_host = proxy_host
self.proxy_port = proxy_port
def start(self):
"""
Start the client; the on_connected callback is called after the websocket
is connected successfully.
Please don't send packets until the on_connected callback has been called.
"""
self._active = True
self._worker_thread = Thread(target=self._run)
self._worker_thread.start()
self._ping_thread = Thread(target=self._run_ping)
self._ping_thread.start()
def stop(self):
"""
Stop the client.
"""
self._active = False
self._disconnect()
def join(self):
"""
Wait till all threads finish.
This function cannot be called from worker thread or callback function.
"""
self._ping_thread.join()
self._worker_thread.join()
def send_packet(self, packet: dict):
"""
Send a packet (dict data) to the server.
Override this if you want to send a non-JSON packet.
"""
text = json.dumps(packet)
self._record_last_sent_text(text)
return self._send_text(text)
def _send_text(self, text: str):
"""
Send a text string to server.
"""
ws = self._ws
if ws:
ws.send(text, opcode=websocket.ABNF.OPCODE_TEXT)
def _send_binary(self, data: bytes):
"""
Send bytes data to server.
"""
ws = self._ws
if ws:
ws._send_binary(data)
def _create_connection(self, *args, **kwargs):
""""""
return websocket.create_connection(*args, **kwargs)
def _ensure_connection(self):
""""""
triggered = False
with self._ws_lock:
if self._ws is None:
self._ws = self._create_connection(
self.host,
sslopt={"cert_reqs": ssl.CERT_NONE},
http_proxy_host=self.proxy_host,
http_proxy_port=self.proxy_port,
header=self.header
)
triggered = True
if triggered:
self.on_connected()
def _disconnect(self):
"""
"""
triggered = False
with self._ws_lock:
if self._ws:
ws: websocket.WebSocket = self._ws
self._ws = None
triggered = True
if triggered:
ws.close()
self.on_disconnected()
def _run(self):
"""
Keep running till stop is called.
"""
try:
while self._active:
try:
self._ensure_connection()
ws = self._ws
if ws:
text = ws.recv()
# an empty result means the ws object was closed while recv was blocking
if not text:
self._disconnect()
continue
self._record_last_received_text(text)
try:
data = self.unpack_data(text)
except ValueError as e:
print("websocket unable to parse data: " + text)
raise e
self.on_packet(data)
# ws is closed before recv function is called
# For socket.error, see Issue #1608
except (websocket.WebSocketConnectionClosedException, socket.error):
self._disconnect()
# other internal exception raised in on_packet
except: # noqa
et, ev, tb = sys.exc_info()
self.on_error(et, ev, tb)
self._disconnect()
except: # noqa
et, ev, tb = sys.exc_info()
self.on_error(et, ev, tb)
self._disconnect()
@staticmethod
def unpack_data(data: str):
"""
Default serialization format is json.
Override this method if you want to use another serialization format.
"""
return json.loads(data)
def _run_ping(self):
""""""
while self._active:
try:
self._ping()
except: # noqa
et, ev, tb = sys.exc_info()
self.on_error(et, ev, tb)
# self._run() will reconnect websocket
sleep(1)
for i in range(self.ping_interval):
if not self._active:
break
sleep(1)
def _ping(self):
""""""
#ws = self._ws
#if ws:
# ws.send("ping", websocket.ABNF.OPCODE_PING)
timestamp = int(time.time())
req = {"cmd":"ping","args":[timestamp],"id":"coray1912"}
self.send_packet(req)
@staticmethod
def on_connected():
"""
Callback when websocket is connected successfully.
"""
pass
@staticmethod
def on_disconnected():
"""
Callback when websocket connection is lost.
"""
pass
@staticmethod
def on_packet(packet: dict):
"""
Callback when receiving data from server.
"""
pass
def on_error(self, exception_type: type, exception_value: Exception, tb):
"""
Callback when exception raised.
"""
sys.stderr.write(
self.exception_detail(exception_type, exception_value, tb)
)
return sys.excepthook(exception_type, exception_value, tb)
def exception_detail(
self, exception_type: type, exception_value: Exception, tb
):
"""
Print detailed exception information.
"""
text = "[{}]: Unhandled WebSocket Error:{}\n".format(
datetime.now().isoformat(), exception_type
)
text += "LastSentText:\n{}\n".format(self._last_sent_text)
text += "LastReceivedText:\n{}\n".format(self._last_received_text)
text += "Exception trace: \n"
text += "".join(
traceback.format_exception(exception_type, exception_value, tb)
)
return text
def _record_last_sent_text(self, text: str):
"""
Record last sent text for debug purpose.
"""
self._last_sent_text = text[:1000]
def _record_last_received_text(self, text: str):
"""
Record last received text for debug purpose.
"""
self._last_received_text = text[:1000]
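# --- Usage sketch (illustrative addition, not part of the original module) ---
# A minimal subclass showing how the callback hooks are meant to be overridden.
# The host URL below is a placeholder; real endpoints and packet formats depend
# on the server being targeted.
class EchoClient(WebsocketClient):
    def on_connected(self):
        print('connected')

    def on_disconnected(self):
        print('disconnected')

    def on_packet(self, packet: dict):
        print('packet received:', packet)


if __name__ == '__main__':
    client = EchoClient()
    client.init(host='wss://example.com/ws', ping_interval=60)
    client.start()    # worker thread connects and starts receiving
    sleep(10)
    client.stop()
    client.join()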
|
multiprocess.py
|
# -*- coding: utf-8 -*-
'''
Created on 6 July 2017
@author: luzs
'''
from multiprocessing import Process, Pool
import os
import time
import random
# child process
def myProcess(name):
print('Run child process %s (%s)' % (name, os.getpid()))
# process pool task
def long_time_task(name):
print('Run task %s (%s)' % (name, os.getpid()))
start = time.time()
time.sleep(random.random() * 3)
end = time.time()
print('Task %s runs %0.2f seconds.' % (name, (end - start)))
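# --- Additional sketch (illustrative, not part of the original script) ---
# apply_async returns an AsyncResult; calling .get() on it retrieves the task's
# return value (and re-raises any exception from the worker). A hypothetical
# variant that collects results:
def square(x):
    return x * x

def run_pool_with_results():
    pool = Pool(4)
    async_results = [pool.apply_async(square, args=(x,)) for x in range(5)]
    pool.close()
    pool.join()
    print('squares:', [r.get() for r in async_results])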
#
if __name__ == '__main__':
# print('Parent Process %s' % os.getpid())
# p = Process(target=myProcess, args=('test',))
# print('Child process will start.')
# p.start()
# p.join()
# print('Child process end.')
#####################################################################
# Calling join() on the Pool waits for all child processes to finish; close() must be called before join(), and no new processes can be added after close().
pool = Pool(4)
for x in range(5):
pool.apply_async(long_time_task,args=(x,))
print('Waiting for all subprocesses done...')
pool.close()
pool.join()
print('All subprocesses done.')
|
app.py
|
#!/usr/bin/env python
from importlib import import_module
import os
from flask import Flask, render_template, Response
from dotenv import load_dotenv
from weather import Weather
import json
import threading
load_dotenv()
# import camera driver
if os.environ.get('CAMERA'):
Camera = import_module('camera_' + os.environ['CAMERA']).Camera
else:
from camera import Camera
# Raspberry Pi camera module (requires picamera package)
# from camera_pi import Camera
app = Flask(__name__)
@app.route('/')
def index():
"""Video streaming home page."""
return render_template('index.html')
def gen(camera):
"""Video streaming generator function."""
while True:
frame = camera.get_frame()
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
@app.route('/weather/inside')
def inside_temperature():
resultJson = {
'temperature': Weather.get_inside_temp(),
'humidity': Weather.get_inside_humidity(),
'pressure': Weather.get_inside_pressure(),
}
return json.dumps(resultJson)
@app.route('/weather/outside')
def outside_temperature():
resultJson = Weather.get_outside_weather()
return json.dumps(resultJson)
@app.route('/video_feed')
def video_feed():
"""Video streaming route. Put this in the src attribute of an img tag."""
return Response(gen(Camera()),
mimetype='multipart/x-mixed-replace; boundary=frame')
@app.after_request
def apply_caching(response):
response.headers["Access-Control-Allow-Origin"] = "*"
return response
if __name__ == '__main__':
t1 = threading.Thread(target=Weather.start_weather_polling)
t1.start()
app.run(host='0.0.0.0', threaded=True)
|
pwm.py
|
import threading
import time
import RPi.GPIO as GPIO
RED = 7
BLUE = 22
QUIT = 0
GPIO.setmode(GPIO.BOARD)
GPIO.setup(BLUE, GPIO.OUT)
GPIO.setup(RED, GPIO.OUT)
b = GPIO.PWM(BLUE, 50)  # channel=22, frequency=50Hz
b.start(0)
r = GPIO.PWM(RED, 50)  # channel=7, frequency=50Hz
r.start(0)
def cycle_red():
print "in red"
while QUIT == 0:
print "loop start"
for dc in range(0, 101, 5):
r.ChangeDutyCycle(dc)
time.sleep(0.1)
for dc in range(100, -1, -5):
r.ChangeDutyCycle(dc)
time.sleep(0.1)
def cycle_blue():
while QUIT == 0:
for dc in range(0, 101, 5):
b.ChangeDutyCycle(dc)
time.sleep(0.1)
for dc in range(100, -1, -5):
b.ChangeDutyCycle(dc)
time.sleep(0.1)
if __name__ == "__main__":
t1 = threading.Thread(target = cycle_red, args= ())
t2 = threading.Thread(target = cycle_blue, args= ())
t1.start()
t2.start()
try:
while 1:
time.sleep(.1)
except KeyboardInterrupt:
QUIT = 1
pass
print('exiting')
GPIO.cleanup()
|
util.py
|
#
# Copyright (C) 2012-2017 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import codecs
from collections import deque
import contextlib
import csv
from glob import iglob as std_iglob
import io
import json
import logging
import os
import py_compile
import re
import socket
try:
import ssl
except ImportError: # pragma: no cover
ssl = None
import subprocess
import sys
import tarfile
import tempfile
import textwrap
try:
import threading
except ImportError: # pragma: no cover
import dummy_threading as threading
import time
from . import DistlibException
from .compat import (string_types, text_type, shutil, raw_input, StringIO,
cache_from_source, urlopen, urljoin, httplib, xmlrpclib,
splittype, HTTPHandler, BaseConfigurator, valid_ident,
Container, configparser, URLError, ZipFile, fsdecode,
unquote, urlparse)
logger = logging.getLogger(__name__)
#
# Requirement parsing code as per PEP 508
#
IDENTIFIER = re.compile(r'^([\w\.-]+)\s*')
VERSION_IDENTIFIER = re.compile(r'^([\w\.*+-]+)\s*')
COMPARE_OP = re.compile(r'^(<=?|>=?|={2,3}|[~!]=)\s*')
MARKER_OP = re.compile(r'^((<=?)|(>=?)|={2,3}|[~!]=|in|not\s+in)\s*')
OR = re.compile(r'^or\b\s*')
AND = re.compile(r'^and\b\s*')
NON_SPACE = re.compile(r'(\S+)\s*')
STRING_CHUNK = re.compile(r'([\s\w\.{}()*+#:;,/?!~`@$%^&=|<>\[\]-]+)')
def parse_marker(marker_string):
"""
Parse a marker string and return a dictionary containing a marker expression.
The dictionary will contain keys "op", "lhs" and "rhs" for non-terminals in
the expression grammar, or strings. A string contained in quotes is to be
interpreted as a literal string, and a string not contained in quotes is a
variable (such as os_name).
"""
def marker_var(remaining):
# either identifier, or literal string
m = IDENTIFIER.match(remaining)
if m:
result = m.groups()[0]
remaining = remaining[m.end():]
elif not remaining:
raise SyntaxError('unexpected end of input')
else:
q = remaining[0]
if q not in '\'"':
raise SyntaxError('invalid expression: %s' % remaining)
oq = '\'"'.replace(q, '')
remaining = remaining[1:]
parts = [q]
while remaining:
# either a string chunk, or oq, or q to terminate
if remaining[0] == q:
break
elif remaining[0] == oq:
parts.append(oq)
remaining = remaining[1:]
else:
m = STRING_CHUNK.match(remaining)
if not m:
raise SyntaxError('error in string literal: %s' % remaining)
parts.append(m.groups()[0])
remaining = remaining[m.end():]
else:
s = ''.join(parts)
raise SyntaxError('unterminated string: %s' % s)
parts.append(q)
result = ''.join(parts)
remaining = remaining[1:].lstrip() # skip past closing quote
return result, remaining
def marker_expr(remaining):
if remaining and remaining[0] == '(':
result, remaining = marker(remaining[1:].lstrip())
if remaining[0] != ')':
raise SyntaxError('unterminated parenthesis: %s' % remaining)
remaining = remaining[1:].lstrip()
else:
lhs, remaining = marker_var(remaining)
while remaining:
m = MARKER_OP.match(remaining)
if not m:
break
op = m.groups()[0]
remaining = remaining[m.end():]
rhs, remaining = marker_var(remaining)
lhs = {'op': op, 'lhs': lhs, 'rhs': rhs}
result = lhs
return result, remaining
def marker_and(remaining):
lhs, remaining = marker_expr(remaining)
while remaining:
m = AND.match(remaining)
if not m:
break
remaining = remaining[m.end():]
rhs, remaining = marker_expr(remaining)
lhs = {'op': 'and', 'lhs': lhs, 'rhs': rhs}
return lhs, remaining
def marker(remaining):
lhs, remaining = marker_and(remaining)
while remaining:
m = OR.match(remaining)
if not m:
break
remaining = remaining[m.end():]
rhs, remaining = marker_and(remaining)
lhs = {'op': 'or', 'lhs': lhs, 'rhs': rhs}
return lhs, remaining
return marker(marker_string)
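# --- Worked example (illustrative, not part of the original module) ---
# parse_marker returns the parsed expression together with any unparsed
# remainder; for a typical PEP 508 environment marker:
#
#     expr, rest = parse_marker('python_version >= "3.6" and os_name == "posix"')
#     # expr == {'op': 'and',
#     #          'lhs': {'op': '>=', 'lhs': 'python_version', 'rhs': '"3.6"'},
#     #          'rhs': {'op': '==', 'lhs': 'os_name', 'rhs': '"posix"'}}
#     # rest == ''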
def parse_requirement(req):
"""
Parse a requirement passed in as a string. Return a Container
whose attributes contain the various parts of the requirement.
"""
remaining = req.strip()
if not remaining or remaining.startswith('#'):
return None
m = IDENTIFIER.match(remaining)
if not m:
raise SyntaxError('name expected: %s' % remaining)
distname = m.groups()[0]
remaining = remaining[m.end():]
extras = mark_expr = versions = uri = None
if remaining and remaining[0] == '[':
i = remaining.find(']', 1)
if i < 0:
raise SyntaxError('unterminated extra: %s' % remaining)
s = remaining[1:i]
remaining = remaining[i + 1:].lstrip()
extras = []
while s:
m = IDENTIFIER.match(s)
if not m:
raise SyntaxError('malformed extra: %s' % s)
extras.append(m.groups()[0])
s = s[m.end():]
if not s:
break
if s[0] != ',':
raise SyntaxError('comma expected in extras: %s' % s)
s = s[1:].lstrip()
if not extras:
extras = None
if remaining:
if remaining[0] == '@':
# it's a URI
remaining = remaining[1:].lstrip()
m = NON_SPACE.match(remaining)
if not m:
raise SyntaxError('invalid URI: %s' % remaining)
uri = m.groups()[0]
t = urlparse(uri)
# there are issues with Python and URL parsing, so this test
# is a bit crude. See bpo-20271, bpo-23505. Python doesn't
# always parse invalid URLs correctly - it should raise
# exceptions for malformed URLs
if not (t.scheme and t.netloc):
raise SyntaxError('Invalid URL: %s' % uri)
remaining = remaining[m.end():].lstrip()
else:
def get_versions(ver_remaining):
"""
Return a list of operator, version tuples if any are
specified, else None.
"""
m = COMPARE_OP.match(ver_remaining)
versions = None
if m:
versions = []
while True:
op = m.groups()[0]
ver_remaining = ver_remaining[m.end():]
m = VERSION_IDENTIFIER.match(ver_remaining)
if not m:
raise SyntaxError('invalid version: %s' % ver_remaining)
v = m.groups()[0]
versions.append((op, v))
ver_remaining = ver_remaining[m.end():]
if not ver_remaining or ver_remaining[0] != ',':
break
ver_remaining = ver_remaining[1:].lstrip()
m = COMPARE_OP.match(ver_remaining)
if not m:
raise SyntaxError('invalid constraint: %s' % ver_remaining)
if not versions:
versions = None
return versions, ver_remaining
if remaining[0] != '(':
versions, remaining = get_versions(remaining)
else:
i = remaining.find(')', 1)
if i < 0:
raise SyntaxError('unterminated parenthesis: %s' % remaining)
s = remaining[1:i]
remaining = remaining[i + 1:].lstrip()
# As a special diversion from PEP 508, allow a version number
# a.b.c in parentheses as a synonym for ~= a.b.c (because this
# is allowed in earlier PEPs)
if COMPARE_OP.match(s):
versions, _ = get_versions(s)
else:
m = VERSION_IDENTIFIER.match(s)
if not m:
raise SyntaxError('invalid constraint: %s' % s)
v = m.groups()[0]
s = s[m.end():].lstrip()
if s:
raise SyntaxError('invalid constraint: %s' % s)
versions = [('~=', v)]
if remaining:
if remaining[0] != ';':
raise SyntaxError('invalid requirement: %s' % remaining)
remaining = remaining[1:].lstrip()
mark_expr, remaining = parse_marker(remaining)
if remaining and remaining[0] != '#':
raise SyntaxError('unexpected trailing data: %s' % remaining)
if not versions:
rs = distname
else:
rs = '%s %s' % (distname, ', '.join(['%s %s' % con for con in versions]))
return Container(name=distname, extras=extras, constraints=versions,
marker=mark_expr, url=uri, requirement=rs)
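# --- Worked example (illustrative, not part of the original module) ---
# A requirement string is split into its component parts; for example:
#
#     r = parse_requirement('requests[security] (>=2.8.1); python_version >= "3.6"')
#     # r.name        == 'requests'
#     # r.extras      == ['security']
#     # r.constraints == [('>=', '2.8.1')]
#     # r.marker      == {'op': '>=', 'lhs': 'python_version', 'rhs': '"3.6"'}
#     # r.requirement == 'requests >= 2.8.1'
#     # r.url         is None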
def get_resources_dests(resources_root, rules):
"""Find destinations for resources files"""
def get_rel_path(root, path):
# normalizes and returns a lstripped-/-separated path
root = root.replace(os.path.sep, '/')
path = path.replace(os.path.sep, '/')
assert path.startswith(root)
return path[len(root):].lstrip('/')
destinations = {}
for base, suffix, dest in rules:
prefix = os.path.join(resources_root, base)
for abs_base in iglob(prefix):
abs_glob = os.path.join(abs_base, suffix)
for abs_path in iglob(abs_glob):
resource_file = get_rel_path(resources_root, abs_path)
if dest is None: # remove the entry if it was here
destinations.pop(resource_file, None)
else:
rel_path = get_rel_path(abs_base, abs_path)
rel_dest = dest.replace(os.path.sep, '/').rstrip('/')
destinations[resource_file] = rel_dest + '/' + rel_path
return destinations
def in_venv():
if hasattr(sys, 'real_prefix'):
# virtualenv venvs
result = True
else:
# PEP 405 venvs
result = sys.prefix != getattr(sys, 'base_prefix', sys.prefix)
return result
def get_executable():
# The __PYVENV_LAUNCHER__ dance is apparently no longer needed, as
# changes to the stub launcher mean that sys.executable always points
# to the stub on OS X
# if sys.platform == 'darwin' and ('__PYVENV_LAUNCHER__'
# in os.environ):
# result = os.environ['__PYVENV_LAUNCHER__']
# else:
# result = sys.executable
# return result
result = os.path.normcase(sys.executable)
if not isinstance(result, text_type):
result = fsdecode(result)
return result
def proceed(prompt, allowed_chars, error_prompt=None, default=None):
p = prompt
while True:
s = raw_input(p)
p = prompt
if not s and default:
s = default
if s:
c = s[0].lower()
if c in allowed_chars:
break
if error_prompt:
p = '%c: %s\n%s' % (c, error_prompt, prompt)
return c
def extract_by_key(d, keys):
if isinstance(keys, string_types):
keys = keys.split()
result = {}
for key in keys:
if key in d:
result[key] = d[key]
return result
def read_exports(stream):
if sys.version_info[0] >= 3:
# needs to be a text stream
stream = codecs.getreader('utf-8')(stream)
# Try to load as JSON, falling back on legacy format
data = stream.read()
stream = StringIO(data)
try:
jdata = json.load(stream)
result = jdata['extensions']['python.exports']['exports']
for group, entries in result.items():
for k, v in entries.items():
s = '%s = %s' % (k, v)
entry = get_export_entry(s)
assert entry is not None
entries[k] = entry
return result
except Exception:
stream.seek(0, 0)
def read_stream(cp, stream):
if hasattr(cp, 'read_file'):
cp.read_file(stream)
else:
cp.readfp(stream)
cp = configparser.ConfigParser()
try:
read_stream(cp, stream)
except configparser.MissingSectionHeaderError:
stream.close()
data = textwrap.dedent(data)
stream = StringIO(data)
read_stream(cp, stream)
result = {}
for key in cp.sections():
result[key] = entries = {}
for name, value in cp.items(key):
s = '%s = %s' % (name, value)
entry = get_export_entry(s)
assert entry is not None
#entry.dist = self
entries[name] = entry
return result
def write_exports(exports, stream):
if sys.version_info[0] >= 3:
# needs to be a text stream
stream = codecs.getwriter('utf-8')(stream)
cp = configparser.ConfigParser()
for k, v in exports.items():
# TODO check k, v for valid values
cp.add_section(k)
for entry in v.values():
if entry.suffix is None:
s = entry.prefix
else:
s = '%s:%s' % (entry.prefix, entry.suffix)
if entry.flags:
s = '%s [%s]' % (s, ', '.join(entry.flags))
cp.set(k, entry.name, s)
cp.write(stream)
@contextlib.contextmanager
def tempdir():
td = tempfile.mkdtemp()
try:
yield td
finally:
shutil.rmtree(td)
@contextlib.contextmanager
def chdir(d):
cwd = os.getcwd()
try:
os.chdir(d)
yield
finally:
os.chdir(cwd)
@contextlib.contextmanager
def socket_timeout(seconds=15):
cto = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(seconds)
yield
finally:
socket.setdefaulttimeout(cto)
class cached_property(object):
def __init__(self, func):
self.func = func
#for attr in ('__name__', '__module__', '__doc__'):
# setattr(self, attr, getattr(func, attr, None))
def __get__(self, obj, cls=None):
if obj is None:
return self
value = self.func(obj)
object.__setattr__(obj, self.func.__name__, value)
#obj.__dict__[self.func.__name__] = value = self.func(obj)
return value
def convert_path(pathname):
"""Return 'pathname' as a name that will work on the native filesystem.
The path is split on '/' and put back together again using the current
directory separator. Needed because filenames in the setup script are
always supplied in Unix style, and have to be converted to the local
convention before we can actually use them in the filesystem. Raises
ValueError on non-Unix-ish systems if 'pathname' either starts or
ends with a slash.
"""
if os.sep == '/':
return pathname
if not pathname:
return pathname
if pathname[0] == '/':
raise ValueError("path '%s' cannot be absolute" % pathname)
if pathname[-1] == '/':
raise ValueError("path '%s' cannot end with '/'" % pathname)
paths = pathname.split('/')
while os.curdir in paths:
paths.remove(os.curdir)
if not paths:
return os.curdir
return os.path.join(*paths)
class FileOperator(object):
def __init__(self, dry_run=False):
self.dry_run = dry_run
self.ensured = set()
self._init_record()
def _init_record(self):
self.record = False
self.files_written = set()
self.dirs_created = set()
def record_as_written(self, path):
if self.record:
self.files_written.add(path)
def newer(self, source, target):
"""Tell if the target is newer than the source.
Returns true if 'source' exists and is more recently modified than
'target', or if 'source' exists and 'target' doesn't.
Returns false if both exist and 'target' is the same age or younger
than 'source'. Raise PackagingFileError if 'source' does not exist.
Note that this test is not very accurate: files created in the same
second will have the same "age".
"""
if not os.path.exists(source):
raise DistlibException("file '%r' does not exist" %
os.path.abspath(source))
if not os.path.exists(target):
return True
return os.stat(source).st_mtime > os.stat(target).st_mtime
def copy_file(self, infile, outfile, check=True):
"""Copy a file respecting dry-run and force flags.
"""
self.ensure_dir(os.path.dirname(outfile))
logger.info('Copying %s to %s', infile, outfile)
if not self.dry_run:
msg = None
if check:
if os.path.islink(outfile):
msg = '%s is a symlink' % outfile
elif os.path.exists(outfile) and not os.path.isfile(outfile):
msg = '%s is a non-regular file' % outfile
if msg:
raise ValueError(msg + ' which would be overwritten')
shutil.copyfile(infile, outfile)
self.record_as_written(outfile)
def copy_stream(self, instream, outfile, encoding=None):
assert not os.path.isdir(outfile)
self.ensure_dir(os.path.dirname(outfile))
logger.info('Copying stream %s to %s', instream, outfile)
if not self.dry_run:
if encoding is None:
outstream = open(outfile, 'wb')
else:
outstream = codecs.open(outfile, 'w', encoding=encoding)
try:
shutil.copyfileobj(instream, outstream)
finally:
outstream.close()
self.record_as_written(outfile)
def write_binary_file(self, path, data):
self.ensure_dir(os.path.dirname(path))
if not self.dry_run:
if os.path.exists(path):
os.remove(path)
with open(path, 'wb') as f:
f.write(data)
self.record_as_written(path)
def write_text_file(self, path, data, encoding):
self.write_binary_file(path, data.encode(encoding))
def set_mode(self, bits, mask, files):
if os.name == 'posix' or (os.name == 'java' and os._name == 'posix'):
# Set the executable bits (owner, group, and world) on
# all the files specified.
for f in files:
if self.dry_run:
logger.info("changing mode of %s", f)
else:
mode = (os.stat(f).st_mode | bits) & mask
logger.info("changing mode of %s to %o", f, mode)
os.chmod(f, mode)
set_executable_mode = lambda s, f: s.set_mode(0o555, 0o7777, f)
def ensure_dir(self, path):
path = os.path.abspath(path)
if path not in self.ensured and not os.path.exists(path):
self.ensured.add(path)
d, f = os.path.split(path)
self.ensure_dir(d)
logger.info('Creating %s' % path)
if not self.dry_run:
os.mkdir(path)
if self.record:
self.dirs_created.add(path)
def byte_compile(self, path, optimize=False, force=False, prefix=None, hashed_invalidation=False):
dpath = cache_from_source(path, not optimize)
logger.info('Byte-compiling %s to %s', path, dpath)
if not self.dry_run:
if force or self.newer(path, dpath):
if not prefix:
diagpath = None
else:
assert path.startswith(prefix)
diagpath = path[len(prefix):]
compile_kwargs = {}
if hashed_invalidation and hasattr(py_compile, 'PycInvalidationMode'):
compile_kwargs['invalidation_mode'] = py_compile.PycInvalidationMode.CHECKED_HASH
py_compile.compile(path, dpath, diagpath, True, **compile_kwargs) # raise error
self.record_as_written(dpath)
return dpath
def ensure_removed(self, path):
if os.path.exists(path):
if os.path.isdir(path) and not os.path.islink(path):
logger.debug('Removing directory tree at %s', path)
if not self.dry_run:
shutil.rmtree(path)
if self.record:
if path in self.dirs_created:
self.dirs_created.remove(path)
else:
if os.path.islink(path):
s = 'link'
else:
s = 'file'
logger.debug('Removing %s %s', s, path)
if not self.dry_run:
os.remove(path)
if self.record:
if path in self.files_written:
self.files_written.remove(path)
def is_writable(self, path):
result = False
while not result:
if os.path.exists(path):
result = os.access(path, os.W_OK)
break
parent = os.path.dirname(path)
if parent == path:
break
path = parent
return result
def commit(self):
"""
Commit recorded changes, turn off recording, return
changes.
"""
assert self.record
result = self.files_written, self.dirs_created
self._init_record()
return result
def rollback(self):
if not self.dry_run:
for f in list(self.files_written):
if os.path.exists(f):
os.remove(f)
# dirs should all be empty now, except perhaps for
# __pycache__ subdirs
# reverse so that subdirs appear before their parents
dirs = sorted(self.dirs_created, reverse=True)
for d in dirs:
flist = os.listdir(d)
if flist:
assert flist == ['__pycache__']
sd = os.path.join(d, flist[0])
os.rmdir(sd)
os.rmdir(d) # should fail if non-empty
self._init_record()
def resolve(module_name, dotted_path):
if module_name in sys.modules:
mod = sys.modules[module_name]
else:
mod = __import__(module_name)
if dotted_path is None:
result = mod
else:
parts = dotted_path.split('.')
result = getattr(mod, parts.pop(0))
for p in parts:
result = getattr(result, p)
return result
class ExportEntry(object):
def __init__(self, name, prefix, suffix, flags):
self.name = name
self.prefix = prefix
self.suffix = suffix
self.flags = flags
@cached_property
def value(self):
return resolve(self.prefix, self.suffix)
def __repr__(self): # pragma: no cover
return '<ExportEntry %s = %s:%s %s>' % (self.name, self.prefix,
self.suffix, self.flags)
def __eq__(self, other):
if not isinstance(other, ExportEntry):
result = False
else:
result = (self.name == other.name and
self.prefix == other.prefix and
self.suffix == other.suffix and
self.flags == other.flags)
return result
__hash__ = object.__hash__
ENTRY_RE = re.compile(r'''(?P<name>(\w|[-.+])+)
\s*=\s*(?P<callable>(\w+)([:\.]\w+)*)
\s*(\[\s*(?P<flags>\w+(=\w+)?(,\s*\w+(=\w+)?)*)\s*\])?
''', re.VERBOSE)
def get_export_entry(specification):
m = ENTRY_RE.search(specification)
if not m:
result = None
if '[' in specification or ']' in specification:
raise DistlibException("Invalid specification "
"'%s'" % specification)
else:
d = m.groupdict()
name = d['name']
path = d['callable']
colons = path.count(':')
if colons == 0:
prefix, suffix = path, None
else:
if colons != 1:
raise DistlibException("Invalid specification "
"'%s'" % specification)
prefix, suffix = path.split(':')
flags = d['flags']
if flags is None:
if '[' in specification or ']' in specification:
raise DistlibException("Invalid specification "
"'%s'" % specification)
flags = []
else:
flags = [f.strip() for f in flags.split(',')]
result = ExportEntry(name, prefix, suffix, flags)
return result
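# --- Illustrative sketch (not part of distlib) ------------------------------
# Shows the fields produced by get_export_entry() for an entry-point style
# specification; the names used here are made up for the example.
def _demo_get_export_entry():
    entry = get_export_entry('console = distlib.scripts:ScriptMaker [gui]')
    # entry.name   -> 'console'
    # entry.prefix -> 'distlib.scripts'
    # entry.suffix -> 'ScriptMaker'
    # entry.flags  -> ['gui']
    return entry.name, entry.prefix, entry.suffix, entry.flags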
def get_cache_base(suffix=None):
"""
Return the default base location for distlib caches. If the directory does
not exist, it is created. Use the suffix provided for the base directory,
and default to '.distlib' if it isn't provided.
On Windows, if LOCALAPPDATA is defined in the environment, then it is
assumed to be a directory, and will be the parent directory of the result.
On POSIX, and on Windows if LOCALAPPDATA is not defined, the user's home
directory - using os.expanduser('~') - will be the parent directory of
the result.
The result is just the directory '.distlib' in the parent directory as
determined above, or with the name specified with ``suffix``.
"""
if suffix is None:
suffix = '.distlib'
if os.name == 'nt' and 'LOCALAPPDATA' in os.environ:
result = os.path.expandvars('$localappdata')
else:
# Assume posix, or old Windows
result = os.path.expanduser('~')
# we use 'isdir' instead of 'exists', because we want to
# fail if there's a file with that name
if os.path.isdir(result):
usable = os.access(result, os.W_OK)
if not usable:
logger.warning('Directory exists but is not writable: %s', result)
else:
try:
os.makedirs(result)
usable = True
except OSError:
logger.warning('Unable to create %s', result, exc_info=True)
usable = False
if not usable:
result = tempfile.mkdtemp()
logger.warning('Default location unusable, using %s', result)
return os.path.join(result, suffix)
def path_to_cache_dir(path):
"""
Convert an absolute path to a directory name for use in a cache.
The algorithm used is:
#. On Windows, any ``':'`` in the drive is replaced with ``'---'``.
#. Any occurrence of ``os.sep`` is replaced with ``'--'``.
#. ``'.cache'`` is appended.
"""
d, p = os.path.splitdrive(os.path.abspath(path))
if d:
d = d.replace(':', '---')
p = p.replace(os.sep, '--')
return d + p + '.cache'
def ensure_slash(s):
if not s.endswith('/'):
return s + '/'
return s
def parse_credentials(netloc):
username = password = None
if '@' in netloc:
prefix, netloc = netloc.rsplit('@', 1)
if ':' not in prefix:
username = prefix
else:
username, password = prefix.split(':', 1)
if username:
username = unquote(username)
if password:
password = unquote(password)
return username, password, netloc
def get_process_umask():
result = os.umask(0o22)
os.umask(result)
return result
def is_string_sequence(seq):
result = True
i = None
for i, s in enumerate(seq):
if not isinstance(s, string_types):
result = False
break
assert i is not None
return result
PROJECT_NAME_AND_VERSION = re.compile('([a-z0-9_]+([.-][a-z_][a-z0-9_]*)*)-'
'([a-z0-9_.+-]+)', re.I)
PYTHON_VERSION = re.compile(r'-py(\d\.?\d?)')
def split_filename(filename, project_name=None):
"""
Extract name, version, python version from a filename (no extension)
Return name, version, pyver or None
"""
result = None
pyver = None
filename = unquote(filename).replace(' ', '-')
m = PYTHON_VERSION.search(filename)
if m:
pyver = m.group(1)
filename = filename[:m.start()]
if project_name and len(filename) > len(project_name) + 1:
m = re.match(re.escape(project_name) + r'\b', filename)
if m:
n = m.end()
result = filename[:n], filename[n + 1:], pyver
if result is None:
m = PROJECT_NAME_AND_VERSION.match(filename)
if m:
result = m.group(1), m.group(3), pyver
return result
# Allow spaces in name because of legacy dists like "Twisted Core"
NAME_VERSION_RE = re.compile(r'(?P<name>[\w .-]+)\s*'
r'\(\s*(?P<ver>[^\s)]+)\)$')
def parse_name_and_version(p):
"""
A utility method used to get name and version from a string.
From e.g. a Provides-Dist value.
:param p: A value in a form 'foo (1.0)'
:return: The name and version as a tuple.
"""
m = NAME_VERSION_RE.match(p)
if not m:
raise DistlibException('Ill-formed name/version string: \'%s\'' % p)
d = m.groupdict()
return d['name'].strip().lower(), d['ver']
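# --- Illustrative sketch (not part of distlib) ------------------------------
# Example inputs and outputs for the two parsing helpers above; the project
# name and version are made up.
def _demo_name_parsing():
    # -> ('python-dateutil', '2.8.2', '3')
    nvp = split_filename('python-dateutil-2.8.2-py3', 'python-dateutil')
    # -> ('foo', '1.0')
    nv = parse_name_and_version('foo (1.0)')
    return nvp, nv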
def get_extras(requested, available):
result = set()
requested = set(requested or [])
available = set(available or [])
if '*' in requested:
requested.remove('*')
result |= available
for r in requested:
if r == '-':
result.add(r)
elif r.startswith('-'):
unwanted = r[1:]
if unwanted not in available:
logger.warning('undeclared extra: %s' % unwanted)
if unwanted in result:
result.remove(unwanted)
else:
if r not in available:
logger.warning('undeclared extra: %s' % r)
result.add(r)
return result
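# --- Illustrative sketch (not part of distlib) ------------------------------
# '*' selects every available extra and a leading '-' removes one again, so
# the call below returns {'docs'}.
def _demo_get_extras():
    return get_extras(['*', '-tests'], ['docs', 'tests'])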
#
# Extended metadata functionality
#
def _get_external_data(url):
result = {}
try:
# urlopen might fail if it runs into redirections,
# because of Python issue #13696. Fixed in locators
# using a custom redirect handler.
resp = urlopen(url)
headers = resp.info()
ct = headers.get('Content-Type')
if not ct.startswith('application/json'):
logger.debug('Unexpected response for JSON request: %s', ct)
else:
reader = codecs.getreader('utf-8')(resp)
#tmp = reader.read().decode('utf-8')
#result = json.loads(tmp)
result = json.load(reader)
except Exception as e:
        logger.exception('Failed to get external data for %s: %s', url, e)
return result
_external_data_base_url = 'https://www.red-dove.com/pypi/projects/'
def get_project_data(name):
url = '%s/%s/project.json' % (name[0].upper(), name)
url = urljoin(_external_data_base_url, url)
result = _get_external_data(url)
return result
def get_package_data(name, version):
url = '%s/%s/package-%s.json' % (name[0].upper(), name, version)
url = urljoin(_external_data_base_url, url)
return _get_external_data(url)
class Cache(object):
"""
A class implementing a cache for resources that need to live in the file system
e.g. shared libraries. This class was moved from resources to here because it
could be used by other modules, e.g. the wheel module.
"""
def __init__(self, base):
"""
Initialise an instance.
:param base: The base directory where the cache should be located.
"""
# we use 'isdir' instead of 'exists', because we want to
# fail if there's a file with that name
if not os.path.isdir(base): # pragma: no cover
os.makedirs(base)
if (os.stat(base).st_mode & 0o77) != 0:
logger.warning('Directory \'%s\' is not private', base)
self.base = os.path.abspath(os.path.normpath(base))
def prefix_to_dir(self, prefix):
"""
Converts a resource prefix to a directory name in the cache.
"""
return path_to_cache_dir(prefix)
def clear(self):
"""
Clear the cache.
"""
not_removed = []
for fn in os.listdir(self.base):
fn = os.path.join(self.base, fn)
try:
if os.path.islink(fn) or os.path.isfile(fn):
os.remove(fn)
elif os.path.isdir(fn):
shutil.rmtree(fn)
except Exception:
not_removed.append(fn)
return not_removed
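# --- Illustrative sketch (not part of distlib) ------------------------------
# Creates a Cache under a throw-away base directory and shows how a resource
# prefix is mapped to a per-prefix cache directory name.
def _demo_cache():
    base = tempfile.mkdtemp()
    cache = Cache(base)
    # e.g. '/usr/lib/foo' -> '--usr--lib--foo.cache' on POSIX
    subdir = cache.prefix_to_dir('/usr/lib/foo')
    cache.clear()   # returns the entries it could not remove (empty here)
    return subdir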
class EventMixin(object):
"""
A very simple publish/subscribe system.
"""
def __init__(self):
self._subscribers = {}
def add(self, event, subscriber, append=True):
"""
Add a subscriber for an event.
:param event: The name of an event.
:param subscriber: The subscriber to be added (and called when the
event is published).
:param append: Whether to append or prepend the subscriber to an
existing subscriber list for the event.
"""
subs = self._subscribers
if event not in subs:
subs[event] = deque([subscriber])
else:
sq = subs[event]
if append:
sq.append(subscriber)
else:
sq.appendleft(subscriber)
def remove(self, event, subscriber):
"""
Remove a subscriber for an event.
:param event: The name of an event.
:param subscriber: The subscriber to be removed.
"""
subs = self._subscribers
if event not in subs:
raise ValueError('No subscribers: %r' % event)
subs[event].remove(subscriber)
def get_subscribers(self, event):
"""
Return an iterator for the subscribers for an event.
:param event: The event to return subscribers for.
"""
return iter(self._subscribers.get(event, ()))
def publish(self, event, *args, **kwargs):
"""
Publish a event and return a list of values returned by its
subscribers.
:param event: The event to publish.
:param args: The positional arguments to pass to the event's
subscribers.
:param kwargs: The keyword arguments to pass to the event's
subscribers.
"""
result = []
for subscriber in self.get_subscribers(event):
try:
value = subscriber(event, *args, **kwargs)
except Exception:
logger.exception('Exception during event publication')
value = None
result.append(value)
logger.debug('publish %s: args = %s, kwargs = %s, result = %s',
event, args, kwargs, result)
return result
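# --- Illustrative sketch (not part of distlib) ------------------------------
# Minimal use of EventMixin: one subscriber is registered for a made-up
# 'build' event and publish() collects its return value.
def _demo_event_mixin():
    bus = EventMixin()
    def on_build(event, name):
        return name.upper()
    bus.add('build', on_build)
    results = bus.publish('build', 'wheel')   # -> ['WHEEL']
    bus.remove('build', on_build)
    return results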
#
# Simple sequencing
#
class Sequencer(object):
def __init__(self):
self._preds = {}
self._succs = {}
self._nodes = set() # nodes with no preds/succs
def add_node(self, node):
self._nodes.add(node)
def remove_node(self, node, edges=False):
if node in self._nodes:
self._nodes.remove(node)
if edges:
for p in set(self._preds.get(node, ())):
self.remove(p, node)
for s in set(self._succs.get(node, ())):
self.remove(node, s)
# Remove empties
for k, v in list(self._preds.items()):
if not v:
del self._preds[k]
for k, v in list(self._succs.items()):
if not v:
del self._succs[k]
def add(self, pred, succ):
assert pred != succ
self._preds.setdefault(succ, set()).add(pred)
self._succs.setdefault(pred, set()).add(succ)
def remove(self, pred, succ):
assert pred != succ
try:
preds = self._preds[succ]
succs = self._succs[pred]
except KeyError: # pragma: no cover
raise ValueError('%r not a successor of anything' % succ)
try:
preds.remove(pred)
succs.remove(succ)
except KeyError: # pragma: no cover
raise ValueError('%r not a successor of %r' % (succ, pred))
def is_step(self, step):
return (step in self._preds or step in self._succs or
step in self._nodes)
def get_steps(self, final):
if not self.is_step(final):
raise ValueError('Unknown: %r' % final)
result = []
todo = []
seen = set()
todo.append(final)
while todo:
step = todo.pop(0)
if step in seen:
# if a step was already seen,
# move it to the end (so it will appear earlier
# when reversed on return) ... but not for the
# final step, as that would be confusing for
# users
if step != final:
result.remove(step)
result.append(step)
else:
seen.add(step)
result.append(step)
preds = self._preds.get(step, ())
todo.extend(preds)
return reversed(result)
@property
def strong_connections(self):
#http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
index_counter = [0]
stack = []
lowlinks = {}
index = {}
result = []
graph = self._succs
def strongconnect(node):
# set the depth index for this node to the smallest unused index
index[node] = index_counter[0]
lowlinks[node] = index_counter[0]
index_counter[0] += 1
stack.append(node)
# Consider successors
try:
successors = graph[node]
except Exception:
successors = []
for successor in successors:
if successor not in lowlinks:
# Successor has not yet been visited
strongconnect(successor)
lowlinks[node] = min(lowlinks[node],lowlinks[successor])
elif successor in stack:
# the successor is in the stack and hence in the current
# strongly connected component (SCC)
lowlinks[node] = min(lowlinks[node],index[successor])
# If `node` is a root node, pop the stack and generate an SCC
if lowlinks[node] == index[node]:
connected_component = []
while True:
successor = stack.pop()
connected_component.append(successor)
if successor == node: break
component = tuple(connected_component)
# storing the result
result.append(component)
for node in graph:
if node not in lowlinks:
strongconnect(node)
return result
@property
def dot(self):
result = ['digraph G {']
for succ in self._preds:
preds = self._preds[succ]
for pred in preds:
result.append(' %s -> %s;' % (pred, succ))
for node in self._nodes:
result.append(' %s;' % node)
result.append('}')
return '\n'.join(result)
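# --- Illustrative sketch (not part of distlib) ------------------------------
# A Sequencer with two made-up ordering constraints; get_steps() returns the
# steps leading up to (and including) the requested final step.
def _demo_sequencer():
    seq = Sequencer()
    seq.add('build', 'test')      # 'build' must precede 'test'
    seq.add('test', 'release')    # 'test' must precede 'release'
    steps = list(seq.get_steps('release'))   # ['build', 'test', 'release']
    return steps, seq.dot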
#
# Unarchiving functionality for zip, tar, tgz, tbz, whl
#
ARCHIVE_EXTENSIONS = ('.tar.gz', '.tar.bz2', '.tar', '.zip',
'.tgz', '.tbz', '.whl')
def unarchive(archive_filename, dest_dir, format=None, check=True):
def check_path(path):
if not isinstance(path, text_type):
path = path.decode('utf-8')
p = os.path.abspath(os.path.join(dest_dir, path))
if not p.startswith(dest_dir) or p[plen] != os.sep:
raise ValueError('path outside destination: %r' % p)
dest_dir = os.path.abspath(dest_dir)
plen = len(dest_dir)
archive = None
if format is None:
if archive_filename.endswith(('.zip', '.whl')):
format = 'zip'
elif archive_filename.endswith(('.tar.gz', '.tgz')):
format = 'tgz'
mode = 'r:gz'
elif archive_filename.endswith(('.tar.bz2', '.tbz')):
format = 'tbz'
mode = 'r:bz2'
elif archive_filename.endswith('.tar'):
format = 'tar'
mode = 'r'
else: # pragma: no cover
raise ValueError('Unknown format for %r' % archive_filename)
try:
if format == 'zip':
archive = ZipFile(archive_filename, 'r')
if check:
names = archive.namelist()
for name in names:
check_path(name)
else:
archive = tarfile.open(archive_filename, mode)
if check:
names = archive.getnames()
for name in names:
check_path(name)
if format != 'zip' and sys.version_info[0] < 3:
# See Python issue 17153. If the dest path contains Unicode,
# tarfile extraction fails on Python 2.x if a member path name
# contains non-ASCII characters - it leads to an implicit
# bytes -> unicode conversion using ASCII to decode.
for tarinfo in archive.getmembers():
if not isinstance(tarinfo.name, text_type):
tarinfo.name = tarinfo.name.decode('utf-8')
archive.extractall(dest_dir)
finally:
if archive:
archive.close()
def zip_dir(directory):
"""zip a directory tree into a BytesIO object"""
result = io.BytesIO()
dlen = len(directory)
with ZipFile(result, "w") as zf:
for root, dirs, files in os.walk(directory):
for name in files:
full = os.path.join(root, name)
rel = root[dlen:]
dest = os.path.join(rel, name)
zf.write(full, dest)
return result
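# --- Illustrative sketch (not part of distlib) ------------------------------
# zip_dir() returns an in-memory BytesIO; the helper below lists the member
# names of the resulting archive for an existing directory passed in.
def _demo_zip_dir(directory):
    buf = zip_dir(directory)
    with ZipFile(buf) as zf:
        return zf.namelist()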
#
# Simple progress bar
#
UNITS = ('', 'K', 'M', 'G','T','P')
class Progress(object):
unknown = 'UNKNOWN'
def __init__(self, minval=0, maxval=100):
assert maxval is None or maxval >= minval
self.min = self.cur = minval
self.max = maxval
self.started = None
self.elapsed = 0
self.done = False
def update(self, curval):
assert self.min <= curval
assert self.max is None or curval <= self.max
self.cur = curval
now = time.time()
if self.started is None:
self.started = now
else:
self.elapsed = now - self.started
def increment(self, incr):
assert incr >= 0
self.update(self.cur + incr)
def start(self):
self.update(self.min)
return self
def stop(self):
if self.max is not None:
self.update(self.max)
self.done = True
@property
def maximum(self):
return self.unknown if self.max is None else self.max
@property
def percentage(self):
if self.done:
result = '100 %'
elif self.max is None:
result = ' ?? %'
else:
v = 100.0 * (self.cur - self.min) / (self.max - self.min)
result = '%3d %%' % v
return result
def format_duration(self, duration):
if (duration <= 0) and self.max is None or self.cur == self.min:
result = '??:??:??'
#elif duration < 1:
# result = '--:--:--'
else:
result = time.strftime('%H:%M:%S', time.gmtime(duration))
return result
@property
def ETA(self):
if self.done:
prefix = 'Done'
t = self.elapsed
#import pdb; pdb.set_trace()
else:
prefix = 'ETA '
if self.max is None:
t = -1
elif self.elapsed == 0 or (self.cur == self.min):
t = 0
else:
#import pdb; pdb.set_trace()
t = float(self.max - self.min)
t /= self.cur - self.min
t = (t - 1) * self.elapsed
return '%s: %s' % (prefix, self.format_duration(t))
@property
def speed(self):
if self.elapsed == 0:
result = 0.0
else:
result = (self.cur - self.min) / self.elapsed
for unit in UNITS:
if result < 1000:
break
result /= 1000.0
return '%d %sB/s' % (result, unit)
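# --- Illustrative sketch (not part of distlib) ------------------------------
# Drives a Progress instance by hand and samples its formatted properties.
def _demo_progress():
    progress = Progress(maxval=200).start()
    progress.increment(50)
    snapshot = (progress.percentage, progress.ETA, progress.speed)
    progress.stop()               # percentage is '100 %' once done
    return snapshot, progress.percentage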
#
# Glob functionality
#
RICH_GLOB = re.compile(r'\{([^}]*)\}')
_CHECK_RECURSIVE_GLOB = re.compile(r'[^/\\,{]\*\*|\*\*[^/\\,}]')
_CHECK_MISMATCH_SET = re.compile(r'^[^{]*\}|\{[^}]*$')
def iglob(path_glob):
"""Extended globbing function that supports ** and {opt1,opt2,opt3}."""
if _CHECK_RECURSIVE_GLOB.search(path_glob):
msg = """invalid glob %r: recursive glob "**" must be used alone"""
raise ValueError(msg % path_glob)
if _CHECK_MISMATCH_SET.search(path_glob):
msg = """invalid glob %r: mismatching set marker '{' or '}'"""
raise ValueError(msg % path_glob)
return _iglob(path_glob)
def _iglob(path_glob):
rich_path_glob = RICH_GLOB.split(path_glob, 1)
if len(rich_path_glob) > 1:
assert len(rich_path_glob) == 3, rich_path_glob
prefix, set, suffix = rich_path_glob
for item in set.split(','):
for path in _iglob(''.join((prefix, item, suffix))):
yield path
else:
if '**' not in path_glob:
for item in std_iglob(path_glob):
yield item
else:
prefix, radical = path_glob.split('**', 1)
if prefix == '':
prefix = '.'
if radical == '':
radical = '*'
else:
# we support both
radical = radical.lstrip('/')
radical = radical.lstrip('\\')
for path, dir, files in os.walk(prefix):
path = os.path.normpath(path)
for fn in _iglob(os.path.join(path, radical)):
yield fn
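# --- Illustrative sketch (not part of distlib) ------------------------------
# Braces expand to alternatives and '**' recurses into sub-directories, so
# this pattern matches every .py and .txt file below the current directory.
def _demo_iglob():
    return list(iglob('./**/*.{py,txt}'))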
if ssl:
from .compat import (HTTPSHandler as BaseHTTPSHandler, match_hostname,
CertificateError)
#
# HTTPSConnection which verifies certificates/matches domains
#
class HTTPSConnection(httplib.HTTPSConnection):
ca_certs = None # set this to the path to the certs file (.pem)
check_domain = True # only used if ca_certs is not None
# noinspection PyPropertyAccess
def connect(self):
sock = socket.create_connection((self.host, self.port), self.timeout)
if getattr(self, '_tunnel_host', False):
self.sock = sock
self._tunnel()
if not hasattr(ssl, 'SSLContext'):
# For 2.x
if self.ca_certs:
cert_reqs = ssl.CERT_REQUIRED
else:
cert_reqs = ssl.CERT_NONE
self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file,
cert_reqs=cert_reqs,
ssl_version=ssl.PROTOCOL_SSLv23,
ca_certs=self.ca_certs)
else: # pragma: no cover
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.options |= ssl.OP_NO_SSLv2
if self.cert_file:
context.load_cert_chain(self.cert_file, self.key_file)
kwargs = {}
if self.ca_certs:
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(cafile=self.ca_certs)
if getattr(ssl, 'HAS_SNI', False):
kwargs['server_hostname'] = self.host
self.sock = context.wrap_socket(sock, **kwargs)
if self.ca_certs and self.check_domain:
try:
match_hostname(self.sock.getpeercert(), self.host)
logger.debug('Host verified: %s', self.host)
except CertificateError: # pragma: no cover
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
raise
class HTTPSHandler(BaseHTTPSHandler):
def __init__(self, ca_certs, check_domain=True):
BaseHTTPSHandler.__init__(self)
self.ca_certs = ca_certs
self.check_domain = check_domain
def _conn_maker(self, *args, **kwargs):
"""
This is called to create a connection instance. Normally you'd
pass a connection class to do_open, but it doesn't actually check for
a class, and just expects a callable. As long as we behave just as a
constructor would have, we should be OK. If it ever changes so that
we *must* pass a class, we'll create an UnsafeHTTPSConnection class
which just sets check_domain to False in the class definition, and
choose which one to pass to do_open.
"""
result = HTTPSConnection(*args, **kwargs)
if self.ca_certs:
result.ca_certs = self.ca_certs
result.check_domain = self.check_domain
return result
def https_open(self, req):
try:
return self.do_open(self._conn_maker, req)
except URLError as e:
if 'certificate verify failed' in str(e.reason):
raise CertificateError('Unable to verify server certificate '
'for %s' % req.host)
else:
raise
#
# To prevent against mixing HTTP traffic with HTTPS (examples: A Man-In-The-
# Middle proxy using HTTP listens on port 443, or an index mistakenly serves
# HTML containing a http://xyz link when it should be https://xyz),
# you can use the following handler class, which does not allow HTTP traffic.
#
# It works by inheriting from HTTPHandler - so build_opener won't add a
# handler for HTTP itself.
#
class HTTPSOnlyHandler(HTTPSHandler, HTTPHandler):
def http_open(self, req):
raise URLError('Unexpected HTTP request on what should be a secure '
'connection: %s' % req)
#
# XML-RPC with timeouts
#
_ver_info = sys.version_info[:2]
if _ver_info == (2, 6):
class HTTP(httplib.HTTP):
def __init__(self, host='', port=None, **kwargs):
if port == 0: # 0 means use port 0, not the default port
port = None
self._setup(self._connection_class(host, port, **kwargs))
if ssl:
class HTTPS(httplib.HTTPS):
def __init__(self, host='', port=None, **kwargs):
if port == 0: # 0 means use port 0, not the default port
port = None
self._setup(self._connection_class(host, port, **kwargs))
class Transport(xmlrpclib.Transport):
def __init__(self, timeout, use_datetime=0):
self.timeout = timeout
xmlrpclib.Transport.__init__(self, use_datetime)
def make_connection(self, host):
h, eh, x509 = self.get_host_info(host)
if _ver_info == (2, 6):
result = HTTP(h, timeout=self.timeout)
else:
if not self._connection or host != self._connection[0]:
self._extra_headers = eh
self._connection = host, httplib.HTTPConnection(h)
result = self._connection[1]
return result
if ssl:
class SafeTransport(xmlrpclib.SafeTransport):
def __init__(self, timeout, use_datetime=0):
self.timeout = timeout
xmlrpclib.SafeTransport.__init__(self, use_datetime)
def make_connection(self, host):
h, eh, kwargs = self.get_host_info(host)
if not kwargs:
kwargs = {}
kwargs['timeout'] = self.timeout
if _ver_info == (2, 6):
result = HTTPS(host, None, **kwargs)
else:
if not self._connection or host != self._connection[0]:
self._extra_headers = eh
self._connection = host, httplib.HTTPSConnection(h, None,
**kwargs)
result = self._connection[1]
return result
class ServerProxy(xmlrpclib.ServerProxy):
def __init__(self, uri, **kwargs):
self.timeout = timeout = kwargs.pop('timeout', None)
# The above classes only come into play if a timeout
# is specified
if timeout is not None:
scheme, _ = splittype(uri)
use_datetime = kwargs.get('use_datetime', 0)
if scheme == 'https':
tcls = SafeTransport
else:
tcls = Transport
kwargs['transport'] = t = tcls(timeout, use_datetime=use_datetime)
self.transport = t
xmlrpclib.ServerProxy.__init__(self, uri, **kwargs)
#
# CSV functionality. This is provided because on 2.x, the csv module can't
# handle Unicode. However, we need to deal with Unicode in e.g. RECORD files.
#
def _csv_open(fn, mode, **kwargs):
if sys.version_info[0] < 3:
mode += 'b'
else:
kwargs['newline'] = ''
# Python 3 determines encoding from locale. Force 'utf-8'
# file encoding to match other forced utf-8 encoding
kwargs['encoding'] = 'utf-8'
return open(fn, mode, **kwargs)
class CSVBase(object):
defaults = {
'delimiter': str(','), # The strs are used because we need native
'quotechar': str('"'), # str in the csv API (2.x won't take
'lineterminator': str('\n') # Unicode)
}
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.stream.close()
class CSVReader(CSVBase):
def __init__(self, **kwargs):
if 'stream' in kwargs:
stream = kwargs['stream']
if sys.version_info[0] >= 3:
# needs to be a text stream
stream = codecs.getreader('utf-8')(stream)
self.stream = stream
else:
self.stream = _csv_open(kwargs['path'], 'r')
self.reader = csv.reader(self.stream, **self.defaults)
def __iter__(self):
return self
def next(self):
result = next(self.reader)
if sys.version_info[0] < 3:
for i, item in enumerate(result):
if not isinstance(item, text_type):
result[i] = item.decode('utf-8')
return result
__next__ = next
class CSVWriter(CSVBase):
def __init__(self, fn, **kwargs):
self.stream = _csv_open(fn, 'w')
self.writer = csv.writer(self.stream, **self.defaults)
def writerow(self, row):
if sys.version_info[0] < 3:
r = []
for item in row:
if isinstance(item, text_type):
item = item.encode('utf-8')
r.append(item)
row = r
self.writer.writerow(row)
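# --- Illustrative sketch (not part of distlib) ------------------------------
# Writes one RECORD-style row with made-up values and reads it back; both
# classes force UTF-8, so the round trip is the same on Python 2 and 3.
def _demo_csv_roundtrip(path):
    with CSVWriter(path) as writer:
        writer.writerow(['distlib/util.py', 'sha256=deadbeef', '12345'])
    with CSVReader(path=path) as reader:
        return [row for row in reader]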
#
# Configurator functionality
#
class Configurator(BaseConfigurator):
value_converters = dict(BaseConfigurator.value_converters)
value_converters['inc'] = 'inc_convert'
def __init__(self, config, base=None):
super(Configurator, self).__init__(config)
self.base = base or os.getcwd()
def configure_custom(self, config):
def convert(o):
if isinstance(o, (list, tuple)):
result = type(o)([convert(i) for i in o])
elif isinstance(o, dict):
if '()' in o:
result = self.configure_custom(o)
else:
result = {}
for k in o:
result[k] = convert(o[k])
else:
result = self.convert(o)
return result
c = config.pop('()')
if not callable(c):
c = self.resolve(c)
props = config.pop('.', None)
# Check for valid identifiers
args = config.pop('[]', ())
if args:
args = tuple([convert(o) for o in args])
items = [(k, convert(config[k])) for k in config if valid_ident(k)]
kwargs = dict(items)
result = c(*args, **kwargs)
if props:
for n, v in props.items():
setattr(result, n, convert(v))
return result
def __getitem__(self, key):
result = self.config[key]
if isinstance(result, dict) and '()' in result:
self.config[key] = result = self.configure_custom(result)
return result
def inc_convert(self, value):
"""Default converter for the inc:// protocol."""
if not os.path.isabs(value):
value = os.path.join(self.base, value)
with codecs.open(value, 'r', encoding='utf-8') as f:
result = json.load(f)
return result
class SubprocessMixin(object):
"""
Mixin for running subprocesses and capturing their output
"""
def __init__(self, verbose=False, progress=None):
self.verbose = verbose
self.progress = progress
def reader(self, stream, context):
"""
Read lines from a subprocess' output stream and either pass to a progress
callable (if specified) or write progress information to sys.stderr.
"""
progress = self.progress
verbose = self.verbose
while True:
s = stream.readline()
if not s:
break
if progress is not None:
progress(s, context)
else:
if not verbose:
sys.stderr.write('.')
else:
sys.stderr.write(s.decode('utf-8'))
sys.stderr.flush()
stream.close()
def run_command(self, cmd, **kwargs):
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, **kwargs)
t1 = threading.Thread(target=self.reader, args=(p.stdout, 'stdout'))
t1.start()
t2 = threading.Thread(target=self.reader, args=(p.stderr, 'stderr'))
t2.start()
p.wait()
t1.join()
t2.join()
if self.progress is not None:
self.progress('done.', 'main')
elif self.verbose:
sys.stderr.write('done.\n')
return p
def normalize_name(name):
"""Normalize a python package name a la PEP 503"""
# https://www.python.org/dev/peps/pep-0503/#normalized-names
return re.sub('[-_.]+', '-', name).lower()
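# --- Illustrative sketch (not part of distlib) ------------------------------
# Runs of '-', '_' and '.' collapse to a single '-' and the result is
# lower-cased, per PEP 503.
def _demo_normalize_name():
    # -> ('zope-interface', 'my-package')
    return normalize_name('Zope.Interface'), normalize_name('My__Package')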
|
qt.py
|
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright 2013-2016 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
""" This module provides a few methods and classes for visualizing data
associated to grids. We use the `PySide <http://www.pyside.org>`_ bindings
for the `Qt <http://www.qt-project.org>`_ widget toolkit for the GUI.
"""
import math as m
import numpy as np
try:
from PySide.QtGui import (QWidget, QVBoxLayout, QHBoxLayout, QGridLayout, QSlider, QApplication, QLCDNumber,
QAction, QStyle, QToolBar, QLabel, QFileDialog, QMessageBox)
from PySide.QtCore import Qt, QCoreApplication, QTimer
HAVE_PYSIDE = True
except ImportError:
HAVE_PYSIDE = False
import multiprocessing
import os
import signal
import time
from pymor.core.defaults import defaults
from pymor.core.interfaces import BasicInterface
from pymor.core.logger import getLogger
from pymor.core.exceptions import PySideMissing
from pymor.grids.oned import OnedGrid
from pymor.grids.referenceelements import triangle, square
from pymor.gui.gl import GLPatchWidget, ColorBarWidget, HAVE_GL, HAVE_QTOPENGL
from pymor.gui.matplotlib import Matplotlib1DWidget, MatplotlibPatchWidget, HAVE_MATPLOTLIB
from pymor.tools.vtkio import HAVE_PYVTK, write_vtk
from pymor.vectorarrays.interfaces import VectorArrayInterface
from pymor.vectorarrays.numpy import NumpyVectorArray
if HAVE_PYSIDE:
class PlotMainWindow(QWidget):
"""Base class for plot main windows."""
def __init__(self, U, plot, length=1, title=None):
super().__init__()
layout = QVBoxLayout()
if title:
title = QLabel('<b>' + title + '</b>')
title.setAlignment(Qt.AlignHCenter)
layout.addWidget(title)
layout.addWidget(plot)
plot.set(U, 0)
if length > 1:
hlayout = QHBoxLayout()
self.slider = QSlider(Qt.Horizontal)
self.slider.setMinimum(0)
self.slider.setMaximum(length - 1)
self.slider.setTickPosition(QSlider.TicksBelow)
hlayout.addWidget(self.slider)
lcd = QLCDNumber(m.ceil(m.log10(length)))
lcd.setDecMode()
lcd.setSegmentStyle(QLCDNumber.Flat)
hlayout.addWidget(lcd)
layout.addLayout(hlayout)
hlayout = QHBoxLayout()
toolbar = QToolBar()
self.a_play = QAction(self.style().standardIcon(QStyle.SP_MediaPlay), 'Play', self)
self.a_play.setCheckable(True)
self.a_rewind = QAction(self.style().standardIcon(QStyle.SP_MediaSeekBackward), 'Rewind', self)
self.a_toend = QAction(self.style().standardIcon(QStyle.SP_MediaSeekForward), 'End', self)
self.a_step_backward = QAction(self.style().standardIcon(QStyle.SP_MediaSkipBackward),
'Step Back', self)
self.a_step_forward = QAction(self.style().standardIcon(QStyle.SP_MediaSkipForward), 'Step', self)
self.a_loop = QAction(self.style().standardIcon(QStyle.SP_BrowserReload), 'Loop', self)
self.a_loop.setCheckable(True)
toolbar.addAction(self.a_play)
toolbar.addAction(self.a_rewind)
toolbar.addAction(self.a_toend)
toolbar.addAction(self.a_step_backward)
toolbar.addAction(self.a_step_forward)
toolbar.addAction(self.a_loop)
if hasattr(self, 'save'):
self.a_save = QAction(self.style().standardIcon(QStyle.SP_DialogSaveButton), 'Save', self)
toolbar.addAction(self.a_save)
self.a_save.triggered.connect(self.save)
hlayout.addWidget(toolbar)
self.speed = QSlider(Qt.Horizontal)
self.speed.setMinimum(0)
self.speed.setMaximum(100)
hlayout.addWidget(QLabel('Speed:'))
hlayout.addWidget(self.speed)
layout.addLayout(hlayout)
self.timer = QTimer()
self.timer.timeout.connect(self.update_solution)
self.slider.valueChanged.connect(self.slider_changed)
self.slider.valueChanged.connect(lcd.display)
self.speed.valueChanged.connect(self.speed_changed)
self.a_play.toggled.connect(self.toggle_play)
self.a_rewind.triggered.connect(self.rewind)
self.a_toend.triggered.connect(self.to_end)
self.a_step_forward.triggered.connect(self.step_forward)
self.a_step_backward.triggered.connect(self.step_backward)
self.speed.setValue(50)
elif hasattr(self, 'save'):
hlayout = QHBoxLayout()
toolbar = QToolBar()
self.a_save = QAction(self.style().standardIcon(QStyle.SP_DialogSaveButton), 'Save', self)
toolbar.addAction(self.a_save)
hlayout.addWidget(toolbar)
layout.addLayout(hlayout)
self.a_save.triggered.connect(self.save)
self.setLayout(layout)
self.plot = plot
self.U = U
self.length = length
def slider_changed(self, ind):
self.plot.set(self.U, ind)
def speed_changed(self, val):
self.timer.setInterval(val * 20)
def update_solution(self):
ind = self.slider.value() + 1
if ind >= self.length:
if self.a_loop.isChecked():
ind = 0
else:
self.a_play.setChecked(False)
return
self.slider.setValue(ind)
def toggle_play(self, checked):
if checked:
if self.slider.value() + 1 == self.length:
self.slider.setValue(0)
self.timer.start()
else:
self.timer.stop()
def rewind(self):
self.slider.setValue(0)
def to_end(self):
self.a_play.setChecked(False)
self.slider.setValue(self.length - 1)
def step_forward(self):
self.a_play.setChecked(False)
ind = self.slider.value() + 1
if ind == self.length and self.a_loop.isChecked():
ind = 0
if ind < self.length:
self.slider.setValue(ind)
def step_backward(self):
self.a_play.setChecked(False)
ind = self.slider.value() - 1
if ind == -1 and self.a_loop.isChecked():
ind = self.length - 1
if ind >= 0:
self.slider.setValue(ind)
_launch_qt_app_pids = set()
def _launch_qt_app(main_window_factory, block):
"""Wrapper to display plot in a separate process."""
def doit():
try:
app = QApplication([])
except RuntimeError:
app = QCoreApplication.instance()
main_window = main_window_factory()
main_window.show()
app.exec_()
import sys
if block and not getattr(sys, '_called_from_test', False):
doit()
else:
p = multiprocessing.Process(target=doit)
p.start()
_launch_qt_app_pids.add(p.pid)
def stop_gui_processes():
for p in multiprocessing.active_children():
if p.pid in _launch_qt_app_pids:
try:
os.kill(p.pid, signal.SIGKILL)
except OSError:
pass
@defaults('backend', sid_ignore=('backend',))
def visualize_patch(grid, U, bounding_box=([0, 0], [1, 1]), codim=2, title=None, legend=None,
separate_colorbars=False, rescale_colorbars=False, backend='gl', block=False, columns=2):
"""Visualize scalar data associated to a two-dimensional |Grid| as a patch plot.
The grid's |ReferenceElement| must be the triangle or square. The data can either
be attached to the faces or vertices of the grid.
Parameters
----------
grid
The underlying |Grid|.
U
|VectorArray| of the data to visualize. If `len(U) > 1`, the data is visualized
as a time series of plots. Alternatively, a tuple of |VectorArrays| can be
provided, in which case a subplot is created for each entry of the tuple. The
lengths of all arrays have to agree.
bounding_box
A bounding box in which the grid is contained.
codim
The codimension of the entities the data in `U` is attached to (either 0 or 2).
title
Title of the plot.
legend
Description of the data that is plotted. Most useful if `U` is a tuple in which
case `legend` has to be a tuple of strings of the same length.
separate_colorbars
If `True`, use separate colorbars for each subplot.
rescale_colorbars
If `True`, rescale colorbars to data in each frame.
backend
Plot backend to use ('gl' or 'matplotlib').
block
If `True`, block execution until the plot window is closed.
columns
The number of columns in the visualizer GUI in case multiple plots are displayed
at the same time.
"""
if not HAVE_PYSIDE:
raise PySideMissing()
assert backend in {'gl', 'matplotlib'}
if backend == 'gl':
if not HAVE_GL:
logger = getLogger('pymor.gui.qt.visualize_patch')
logger.warn('import of PyOpenGL failed, falling back to matplotlib; rendering will be slow')
backend = 'matplotlib'
elif not HAVE_QTOPENGL:
logger = getLogger('pymor.gui.qt.visualize_patch')
logger.warn('import of PySide.QtOpenGL failed, falling back to matplotlib; rendering will be slow')
backend = 'matplotlib'
if backend == 'matplotlib' and not HAVE_MATPLOTLIB:
raise ImportError('cannot visualize: import of matplotlib failed')
else:
if not HAVE_MATPLOTLIB:
raise ImportError('cannot visualize: import of matplotlib failed')
# TODO extract class
class MainWindow(PlotMainWindow):
def __init__(self, grid, U, bounding_box, codim, title, legend, separate_colorbars, rescale_colorbars, backend):
assert isinstance(U, VectorArrayInterface) and hasattr(U, 'data') \
or (isinstance(U, tuple) and all(isinstance(u, VectorArrayInterface) and hasattr(u, 'data') for u in U)
and all(len(u) == len(U[0]) for u in U))
U = (U.data.astype(np.float64, copy=False),) if hasattr(U, 'data') else \
tuple(u.data.astype(np.float64, copy=False) for u in U)
if isinstance(legend, str):
legend = (legend,)
assert legend is None or isinstance(legend, tuple) and len(legend) == len(U)
if backend == 'gl':
widget = GLPatchWidget
cbar_widget = ColorBarWidget
else:
widget = MatplotlibPatchWidget
cbar_widget = None
if not separate_colorbars and len(U) > 1:
l = getLogger('pymor.gui.qt.visualize_patch')
l.warn('separate_colorbars=False not supported for matplotlib backend')
separate_colorbars = True
class PlotWidget(QWidget):
def __init__(self):
super().__init__()
if separate_colorbars:
if rescale_colorbars:
self.vmins = tuple(np.min(u[0]) for u in U)
self.vmaxs = tuple(np.max(u[0]) for u in U)
else:
self.vmins = tuple(np.min(u) for u in U)
self.vmaxs = tuple(np.max(u) for u in U)
else:
if rescale_colorbars:
self.vmins = (min(np.min(u[0]) for u in U),) * len(U)
self.vmaxs = (max(np.max(u[0]) for u in U),) * len(U)
else:
self.vmins = (min(np.min(u) for u in U),) * len(U)
self.vmaxs = (max(np.max(u) for u in U),) * len(U)
layout = QHBoxLayout()
plot_layout = QGridLayout()
self.colorbarwidgets = [cbar_widget(self, vmin=vmin, vmax=vmax) if cbar_widget else None
for vmin, vmax in zip(self.vmins, self.vmaxs)]
plots = [widget(self, grid, vmin=vmin, vmax=vmax, bounding_box=bounding_box, codim=codim)
for vmin, vmax in zip(self.vmins, self.vmaxs)]
if legend:
for i, plot, colorbar, l in zip(range(len(plots)), plots, self.colorbarwidgets, legend):
subplot_layout = QVBoxLayout()
caption = QLabel(l)
caption.setAlignment(Qt.AlignHCenter)
subplot_layout.addWidget(caption)
if not separate_colorbars or backend == 'matplotlib':
subplot_layout.addWidget(plot)
else:
hlayout = QHBoxLayout()
hlayout.addWidget(plot)
if colorbar:
hlayout.addWidget(colorbar)
subplot_layout.addLayout(hlayout)
plot_layout.addLayout(subplot_layout, int(i/columns), (i % columns), 1, 1)
else:
for i, plot, colorbar in zip(range(len(plots)), plots, self.colorbarwidgets):
if not separate_colorbars or backend == 'matplotlib':
plot_layout.addWidget(plot, int(i/columns), (i % columns), 1, 1)
else:
hlayout = QHBoxLayout()
hlayout.addWidget(plot)
if colorbar:
hlayout.addWidget(colorbar)
plot_layout.addLayout(hlayout, int(i/columns), (i % columns), 1, 1)
layout.addLayout(plot_layout)
if not separate_colorbars:
layout.addWidget(self.colorbarwidgets[0])
for w in self.colorbarwidgets[1:]:
w.setVisible(False)
self.setLayout(layout)
self.plots = plots
def set(self, U, ind):
if rescale_colorbars:
if separate_colorbars:
self.vmins = tuple(np.min(u[ind]) for u in U)
self.vmaxs = tuple(np.max(u[ind]) for u in U)
else:
self.vmins = (min(np.min(u[ind]) for u in U),) * len(U)
self.vmaxs = (max(np.max(u[ind]) for u in U),) * len(U)
for u, plot, colorbar, vmin, vmax in zip(U, self.plots, self.colorbarwidgets, self.vmins,
self.vmaxs):
plot.set(u[ind], vmin=vmin, vmax=vmax)
if colorbar:
colorbar.set(vmin=vmin, vmax=vmax)
super().__init__(U, PlotWidget(), title=title, length=len(U[0]))
self.grid = grid
self.codim = codim
def save(self):
if not HAVE_PYVTK:
                msg = QMessageBox(QMessageBox.Critical, 'Error', 'VTK output disabled. Please install pyvtk.')
msg.exec_()
return
filename = QFileDialog.getSaveFileName(self, 'Save as vtk file')[0]
base_name = filename.split('.vtu')[0].split('.vtk')[0].split('.pvd')[0]
if base_name:
if len(self.U) == 1:
write_vtk(self.grid, NumpyVectorArray(self.U[0], copy=False), base_name, codim=self.codim)
else:
for i, u in enumerate(self.U):
write_vtk(self.grid, NumpyVectorArray(u, copy=False), '{}-{}'.format(base_name, i),
codim=self.codim)
_launch_qt_app(lambda: MainWindow(grid, U, bounding_box, codim, title=title, legend=legend,
separate_colorbars=separate_colorbars, rescale_colorbars=rescale_colorbars,
backend=backend),
block)
def visualize_matplotlib_1d(grid, U, codim=1, title=None, legend=None, separate_plots=False, block=False):
"""Visualize scalar data associated to a one-dimensional |Grid| as a plot.
The grid's |ReferenceElement| must be the line. The data can either
be attached to the subintervals or vertices of the grid.
Parameters
----------
grid
The underlying |Grid|.
U
|VectorArray| of the data to visualize. If `len(U) > 1`, the data is visualized
as a time series of plots. Alternatively, a tuple of |VectorArrays| can be
provided, in which case several plots are made into the same axes. The
lengths of all arrays have to agree.
codim
The codimension of the entities the data in `U` is attached to (either 0 or 1).
title
Title of the plot.
legend
Description of the data that is plotted. Most useful if `U` is a tuple in which
case `legend` has to be a tuple of strings of the same length.
separate_plots
If `True`, use subplots to visualize multiple |VectorArrays|.
block
If `True`, block execution until the plot window is closed.
"""
if not HAVE_PYSIDE:
raise PySideMissing()
if not HAVE_MATPLOTLIB:
raise ImportError('cannot visualize: import of matplotlib failed')
class MainWindow(PlotMainWindow):
def __init__(self, grid, U, codim, title, legend, separate_plots):
assert isinstance(U, VectorArrayInterface) and hasattr(U, 'data') \
or (isinstance(U, tuple) and all(isinstance(u, VectorArrayInterface) and hasattr(u, 'data') for u in U)
and all(len(u) == len(U[0]) for u in U))
U = (U.data,) if hasattr(U, 'data') else tuple(u.data for u in U)
if isinstance(legend, str):
legend = (legend,)
assert legend is None or isinstance(legend, tuple) and len(legend) == len(U)
plot_widget = Matplotlib1DWidget(None, grid, count=len(U), vmin=[np.min(u) for u in U],
vmax=[np.max(u) for u in U], legend=legend, codim=codim,
separate_plots=separate_plots)
super().__init__(U, plot_widget, title=title, length=len(U[0]))
self.grid = grid
_launch_qt_app(lambda: MainWindow(grid, U, codim, title=title, legend=legend, separate_plots=separate_plots), block)
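# --- Illustrative sketch (not part of pyMOR) --------------------------------
# Shows how the two module-level visualizers above are typically invoked.
# `grid` and `U` are assumed to be a matching pyMOR |Grid| and |VectorArray|
# created elsewhere; no concrete grid is constructed here.
def _demo_visualize(grid, U):
    if grid.dim == 2:
        visualize_patch(grid, U, codim=2, title='demo', block=True)
    else:
        visualize_matplotlib_1d(grid, U, codim=1, title='demo', block=True)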
class PatchVisualizer(BasicInterface):
"""Visualize scalar data associated to a two-dimensional |Grid| as a patch plot.
The grid's |ReferenceElement| must be the triangle or square. The data can either
be attached to the faces or vertices of the grid.
Parameters
----------
grid
The underlying |Grid|.
bounding_box
A bounding box in which the grid is contained.
codim
The codimension of the entities the data in `U` is attached to (either 0 or 2).
backend
Plot backend to use ('gl' or 'matplotlib').
block
If `True`, block execution until the plot window is closed.
"""
def __init__(self, grid, bounding_box=([0, 0], [1, 1]), codim=2, backend=None, block=False):
assert grid.reference_element in (triangle, square)
assert grid.dim == 2
assert codim in (0, 2)
self.grid = grid
self.bounding_box = bounding_box
self.codim = codim
self.backend = backend
self.block = block
def visualize(self, U, discretization, title=None, legend=None, separate_colorbars=False,
rescale_colorbars=False, block=None, filename=None, columns=2):
"""Visualize the provided data.
Parameters
----------
U
|VectorArray| of the data to visualize. If `len(U) > 1`, the data is visualized
as a time series of plots. Alternatively, a tuple of |VectorArrays| can be
provided, in which case a subplot is created for each entry of the tuple. The
lengths of all arrays have to agree.
discretization
Filled in :meth:`pymor.discretizations.DiscretizationBase.visualize` (ignored).
title
Title of the plot.
legend
Description of the data that is plotted. Most useful if `U` is a tuple in which
case `legend` has to be a tuple of strings of the same length.
separate_colorbars
If `True`, use separate colorbars for each subplot.
rescale_colorbars
If `True`, rescale colorbars to data in each frame.
block
If `True`, block execution until the plot window is closed. If `None`, use the
default provided during instantiation.
filename
If specified, write the data to a VTK-file using
:func:`pymor.tools.vtkio.write_vtk` instead of displaying it.
columns
The number of columns in the visualizer GUI in case multiple plots are displayed
at the same time.
"""
assert isinstance(U, VectorArrayInterface) and hasattr(U, 'data') \
or (isinstance(U, tuple) and all(isinstance(u, VectorArrayInterface) and hasattr(u, 'data') for u in U)
and all(len(u) == len(U[0]) for u in U))
if filename:
if not isinstance(U, tuple):
write_vtk(self.grid, U, filename, codim=self.codim)
else:
for i, u in enumerate(U):
write_vtk(self.grid, u, '{}-{}'.format(filename, i), codim=self.codim)
else:
block = self.block if block is None else block
visualize_patch(self.grid, U, bounding_box=self.bounding_box, codim=self.codim, title=title,
legend=legend, separate_colorbars=separate_colorbars, rescale_colorbars=rescale_colorbars,
backend=self.backend, block=block, columns=columns)
class Matplotlib1DVisualizer(BasicInterface):
"""Visualize scalar data associated to a one-dimensional |Grid| as a plot.
The grid's |ReferenceElement| must be the line. The data can either
be attached to the subintervals or vertices of the grid.
Parameters
----------
grid
The underlying |Grid|.
codim
The codimension of the entities the data in `U` is attached to (either 0 or 1).
block
If `True`, block execution until the plot window is closed.
"""
def __init__(self, grid, codim=1, block=False):
assert isinstance(grid, OnedGrid)
assert codim in (0, 1)
self.grid = grid
self.codim = codim
self.block = block
def visualize(self, U, discretization, title=None, legend=None, block=None):
"""Visualize the provided data.
Parameters
----------
U
|VectorArray| of the data to visualize. If `len(U) > 1`, the data is visualized
as a time series of plots. Alternatively, a tuple of |VectorArrays| can be
provided, in which case several plots are made into the same axes. The
lengths of all arrays have to agree.
discretization
Filled in by :meth:`pymor.discretizations.DiscretizationBase.visualize` (ignored).
title
Title of the plot.
legend
Description of the data that is plotted. Most useful if `U` is a tuple in which
case `legend` has to be a tuple of strings of the same length.
block
If `True`, block execution until the plot window is closed. If `None`, use the
default provided during instantiation.
"""
block = self.block if block is None else block
visualize_matplotlib_1d(self.grid, U, codim=self.codim, title=title, legend=legend, block=block)
|
__init__.py
|
from collections import namedtuple
from contextlib import contextmanager
import os
from multiprocessing import Manager, Queue, Process
from multiprocessing.managers import ListProxy
from multiprocess.progress_bar import progress_bar
from multiprocess.signals import STOP
from tqdm import tqdm
def available_cores():
return len(os.sched_getaffinity(0))
def worker(func, input: Queue, progress_bar_updates: Queue, output: ListProxy, *args):
"""Generic worker for map-like operations with progress bar.
    Calls `func` on every object provided on the `input` queue
    until `STOP` (None) is received. After each step it queues
    an update on the `progress_bar_updates` queue.
Results of `func` calls are appended to `output` list.
Args:
func: function to be called on queued objects
input: input queue
progress_bar_updates: progress_bar queue
output: managed list for results
*args: additional positional arguments to be passed to `func`
"""
while True:
data = input.get()
if data is STOP:
return
result = func(data, *args)
output.append(result)
progress_bar_updates.put(1)
api_template = namedtuple('API', 'queue, results')
@contextmanager
def multiprocessing_queue(target, args, processes, total):
manager = Manager()
results = manager.list()
queue = Queue()
api = api_template(queue, results)
processes_cnt = processes or available_cores()
# do not start more processes than necessary
if processes_cnt > total:
processes_cnt = total
with progress_bar(total) as progress_queue:
worker_args = [target, queue, progress_queue, results]
if args:
worker_args.extend(args)
processes = [
Process(target=worker, args=worker_args)
for _ in range(processes_cnt)
]
yield api
for _ in processes:
queue.put(STOP)
for process in processes:
process.start()
for process in processes:
process.join()
# TODO: is it possible to use partial instead of shared_args?
class Pool:
"""A pool with support for shared arguments and progress bar.
Interface is partially compatible with `multiprocessing.Pool`.
Only imap method is implemented so far.
"""
def __init__(self, processes):
self.processes = processes
def imap(self, func, iterable, shared_args=tuple()):
"""Iteratively apply function to items ofo `iterable` and return results.
The order of resultant list is not guaranteed to be preserved.
Items will be passed from `iterable` to pool queue one by one.
Args:
func: function to be applied to items
iterable: an iterable with items
shared_args: positional arguments to be passed to func after item
"""
if self.processes == 1:
# for profiling and debugging a single process works better
# (and there is less overhead than forking for one more)
return map(lambda i: func(i, *shared_args), tqdm(iterable))
with multiprocessing_queue(func, shared_args, self.processes, total=len(iterable)) as api:
for item in iterable:
api.queue.put(item)
return api.results
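# --- Illustrative sketch (not part of this module) ---------------------------
# Intended call pattern for Pool.imap(): the worker function receives the item
# first, followed by any shared positional arguments. With processes=1 the
# call degenerates to a plain map with a tqdm progress bar (no forking).
def _demo_pool(values=(1, 2, 3), offset=10):
    def add_offset(value, offset):
        return value + offset
    pool = Pool(processes=1)
    return list(pool.imap(add_offset, list(values), shared_args=(offset,)))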
|
pac.py
|
# coding=utf-8
"""
PaC Adventure Creator
A library for creating a text-based interactive story.
"""
import logging
import pickle
import threading
import time
import os
import textwrap
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
# Check for pygame
try:
from pygame import mixer
except ImportError:
mixer = None
logging.warn("pygame is not installed, music will NOT work.")
__author__ = "DefaltSimon"
__version__ = "0.4.2"
# EVENTS
# DEPRECATED, use decorators!
PICKUP = "pickup"
USE_ITEM = "use-item"
USE_OBJECT = "use-object"
COMBINE = "combine"
START = "start"
ENTER = "enter"
MUSIC_CHANGE = "music"
# Total length of the room name header
PADDING = 65
tw = None
# For simple threading
def threaded(fn):
def wrapper(*args, **kwargs):
threading.Thread(target=fn, args=args, kwargs=kwargs).start()
return wrapper
def update_textwrap():
global tw
tw = textwrap.TextWrapper(width=PADDING, break_long_words=False, replace_whitespace=False)
update_textwrap()
def wrap_text(s):
if tw.width != PADDING:
update_textwrap()
print(textwrap.fill(s, PADDING))
def get_wrap(s):
if tw.width != PADDING:
update_textwrap()
return textwrap.fill(s, PADDING)
# What have I done
# def save(fn):
# fn.__self__.save.save(fn.__self__.currentroom, fn.__self__.roombeforethisone, fn.__self__.inv)
# return fn
# Exception classes
class PacException(Exception):
"""
General exception class, other exceptions are subclassed to this.
"""
pass
class MissingParameters(PacException):
"""
Thrown when some parameters are missing.
"""
pass
class InvalidParameters(PacException):
"""
To be expected when an invalid variable type was passed.
"""
pass
class NotLinked(PacException):
"""
    Thrown when a room you tried to enter is not linked to the current room.
"""
pass
class AlreadyExists(PacException):
"""
Raised when an Item or Room with the same name already exist.
"""
pass
# Singleton class
class Singleton(type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
# Music player
class Music:
"""
Represents the music that is played when entering a room or interacting with an object.
"""
def __init__(self, path):
"""
Initializes the Music: checks presence of the file path and music compatibility.
:param path: path to file
:return: None
"""
if not mixer:
return
if not os.path.isfile(path):
log.error("{} does not exist.".format(path))
self.path = str(path)
# self._keep_alive = threading.Event()
self.is_started = False
def start(self, repeat=True):
"""
Starts playing the music (threading is now not needed).
:param repeat: optional, defaults to True; specifies if the sound file should be repeatedly played.
:return: None
"""
if repeat:
mixer.music.load(self.path)
mixer.music.play(-1)
else:
mixer.music.load(self.path)
mixer.music.play()
time.sleep(0.1)
@staticmethod
def stop(ttl=0.5):
"""
Stops the music
:return: None
"""
if not isinstance(ttl, (int, float)):
raise InvalidParameters
        mixer.music.fadeout(int(ttl * 1000))  # Non-blocking; fadeout expects milliseconds
time.sleep(ttl) # Should wait for the same amount
return
# Room Object
class Room(object):
"""
Represents a room that the player can move into and interact with its objects, etc...
"""
def __init__(self, name, desc, enter_description=None, starting=False):
self.name = str(name)
self.desc = str(desc)
self.on_first_enter = enter_description
self.is_default = bool(starting)
self.entered = False
self.items = {}
self.item_descriptions = {}
self.statics = {}
self.static_obj_descriptions = {}
self.requirements = {
"items": [],
"visited": [],
}
self.music = None
def description(self):
"""
:return: Room description string
"""
return self.desc
def put_item(self, item, description):
"""
Puts an Item into the room.
:param item: Item object to put in the room
:param description: string to display when the item is in the room.
:return: None
"""
if not isinstance(item, Item):
raise InvalidParameters
self.items[item.name] = item
self.item_descriptions[item.name] = str(description)
def put_static_obj(self, obj, description):
"""
Places a StaticObject into the room.
:param obj: StaticObject object to place into the room
:param description: string to display when the object is in the room
:return: None
"""
if not isinstance(obj, StaticObject):
raise InvalidParameters
self.statics[obj.name] = obj
self.static_obj_descriptions[obj.name] = str(description)
def enter(self):
"""
:return: Room description, includes 'first enter description' if it is the first time entering the room. Also includes any items found in the room.
"""
        # Build item descriptions if they exist (if there are any items in the room)
items = ("\n" if self.item_descriptions.values() else "") + "\n".join(self.item_descriptions.values())
        # Build static object descriptions if they exist in the room
statics = (" " if self.static_obj_descriptions.values() else "") + " ".join(self.static_obj_descriptions.values())
if not self.entered:
self.entered = True
if self.on_first_enter:
return str(self.on_first_enter + statics + "\n" + self.desc + items)
else:
return self.desc + statics + items
else:
return self.desc + statics + items
def get_items(self):
"""
:return: A list of items in the room
"""
return list(self.items.values())
def get_static_items(self):
"""
:return: A list of static objects in the room
"""
return list(self.statics.values())
def use_item(self, item):
"""
:param item: Item object to use
:return: Item on_use string
"""
# Converts string to Item if needed
if isinstance(item, Item):
pass
else:
item = self.items[item]
desc = item.use()
self.items.pop(item.name)
self.item_descriptions.pop(item.name)
return desc
def pick_up_item(self, item):
"""
:param item: Item object or string (item name)
:return: Item on_pickup string
"""
# Converts string to Item if needed and checks if the item exists in the room
if isinstance(item, Item):
try:
it = self.items[item.name]
del it
except KeyError:
return False
else:
try:
                item = self.items[item]
except KeyError:
return False
desc = item.pick_up()
self.items.pop(item.name)
self.item_descriptions.pop(item.name)
return desc
def add_visit_requirement(self, room, on_deny):
"""
Adds a room visit requirement to the room.
:param room: Room object or room name
:param on_deny: string
:return: None
"""
if not isinstance(room, Room):
raise InvalidParameters
else:
self.requirements["visited"].append((room, on_deny)) # Tuple
def add_item_requirement(self, item, on_deny):
"""
Adds an item requirement to the room.
:param item: Item object
:param on_deny: Message to be printed when not trying to enter the room and not having the item.
:return: None
"""
if not isinstance(item, Item):
raise InvalidParameters
self.requirements["items"].append((item, on_deny)) # Tuple
def has_visit_requirement(self, visited_rooms):
"""
Indicates if the room has all room visit requirements.
:param visited_rooms: A list of Room objects that the player has visited
:return: 1 if has all requirements, str of required messages joined with \n otherwise.
"""
if not isinstance(visited_rooms, list):
raise InvalidParameters
# Build room list
rms = [this[0] for this in self.requirements.get("visited")]
for element in rms:
if element not in visited_rooms:
places = []
for item in self.requirements["visited"]:
places.append(item[1])
return "\n".join(places)
return 1
def has_item_requirements(self, items):
"""
Indicates if the room has all Item requirements.
:param items: A list of Item objects (in the player's inventory)
:return: 1 if has all requirements, str of required messages joined with \n otherwise.
"""
if not isinstance(items, list):
raise InvalidParameters
nit = self.requirements["items"]
for el in nit:
if el[0] not in items:
ls = []
for item in nit:
ls.append(item[1])
return "\n".join(ls)
return 1
def add_music(self, music):
"""
Adds a Music object that will start playing when the player enters.
:param music: path or Music
:return: None
"""
if not isinstance(music, Music):
raise InvalidParameters
self.music = music
# Item in the room or inventory
class Item(object):
"""
An item that the player can pick up, use, combine, etc.
"""
def __init__(self, name, desc, on_use, on_failed_use, on_failed_pickup, on_pickup=None, is_craftable=False, crafting_description=None):
self.name = str(name)
self.desc = str(desc)
self.used = False
self.picked_up = False
self.crafted = False
self.on_use = str(on_use)
self.on_pickup = on_pickup
self.is_craftable = bool(is_craftable)
self.crafting_description = crafting_description
self.on_failed_use = on_failed_use
self.on_failed_pickup = on_failed_pickup
self.pickup_requires = []
self.use_requires = []
def description(self):
"""
:return: Item description string
"""
return self.desc
def was_used(self):
"""
:return: A bool indicating if the Item has been used.
"""
return bool(self.used)
    def use(self):
        """
        "Uses" the item, setting its used property to True.
        :return: Item on_use string
"""
# Must use hasUseRequirements!
self.used = True
return self.on_use
def pick_up(self):
"""
"Picks up" the item
:return: Item on_pickup string
"""
# Must use hasPickUpRequirements!
self.picked_up = True
return self.on_pickup
def craft(self):
if not self.is_craftable:
return False
else:
self.crafted = True
return self.crafting_description
def has_pick_up_requirements(self, items):
"""
Checks if you have the proper items to pick up this one.
:param items: A list of Item objects (usually your inventory)
:return: Bool indicating the result
"""
        if isinstance(items, list):
            has_items = True
            for item in self.pickup_requires:
                if item not in items:
                    has_items = False
            return bool(has_items)
        return False
def has_use_requirements(self, items):
"""
Checks if you have the proper items to use this one.
:param items: A list of Item objects (usually your inventory)
:return: Bool indicating the result
"""
        if isinstance(items, list):
            has_items = True
            for item in self.use_requires:
                if item not in items:
                    has_items = False
            return bool(has_items)
        return False
def add_pick_up_requirement(self, item):
"""
Adds a pick up requirement for this item.
:param item: Item object
:return: None
"""
if not isinstance(item, Item):
raise InvalidParameters
self.pickup_requires.append(item)
def add_use_requirement(self, item):
"""
Adds a use requirement for this item.
:param item: Item object
:return: None
"""
if not isinstance(item, Item):
raise InvalidParameters
self.use_requires.append(item)
class StaticObject(object):
def __init__(self, name, display, on_use, on_failed_use):
self.name = str(name)
self.display = str(display)
self.on_use = str(on_use)
self.used = False
self.on_failed_use = on_failed_use
self.item_requirements = []
self.item_blueprints = {}
self.music = None
def was_used(self):
"""
:return: Bool indicating if the StaticObject was used.
"""
return bool(self.used)
def use(self):
"""
:return: on_use string
"""
self.used = True
return self.on_use
def use_with_item(self, item):
"""
Uses the Item on the StaticObject if a blueprint for it exists
:param item: Item
        :return: Description defined with add_item_blueprint(), or False if no blueprint exists for the item
"""
if not isinstance(item, Item):
raise InvalidParameters
if item.name in self.item_blueprints:
return self.item_blueprints[item.name]
else:
return False
def take_notice(self):
"""
Don't mind the name.
:return: display string
"""
return self.display
def look_at(self):
"""
Just an alias for take_notice()
:return: display string
"""
return self.take_notice()
def has_item_requirements(self, items):
"""
        Checks if you have the proper items to use this static object.
:param items: A list of Item objects (usually your inventory)
:return: Bool indicating the result
"""
        if isinstance(items, list):
            has_items = True
            for item in self.item_requirements:
                if item not in items:
                    has_items = False
            return bool(has_items)
else:
return False
def add_item_requirement(self, item):
"""
        Adds an item requirement for using this static object.
:param item: Item object
:return: None
"""
if not isinstance(item, Item):
raise InvalidParameters
self.item_requirements.append(item)
def add_item_blueprint(self, item, description):
"""
Add the Item to the list of usable items for this StaticObject.
:param item: Item object
:param description: string to display when using this item on this static object
:return: None
"""
if not isinstance(item, Item):
raise InvalidParameters
self.item_blueprints[item.name] = str(description)
def add_music(self, music):
"""
        Adds a Music object that will start playing when this object is used.
        :param music: Music object
:return: None
"""
if not isinstance(music, Music):
raise InvalidParameters
self.music = music
class EventDispatcher(metaclass=Singleton):
"""
The event handler/dispatcher for PaC
It is a Singleton (can only have one instance)
"""
def __init__(self):
self.events = {
"pickup": [],
"use-item": [],
"use-object": [],
"start": [],
"combine": [],
"enter": [],
"music": []
}
def _register_event(self, event_type, fn):
"""
        Should not be used directly; use the decorators below instead.
        :param event_type: one of the event names defined in self.events
        :param fn: function reference ('doit', NOT 'doit()')
:return: None
"""
if event_type not in self.events.keys():
raise InvalidParameters
self.events.get(event_type).append(fn)
def dispatch_event(self, event_type, **kwargs):
"""
Runs the registered functions for the event type
:param event_type: one of the events
:return: None
"""
for fn in self.events.get(event_type):
if not kwargs:
fn()
else:
fn(**kwargs)
# @decorators for fast event registering
def on_enter(self, fn):
self._register_event(ENTER, fn)
return fn
def on_pickup(self, fn):
self._register_event(PICKUP, fn)
return fn
def on_item_use(self, fn):
self._register_event(USE_ITEM, fn)
return fn
def on_object_use(self, fn):
self._register_event(USE_OBJECT, fn)
return fn
def on_combine(self, fn):
self._register_event(COMBINE, fn)
return fn
def on_start(self, fn):
self._register_event(START, fn)
return fn
def on_music_change(self, fn):
self._register_event(MUSIC_CHANGE, fn)
return fn
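# The decorators above are the intended way to hook into game events. The sketch below is a
# hedged, hypothetical usage example (handler and variable names are invented); it only
# illustrates the pattern and is never called by the engine itself.
def _example_event_handlers():
    dispatcher = EventDispatcher()

    @dispatcher.on_pickup
    def _log_pickup(item, desc):
        # dispatch_event(PICKUP, item=..., desc=...) supplies these keyword arguments
        print("Picked up:", item.name)

    @dispatcher.on_enter
    def _log_enter(fr, to, first_time):
        # dispatch_event(ENTER, fr=..., to=..., first_time=...) supplies these keyword arguments
        print("Entering", to.name, "(first visit)" if first_time else "")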
class SaveGame:
"""
    A helper class that saves and loads the game state to/from disk.
"""
def __init__(self, name, version):
"""
Initializes the SaveGame.
        :param name: name of the current game
        :param version: version of the current game
        :return: None
"""
self.game_name = str(name)
self.game_version = str(version)
def save(self, data):
"""
        Saves the current state to save/<game_name_with_underscores>.save.
        :param data: a dict with "state" and "game_info" keys (see PaCInterpreter._save_game)
:return: None
"""
path = "save/{}.save".format(str(self.game_name).replace(" ", "_"))
if not os.path.isdir("save"):
os.makedirs("save")
log.debug("Saving game...")
        with open(path, "wb") as file:
            pickle.dump(data, file, pickle.HIGHEST_PROTOCOL)
def load(self):
"""
        Loads the save if it exists and matches the current game version.
        :return: the saved data dict, or None if no valid save exists
"""
path = "save/{}.save".format(self.game_name.replace(" ", "_"))
if not os.path.isfile(path):
return None
with open(path, "rb") as file:
data = pickle.load(file)
if not str(data.get("game_info").get("version")) == self.game_version:
return None
return data
def has_valid_save(self):
"""
Indicates if a valid save is present.
:return: bool
"""
path = "save/{}.save".format(self.game_name.replace(" ", "_"))
if os.path.isfile(path):
with open(path, "rb") as file:
d = pickle.load(file)
a = bool(d.get("game_info").get("version") == self.game_version)
b = bool(d.get("game_info").get("name") == self.game_name)
return bool(a is True and b is True)
else:
return False
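# Illustrative shape of the dict that SaveGame.save() expects and load() returns, mirroring
# PaCInterpreter._save_game() further below; the literal values here are placeholders.
_EXAMPLE_SAVE_DATA = {
    "state": {"rooms": {}, "items": {}, "statics": {}, "inventory": [], "visits": []},
    "game_info": {"name": "Demo", "version": "0.1"},
}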
# TextInterface handles player interaction
class TextInterface(metaclass=Singleton):
"""
The basic Text interface that the player interacts with.
"""
def __init__(self, autosave=True, ask_for_save=True):
self.running = True
self.autosave = bool(autosave)
self.ask_for_save = bool(ask_for_save)
self.save_count = 0
def begin_adventure(self, pac):
"""
        Prints the starting message and begins the main loop, starting user interaction: the game.
:param pac: PaCInterpreter created by user
:return: None
"""
def get_room_header(room):
if isinstance(room, Room):
room = room.name
else:
room = str(room)
wys = ", ".join(pac.ways())
ti = int( (PADDING-len(room)) / 2)
hd = ("-" * ti) + room + ("-" * ti) + "\n" + "You can go to: " + wys + "\n" # Header
return hd
def text_adventure():
inp = str(input(">"))
if inp == "help" or inp == "what to do":
commands = ["go", "pick up", "use", "inv", "where", "combine", "save", "settings", "exit"]
wrap_text(", ".join(commands))
# Displays possible ways out of the room (DEPRECATED!)
elif inp.startswith(("ways", "path", "paths", "way")):
wrap_text("You can go to: " + ", ".join(pac.ways()))
elif inp.startswith(("settings", "preferences")):
print("1. autosave : {}\n2. exit".format("enabled" if self.autosave else "disabled"))
ce = str(input())
if ce.startswith(("1", "autosave")):
ce = str(input("Do you want to turn Autosaving On or Off? "))
if ce.startswith(("on", "ON", "True", "turn it on")):
self.autosave = True
print("Autosaving: enabled")
else:
self.autosave = False
print("Autosaving: disabled")
# Gives you a list of items in the room (DEPRECATED!)
elif inp.startswith(("items", "objects", "items in the room", "what items are in the room")):
object_list = pac.get_current_room().get_items()
objects = []
for obj in object_list:
# Just for the correct grammar jk
if str(obj.name).startswith(("a", "e", "i", "o", "u")):
um = "an"
else:
um = "a"
objects.append(str(um + " " + obj.name))
# Correct prints
if len(objects) == 0:
print("There are no items here.")
elif len(objects) == 1:
print("In the room there is " + objects[0] + "\n")
else:
wrap_text("In the room there are " + ", ".join(objects))
# Moves the player back to the previous room.
elif inp.startswith("go back"):
try:
desc = pac.go_back()
                except (NotImplementedError, NotLinked, AttributeError):
return
if not isinstance(desc, list):
print(get_room_header(pac.current_room))
wrap_text(desc)
else:
wrap_text(desc[0])
# Moves the player to a different room
elif inp.startswith(("walk ", "go ", "go to ", "walk to ", "walk down ", "go down")):
# Properly cuts the string
if inp.startswith("walk down"):
rn = inp[len("walk down "):]
elif inp.startswith("walk "):
rn = inp[len("walk "):]
elif inp.startswith("go to"):
rn = inp[len("go to "):]
elif inp.startswith("go down"):
rn = inp[len("go down "):]
elif inp.startswith("go "):
rn = inp[len("go "):]
elif inp.startswith("walk to"):
rn = inp[len("walk to "):]
# Printed when you do "walk somethingthatisnothere"
else:
print("Where do you want to go?")
return
# Resolves a/an/the
if rn.startswith("a "):
rn = rn[len("a "):]
elif rn.startswith("an "):
rn = rn[len("an "):]
elif rn.startswith("the "):
rn = rn[len("the "):]
# Walks and prints
try:
desc = pac.walk(str(rn))
                except (NotImplementedError, NotLinked):
return
if not isinstance(desc, list):
print(get_room_header(pac.current_room))
wrap_text(desc)
else:
wrap_text(desc[0])
# Picks up the item in the room and puts it into your inventory
elif inp.startswith("pick up"):
                on = inp[len("pick up "):]
                if not on:
                    print("What do you want to pick up?")
                    return
# Resolves a/an/the
if on.startswith("a "):
on = on[len("a "):]
elif on.startswith("an "):
on = on[len("an "):]
elif on.startswith("the "):
on = on[len("the "):]
on_use = pac.pick_up_item(on)
if not on_use:
pass
else:
wrap_text(on_use)
# Uses the item in your inventory
elif inp.startswith("use"):
                on = inp[len("use "):]
                if not on:
                    print("What?")
                    return
# Resolves a/an/the
if on.startswith("a "):
on = on[len("a "):]
elif on.startswith("an "):
on = on[len("an "):]
elif on.startswith("the "):
on = on[len("the "):]
try:
desc = pac.use_item(pac.get_item_by_name(on))
except NotImplementedError:
spl = on.split(" with ")
if len(spl) == 1:
spl = on.split(" on ")
if len(spl) == 1:
print("What do you want to use?")
return
try:
desc = pac.use_static_object(pac.get_static_object_by_name(spl[1]), pac.get_item_by_name(spl[0]))
except NotImplementedError:
return
if not desc:
return
wrap_text(desc)
elif inp.startswith("combine"):
# Finds the two Item objects
sr = inp[len("combine "):]
if not sr:
print("What do you want to combine?")
return
# Does proper splitting with multiple keywords
sr = sr.split("with")
if len(sr) == 1:
sr = sr[0].split("and")
if len(sr) == 1:
print("Use: combine item1 with item2...")
return
try:
crafting_desc = pac.combine(sr[0].strip(" "), sr[1].strip(" "))
except NotImplementedError:
return
if not crafting_desc:
wrap_text(pac.d_failed_combine)
return
wrap_text(crafting_desc)
# Displays items in your inventory
elif inp.startswith(("inventory", "inv")):
# Converts items to a list of names
items = []
for it in pac.get_inventory():
# Just for the correct grammar jk
if str(it.name).startswith(("a", "e", "i", "o", "u")):
um = "an"
else:
um = "a"
items.append(str(um + " " + it.name))
# Correct prints
if len(items) == 0:
print("You do not have anything in your inventory.")
elif len(items) == 1:
print("You have " + items[0])
elif len(items) == 2:
wrap_text("You have " + items[0] + " and " + items[1])
else:
wrap_text("You have " + ", ".join(items))
# Tells you what room you are currently in
elif inp.startswith(("where am i", "where", "room")):
wrap_text("You are in the " + str(pac.get_current_room().name))
# Saves the game
elif inp.startswith(("save", "save game", "do a save", "gamesave")):
pac._save_game()
print("Game has been saved.")
# Option to quit game
elif inp.startswith(("exit", "quit", "q")):
n = str(input("Are you sure?"))
if str(n).lower().startswith(("yes", "yup", "ye", "sure", "y")):
ce = str(input("Would you like to save your current game? y/n "))
if ce.lower() == "y":
pac._save_game()
print("Game saved, bye!")
else:
print("Bye!")
self.running = False
return
elif n.lower().startswith(("no", "nope", "n", "not sure")):
return
        # This part prints the starting message and the usual room info for the starting room, then enters the main loop.
pac._init_save() # creates SaveGame instance at pac.saving
if self.ask_for_save and pac.saving.has_valid_save():
doit = str(input("A save has been found. Do you want to load the save? y/n "))
if doit.lower() == "y" or not doit:
pac._load_game()
# Start music if the room has it
if pac.current_room.music:
                    pac._start_music_thread(pac.current_room.music)
print("Save loaded.")
# Require confirmation
elif doit.lower() == "n" or doit:
c = input("Are you sure? With next save all your progress will be lost. y (continue) / n (load save anyway) ")
if c.lower() == "n":
pac._load_game()
                    if pac.current_room.music:
                        pac._start_music_thread(pac.current_room.music)
print("Save loaded.")
else:
pass
else:
log.warn("Got unexpected response.")
print(pac.starting_message + "\n" + get_room_header(pac.current_room.name))
wrap_text(pac.current_room.enter())
while self.running: # Defaults to True, creates an infinite loop until exiting
if self.save_count >= 4 and self.autosave is True:
pac._save_game()
self.save_count = 0
text_adventure()
self.save_count += 1
# Code never reaches this point... probably (except when quitting)
# Main class
class PaCInterpreter(metaclass=Singleton):
"""
The interpreter, linking together all objects and your code.
PaC stands for point and click (adventure) ;)
Also, it is a Singleton (meaning it can only have one instance)
"""
def __init__(self, name=None, desc=None, version=None, autosave=True):
# Game info vars
self.name = name
self.description = desc
self.version = version
self.saving = None
# Game 'engine' stuff
self.rooms = {}
self.items = {}
self.statics = {}
self.blueprints = []
self.inv = []
self.visits = []
self.links = {}
self.current_room = None
self.previous_room = None
self.starting_room = None
self.starting_message = None
self.running = False
# Defaults (default pick up is defined in createItem)
self.d_use = "Hmm..."
self.d_failed_use = "Hmm..."
self.d_failed_pickup = "I can't do that."
self.d_failed_combine = "Can't do that..."
self.music_thread = None
self.events = None
self.autosave = autosave
def _set_event_dispatcher(self, event_dispatcher):
"""
!DEPRECATED!
EventDispatcher is now a singleton so setting one is not needed
Associates the instance of EventDispatcher with PacInterpreter
:param event_dispatcher: an instance of EventDispatcher (optional)
:return: None
"""
if not isinstance(event_dispatcher, EventDispatcher):
raise InvalidParameters
self.events = event_dispatcher
def start(self, ask_for_save=True):
"""
Starts the adventure with you in the default room and the starting message.
If you have not defined a starting room or message, MissingParameters will be raised.
        :param ask_for_save: bool indicating if the user should be asked to load a save (if one is present)
:return: None
"""
self.running = True
self.current_room = self.starting_room
mixer.init() # Initializes the mixer module (for Music)
if not self.events:
self.events = EventDispatcher()
self.events.dispatch_event(START)
if not self.starting_room or not self.starting_message:
raise MissingParameters
self.visits.append(self.current_room)
# Instances the TextInterface class (no need for it to be class-wide for now)
text_interface = TextInterface(autosave=self.autosave, ask_for_save=ask_for_save)
# If the starting room has music, start playing.
if self.starting_room.music:
self._start_music_thread(self.starting_room.music)
# With this the TextInterface has the access to the class - the 'story'.
# Prints the starting message and begins the while True loop.
text_interface.begin_adventure(self)
def set_default_use_fail_message(self, message):
"""
Sets the default message to return when not being able to use an item (when not overridden by Item specific fail message).
:param message: string
:return: None
"""
self.d_failed_use = str(message)
def set_default_pick_up_fail_message(self, message):
"""
Sets the default message to return when not being able to pick up an item (when not overridden by Item specific fail message).
:param message: string
:return: None
"""
self.d_failed_pickup = str(message)
def set_default_combine_fail_message(self, message):
"""
Sets the default message for when failed to combine.
:param message: string
:return: None
"""
self.d_failed_combine = str(message)
def set_default_use_message(self, message):
"""
        Sets the default message for when using an item (when not overridden by an item-specific use message).
        :param message: string
        :return: None
"""
self.d_use = str(message)
def set_starting_message(self, message):
"""
Sets the starting message. Necessary before calling start().
:param message: string
:return: None
"""
self.starting_message = str(message)
@staticmethod
def set_textwrap_length(length):
"""
        Sets the text wrap length (PADDING) used for printed text. Defaults to 65.
:param length: int
:return: None
"""
global PADDING
PADDING = int(length)
def set_autosave(self, action):
"""
        Enables or disables autosave.
:param action: bool
:return: None
"""
self.autosave = bool(action)
def get_rooms(self):
"""
Returns a dictionary of rooms created.
{room name : Room object, ...}
:return: dict of rooms
"""
return self.rooms
def get_current_room(self):
"""
Returns the current room.
:return: Room object
"""
if not self.current_room:
raise NotImplementedError
return self.current_room
def get_inventory(self):
"""
        Returns a list of Items in the player's inventory.
:return: list of Items
"""
return list(self.inv)
def get_room_by_name(self, name):
"""
Returns the Room by its name.
:param name: room name string
:return: Room object
"""
return self.rooms[str(name)]
def get_item_by_name(self, item):
"""
Returns the Item by its name. Raises NotImplementedError if the item does not exist.
:param item: item name string
:return: Item object
"""
try:
return self.items[item]
except KeyError:
raise NotImplementedError
def get_static_object_by_name(self, obj):
"""
Returns the StaticObject by its name. Raises NotImplementedError if the item does not exist
:param obj: object name string
:return: StaticObject object
"""
try:
return self.statics[obj]
except KeyError:
raise NotImplementedError
def create_room(self, name, desc, on_first_enter=None, starting=False):
"""
Creates a Room with supplied properties.
:param name: room name
:param desc: room description
:param on_first_enter: description to be displayed when entering the room for the first time
:param starting: bool indicating if the room should be the starting one
:return: created Room object
"""
if not name:
raise MissingParameters
if name in self.rooms:
raise AlreadyExists
room = Room(name, desc, on_first_enter, starting)
self.rooms[str(name)] = room
if starting:
self.starting_room = room
return room
def create_item(self, name, desc, on_use=None, failed_use=None, failed_pickup=None, on_pickup=None, is_craftable=False, crafting_desc=None):
"""
Creates an Item with supplied properties. All parameters have to be strings.
        If on_use is not supplied, the default use message ("Hmm...") is displayed when the item is used.
:param name: item name
:param desc: item description
:param on_use: string to be displayed when using the item
:param failed_use: string to be displayed when not being able to use the item
:param failed_pickup: string to be displayed when not being able to pick up the item
:param on_pickup: string to be displayed when picking up the item
:param is_craftable: bool
:param crafting_desc: str to display when this item is crafted
:return: created Item object
"""
if not name or not desc:
raise InvalidParameters
if not on_use:
on_use = self.d_use
if not on_pickup:
on_pickup = "You picked up {}".format(str(name))
if not failed_use:
failed_use = self.d_failed_use
if not failed_pickup:
failed_pickup = self.d_failed_pickup
if not crafting_desc:
crafting_desc = "By combining you created a {}".format(str(name))
obj = Item(name, desc, on_use, failed_use, failed_pickup, on_pickup, is_craftable, crafting_desc)
# 'Registers' the object for getItemByName()
self.items[obj.name] = obj
return obj
def create_blueprint(self, item1, item2, final_item):
"""
        Creates a blueprint for combining two items into another item. All three can be Item objects or item names.
The order does not matter.
:param item1: First Item object to combine
:param item2: Second Item object to combine
:param final_item: Item object that will be the result
:return: None
"""
# Converts from str to Item objects if needed
if not isinstance(item1, Item):
item1 = self.get_item_by_name(item1)
if not isinstance(item2, Item):
item2 = self.get_item_by_name(item2)
if not isinstance(final_item, Item):
final_item = self.get_item_by_name(final_item)
# Done converting, now append the blueprint to self.blueprints in the form of tuple
app = (item1, item2, final_item)
self.blueprints.append(app)
def create_static_item(self, name, display, on_use=None, failed_use=None):
"""
Creates a StaticObject that can 'sit' in the room and be interacted with.
It can not be picked up, but can be used with/without special items.
:param name: object name
:param display: string that will be displayed when the object is in the room
:param on_use: string that will be displayed when using the object without special items.
:param failed_use: displayed when not able to use the object
:return: StaticObject object
"""
if not name or not display:
raise InvalidParameters
if not on_use:
on_use = self.d_use
if not failed_use:
failed_use = self.d_failed_use
obj = StaticObject(name, display, on_use, failed_use)
self.statics[name] = obj
return obj
def combine(self, item1, item2):
"""
        Combines two items together if there is a blueprint for the combination.
        :param item1: Item object or item name
        :param item2: Item object or item name
        :return: crafting description of the combined Item, or False if the combination is not possible
"""
# Converts to Item objects if needed
if not isinstance(item1, Item):
item1 = self.get_item_by_name(item1)
if not isinstance(item2, Item):
item2 = self.get_item_by_name(item2)
# Checks existence in inventory
if not ((item1 in self.inv) and (item2 in self.inv)):
return False
        # Shifts through the blueprints, looking for one matching the two items (order does not matter)
        for blue in self.blueprints:
            b1, b2, result = blue
            if (b1 == item1 and b2 == item2) or (b1 == item2 and b2 == item1):
                # Remove the two source items from the inventory and add the crafted one
                self.inv.remove(item1)
                self.inv.remove(item2)
                self.put_into_inv(result)
                # Dispatch event
                self.events.dispatch_event(COMBINE, item1=item1, item2=item2, result=result)
                return result.craft()
        return False
def link_room(self, room1, room2, two_way=False):
"""
Links two rooms together (one-way or two-way).
:param room1: Room to link from
:param room2: Room to link to
        :param two_way: Defaults to False, indicates if the path should be two-way
:return: None
"""
if not isinstance(room1, Room) or not isinstance(room2, Room):
raise InvalidParameters
# First link
try:
self.links[room1.name].append(room2.name)
except KeyError:
self.links[room1.name] = []
self.links[room1.name].append(room2.name)
# Second link, if two_way is True
if two_way:
try:
self.links[room2.name].append(room1.name)
except KeyError:
self.links[room2.name] = []
self.links[room2.name].append(room1.name)
@staticmethod
def put_item(room, item, description):
"""
Puts an item into a room.
:param room: Room to put the Item into
:param item: Item to put in the Room
:param description: string to display when an item is in the room
:return: None
"""
if not isinstance(room, Room) or not isinstance(item, Item):
raise InvalidParameters
room.put_item(item, description)
@staticmethod
def put_static_item(room, obj):
"""
Puts a StaticObject into a room. (description is provided by StaticObject.display string)
:param room: Room to put the StaticObject into
:param obj: StaticObject to put in the Room
:return: None
"""
if not isinstance(room, Room) or not isinstance(obj, StaticObject):
raise InvalidParameters
room.put_static_obj(obj, obj.display)
def put_into_inv(self, item):
"""
Puts an Item into your inventory.
:param item: Item to put
:return: None
"""
if not isinstance(item, Item):
raise InvalidParameters
self.inv.append(item)
def pick_up_item(self, item):
"""
Picks up the item in the current room.
:param item:
:return: False if failed, item on_pickup string if successful.
"""
# Converts string to Item if needed
if not isinstance(item, Item):
try:
item = self.items[item]
except KeyError:
return False
if not item == self.current_room.items.get(item.name):
return False
if not item.has_pick_up_requirements(self.inv):
            if item.on_failed_pickup is not None:
return str(item.on_failed_pickup)
else:
return self.d_failed_pickup
it = self.current_room.pick_up_item(item)
self.events.dispatch_event(PICKUP, item=item, desc=it)
that_item = self.items[item.name]
self.put_into_inv(that_item)
return it
def use_item(self, item):
"""
Uses an Item in your inventory
:param item: Item to use
        :return: False if failed, Item on_use string if successful
"""
if not isinstance(item, Item):
raise InvalidParameters
if item not in self.inv:
return False
else:
if not item.has_use_requirements(self.inv):
if item.on_failed_use is not None:
return str(item.on_failed_use)
else:
return self.d_failed_use
desc = item.use()
self.events.dispatch_event(USE_ITEM, item=item, desc=desc)
return desc
def use_static_object(self, obj, item=None):
"""
Uses the StaticObject in the room.
:param obj: StaticObject
:param item: Item to use with (optional)
:return: StaticObject display string
"""
if not isinstance(obj, StaticObject):
raise InvalidParameters
if obj not in self.current_room.get_static_items():
return False
else:
if not obj.has_item_requirements(self.inv):
if obj.on_failed_use is not None:
return str(obj.on_failed_use)
else:
return self.d_failed_use
if not item:
if obj.music:
self._start_music_thread(obj.music)
desc = obj.use()
else:
if obj.music:
self._start_music_thread(obj.music)
desc = obj.use_with_item(item)
self.events.dispatch_event(USE_OBJECT, object=obj, desc=desc)
return desc
def walk(self, room):
"""
Walks the user from the current room to the specified one.
Raises NotImplementedError if the room does not exist and NotLinked if the room is not linked.
:param room: Room to go to
        :return: Room enter() string if everything is okay, otherwise a list containing a single deny-message string
"""
# Gets the Room object if needed
if not isinstance(room, Room):
try:
room = self.rooms[str(room)]
except KeyError:
raise NotImplementedError
# Starts the music if the room has one
if room.music:
if not self.music_thread == room.music:
self._start_music_thread(room.music)
# Raise NotLinked if the room does not have a link to the specified one
        if room.name not in self.links.get(self.current_room.name, []):
raise NotLinked
# Processes requirements
item_r = room.has_item_requirements(self.inv)
room_r = room.has_visit_requirement(self.visits)
if item_r or room_r:
self.events.dispatch_event(ENTER, fr=self.current_room, to=room, first_time=not room.entered)
if item_r == 1:
if room_r == 1: # Only if everything is fulfilled, return room description
desc = room.enter()
# Sets current room and the one you were just in
self.previous_room = self.current_room
self.current_room = room
self.visits.append(room)
return desc
else: # Return room deny message
return [room_r]
else: # Item is not correct, branch out
if room_r == 1: # If room requirements are okay, return only item deny message
return [item_r]
else: # Both are not fulfilled, return str of both
return [str(str(item_r) + "\n" + str(room_r))]
def go_back(self):
"""
Moves the player back to the previous room.
        :return: Same as walk(): Room enter() string if everything is okay, otherwise a list containing a single deny-message string
"""
if not self.previous_room:
raise NotImplementedError
return self.walk(self.previous_room)
def ways(self):
"""
Returns a list of links (ways/paths) from the current room.
        :return: list of links (room names)
"""
room = self.current_room
if not self.current_room or not isinstance(self.current_room, Room):
raise MissingParameters
try:
return self.links[room.name]
except KeyError:
return []
@staticmethod
def add_music(music, place):
"""
Adds music to be played when entering a room or interacting with a StaticObject.
Music does NOT stop playing when moving to a room without music!
:param music: str or Music
:param place: Room or StaticObject
:return: None
"""
if isinstance(music, str):
music = Music(music)
elif isinstance(music, Music):
pass
else:
raise InvalidParameters
if not (isinstance(place, Room) or isinstance(place, StaticObject)):
raise InvalidParameters
place.add_music(music)
@threaded
def _start_music_thread(self, music, repeat=True):
"""
Starts the music, stopping any existing threads.
:param music: Music
:return: None
"""
if not isinstance(music, Music):
raise InvalidParameters
try:
self.music_thread.stop()
except AttributeError:
pass
self.music_thread = music
self.last_music_thread = music
self.music_thread.__init__(self.music_thread.path)
self.events.dispatch_event(MUSIC_CHANGE, music=music, path=music.path)
self.music_thread.start(repeat)
def _save_game(self):
"""
Saves the current state of the game to save/name_of_the_game.save
:return: None
"""
if not self.saving:
self._init_save()
rooms = {k: r for k, r in self.rooms.items()}
items = {k: r for k, r in self.items.items()}
statics = {k: r for k, r in self.statics.items()}
inv = self.inv
visited = self.visits
data = {
"state": {"rooms": rooms, "items": items, "statics": statics, "inventory": inv, "visits": visited},
"game_info": {"name": self.name, "version": self.version}
}
self.saving.save(data)
def _init_save(self):
self.saving = SaveGame(self.name, self.version)
def _load_game(self):
if not self.saving:
self._init_save()
if self.saving.has_valid_save():
data = self.saving.load()
# has_valid_save() should check for this, but we check again
if data.get("game_info").get("version") != self.version:
log.warn("Game version is not the same even though has_valid_save() reported so! Not loading the save!")
return
game_info = data.get("game_info")
game_state = data.get("state")
if not game_info or not game_state:
log.error("Game save is corrupt.")
# User should delete the save him/herself.
return
self.rooms = game_state.get("rooms")
self.items = game_state.get("items")
self.statics = game_state.get("statics")
self.inv = game_state.get("inventory")
self.visits = game_state.get("visits")
self.current_room = game_state.get("rooms").get(self.current_room.name)
try:
self.previous_room = game_state.get("rooms").get(self.previous_room.name)
except AttributeError:
pass
# Shortcuts for convenience
Story = PaCInterpreter
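# A minimal usage sketch for the interpreter above. The room/item names and strings are invented
# for illustration; start() launches the interactive TextInterface loop, so the sketch is wrapped
# in a function and is not executed on import.
def _example_story():
    story = Story(name="Demo", desc="A tiny demo adventure", version="0.1")
    story.set_starting_message("You wake up in a dusty hallway.")
    hallway = story.create_room("hallway", "A dusty hallway.", starting=True)
    cellar = story.create_room("cellar", "A damp cellar.")
    story.link_room(hallway, cellar, two_way=True)
    key = story.create_item("key", "A small rusty key.", on_use="It does not fit anything here.")
    story.put_item(hallway, key, "A rusty key lies on the floor.")
    story.start(ask_for_save=False)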
|
remote.py
|
import RPi.GPIO as GPIO
import threading
from mpu6050_custom import mpu6050
from time import sleep
import client
class Remote:
STOP = False
data = []
def __init__(self, socket):
self.socket = socket
self.sensor = mpu6050(0x68)
self.output_data_thread = threading.Thread(target=self.send_data)
GPIO.setmode(GPIO.BCM)
GPIO.setup(17, GPIO.IN, pull_up_down=GPIO.PUD_UP)
#GPIO.setup(27, GPIO.IN, pull_up_down=GPIO.PUD_UP)
sleep(0.5)
self.sensor.calibrate()
GPIO.add_event_detect(17, GPIO.FALLING, self.readData, 200)
# Again my button is buggy so I couldn't test this out.
#GPIO.add_event_detect(27, GPIO.FALLING, self.stop, 500)
def printData(self):
while not self.STOP:
try:
d = self.data.pop(0)
print("X: %d" % d['x'], ", Y: %d" % d['y'], ", Z: %d" % d['z'])
            except IndexError:
                # no data available yet
                pass
sleep(0.2)
def send_data(self):
        while not self.STOP:
            if len(self.data) == 0:
                # nothing to send yet; avoid a busy loop
                sleep(0.15)
                continue
try:
self.socket.send(self.data.pop(0))
except IndexError:
print("Index Error %d" % len(self.data))
pass
except:
print("Socket Dropped")
self.stop()
sleep(0.15)
def readData(self, channel):
self.data.append(self.sensor.get_accel_data())
def RUN(self):
self.socket.connect()
self.sensor.enable_interrupt()
sleep(0.1)
self.output_data_thread.start()
        while not self.STOP:
            sleep(0.1)  # idle until stop() is requested
        self.stop()
def stop(self):
self.STOP = True
GPIO.remove_event_detect(17)
#GPIO.remove_event_detect(27)
self.socket.close()
GPIO.cleanup()
del self.socket
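# Hypothetical wiring sketch: the concrete socket wrapper comes from the project's client module,
# whose API is not shown here, so the class name and constructor arguments below are assumptions.
#
#     remote = Remote(client.Client("192.168.0.10", 5005))
#     remote.RUN()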
|
manual_ai.py
|
from pathlib import Path
from threading import Thread
from typing import List
from drivebuildclient.AIExchangeService import AIExchangeService
from drivebuildclient.aiExchangeMessages_pb2 import SimulationID
def _handle_vehicle(sid: SimulationID, vid: str, requests: List[str]) -> None:
from drivebuildclient.aiExchangeMessages_pb2 import VehicleID, SimStateResponse, DataRequest, Control
from keyboard import is_pressed
vid_obj = VehicleID()
vid_obj.vid = vid
while True:
print(sid.sid + ": Test status: " + service.get_status(sid))
print(vid + ": Wait")
sim_state = service.wait_for_simulator_request(sid, vid_obj) # wait()
        if sim_state == SimStateResponse.SimState.RUNNING:
print(vid + ": Request data")
request = DataRequest()
request.request_ids.extend(requests)
data = service.request_data(sid, vid_obj, request) # request()
# print(data)
if "egoFrontCamera" in data.data:
                with open("example.png", "wb") as outfile:
                    outfile.write(data.data["egoFrontCamera"].camera.annotated)
print(vid + ": Wait for control")
control = Control()
while not is_pressed("space"): # Wait for the user to trigger manual drive
pass
print(vid + ": Control")
if is_pressed("s"):
control.simCommand.command = Control.SimCommand.Command.SUCCEED
elif is_pressed("f"):
control.simCommand.command = Control.SimCommand.Command.FAIL
elif is_pressed("c"):
control.simCommand.command = Control.SimCommand.Command.CANCEL
else:
accelerate = 0
steer = 0
brake = 0
if is_pressed("up"):
accelerate = 1
if is_pressed("down"):
brake = 1
if is_pressed("right"):
steer = steer + 1
if is_pressed("left"):
steer = steer - 1
control.avCommand.accelerate = accelerate
control.avCommand.steer = steer
control.avCommand.brake = brake
service.control(sid, vid_obj, control) # control()
else:
print(sid.sid + ": The simulation is not running anymore (State: "
+ SimStateResponse.SimState.Name(sim_state) + ").")
print(sid.sid + ": Final result: " + service.get_result(sid))
break
if __name__ == "__main__":
service = AIExchangeService("defender.fim.uni-passau.de", 8383)
# Send tests
submissions = service.run_tests("test", "test", Path("../examples/StefanHuber"))
# sids = service.run_tests("test", "test", "../examples/HarishSwaminathanGopal")
# Interact with a simulation
if submissions:
submissions = submissions.submissions
else:
exit(1)
for _, sid in submissions.items():
ego_requests = ["egoPosition", "egoSpeed", "egoSteeringAngle", "egoFrontCamera", "egoLidar", "egoLaneDist", "egoRoadEdges"]
non_ego_requests = ["nonEgoPosition", "nonEgoSpeed", "nonEgoSteeringAngle", "nonEgoLeftCamera", "nonEgoLidar",
"nonEgoLaneDist"]
ego_vehicle = Thread(target=_handle_vehicle, args=(sid, "ego", ego_requests))
ego_vehicle.start()
non_ego_vehicle = Thread(target=_handle_vehicle, args=(sid, "nonEgo", non_ego_requests))
non_ego_vehicle.start()
ego_vehicle.join()
non_ego_vehicle.join()
|
asynchronous.py
|
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import copy
import queue
import time
import torch
import torch.multiprocessing as mp
from salina import Agent
from salina.workspace import Workspace, _SplitSharedWorkspace
def f(agent,in_queue,out_queue):
while True:
args=in_queue.get()
if args=="exit":
out_queue.put("ok")
return
workspace=Workspace()
with torch.no_grad():
agent(workspace,**args)
out_queue.put("ok")
for k in workspace.keys():
out_queue.put((k,workspace.get_full(k)))
out_queue.put("ok")
class AsynchronousAgent:
    """ Implements an agent that is executed asynchronously in another process, and that returns its own workspace
Usage is:
* agent(workspace)
* while agent.is_running():
* .....
* workspace=agent.get_workspace()
"""
def __init__(self,agent):
""" Create the AsynchronousAgent
Args:
agent ([salina.Agent]): The agent to execute in another process
"""
self._is_running=False
self.process=None
self._workspace=None
self.agent=agent
def __call__(self,**kwargs):
""" Executes the agent in non-blocking mode. A new workspace is created by the agent.
"""
assert not self._is_running
if self.process is None:
self.o_queue = mp.Queue()
self.o_queue.cancel_join_thread()
self.i_queue = mp.Queue()
self.i_queue.cancel_join_thread()
self.process = mp.Process(
target=f, args=(self.agent, self.i_queue,self.o_queue)
)
self.process.daemon = False
self.process.start()
self._is_running=True
self.i_queue.put(kwargs)
    def is_running(self):
        """ Is the agent still running?
        Returns:
            [bool]: True if the agent is running
"""
if self._is_running:
try:
r = self.o_queue.get(False)
assert r == "ok"
self._is_running = False
r = self.o_queue.get()
workspace=Workspace()
while(r!="ok"):
key,val=r
workspace.set_full(key,val)
r = self.o_queue.get()
self._workspace=workspace.to("cpu")
            except queue.Empty:
                # no result available yet; the agent is still running
                pass
return self._is_running
    def get_workspace(self):
        """ Returns the built workspace if the agent has stopped its execution
Returns:
[salina.Workspace]: The built workspace
"""
if self.is_running():
return None
return self._workspace
    def close(self):
        """ Closes the agent and kills the corresponding process
"""
if self.process is None:
return
print("[AsynchronousAgent] closing process")
self.i_queue.put("exit")
self.o_queue.get()
self.process.terminate()
self.process.join()
self.i_queue.close()
self.o_queue.close()
del self.i_queue
del self.o_queue
self.process = None
def __del__(self):
self.close()
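# Usage sketch following the pattern described in the class docstring. "my_agent" stands for any
# salina.Agent instance and the forwarded kwargs (t, n_steps) are illustrative assumptions.
def _example_usage(my_agent: Agent):
    async_agent = AsynchronousAgent(my_agent)
    async_agent(t=0, n_steps=10)  # kwargs are forwarded to the wrapped agent in the worker process
    while async_agent.is_running():
        time.sleep(0.01)  # the caller is free to do other work here
    workspace = async_agent.get_workspace()
    async_agent.close()
    return workspace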
|
RUtils.py
|
import os
import sys
import threading
import time
from selenium.webdriver.support.color import Color, Colors
class tool:
"""
    Holds custom basic utility methods
"""
    _instance = None
    _print_lock = threading.Lock()  # shared lock so concurrent prints do not interleave
def __init__(self):
pass
    def __new__(cls, *args, **kwargs):
        if tool._instance is None:
            tool._instance = super().__new__(cls)  # call the parent class's __new__
        return tool._instance
@staticmethod
def bug():
Colors.pop()
assert 1/0
@staticmethod
def startNewThread(fun):
        t1 = threading.Thread(target=fun, args=[])  # run the given function in a new thread
t1.start()
return t1
    def printColor(self, s="", fontColor='black', end="\n"):
        """Prints colored text to the console (default color: black). Returns the color code used.
        :param str s: content to print
        :param str fontColor: one of red | green | yellow | pink | blue | gray | black | cyan
        :param end: trailing character (usually \n or an empty string)
        :return: the ANSI color code (as a string) that was used
        """
        fontColorArr = {'black': 30, 'red': 31, 'green': 32, 'yellow': 33, 'blue': 34, 'pink': 35, 'cyan': 36, 'gray': 37}
        if fontColorArr.get(fontColor) is None:
            raise ValueError("Invalid fontColor! Unknown font color: " + fontColor)
        line = str(fontColorArr.get(fontColor))
        # Use the shared class-level lock so prints from different threads do not interleave
        with tool._print_lock:
            print('\033[0;' + line + 'm', s, end=end)
        return line
def print(self,s,fontColor='blue',timeStrColor="red",siteColor="pink",path=None):
"""
        Prints a timestamped, colored message to the console, followed by the file/line location.
        The timestamp color, content color and location color can each be customized.
        :param s: content to print
        :param fontColor: (str) red | green | yellow | pink | blue | gray | black
        :param timeStrColor: (str) red | green | yellow | blue | black
        :param siteColor: (str) color of the file/line location segment
        :return: None
"""
# print(sys._getframe(1).f_lineno)
# print(sys._getframe(1).f_code.co_filename)
# print(sys._getframe(1).f_code.co_name)
# print(sys._getframe(1).f_lineno)
        # 1. print the timestamp
        # 2. print the content
        # 3. print the location
line=""
# line = "------------FILE:" + str(sys._getframe(1).f_code.co_filename) + "_____MODULE:" + str(
# sys._getframe(1).f_code.co_name) + "_____LINE:" + str(sys._getframe(1).f_lineno)
        # 1. print the timestamp
        self.printColor(s='[' + time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()) + ']', fontColor=timeStrColor, end="")
        # 2. print the content
        self.printColor(s=s, fontColor=fontColor, end="")
# print(sys._getframe(1).f_code.co_name)
# print(sys._getframe(2).f_code.co_name)
# print(sys._getframe(3).f_code.co_name)
# print(sys._getframe(4).f_code.co_name)
line = "------------FILE:" + str(sys._getframe(1).f_code.co_filename) + "_____MODULE:" + str(
sys._getframe(1).f_code.co_name) + "_____LINE:" + str(sys._getframe(1).f_lineno)
        # 3. print the location
self.printColor(s=line,fontColor=siteColor,end="")
print('\033[0m')
# self.printColor()
        if path is not None:
            if not os.path.isfile(path):
                raise ValueError('Invalid save path: ' + str(path) + '. The file does not exist!')
@staticmethod
def isBaseType(variate):
"""
        Checks whether the variable is of a basic built-in type (int, str, float, list, tuple, dict, set)
        :param variate: the variable to check
        :return: bool
"""
type1 = ""
if type(variate) == type(1):
type1 = "int"
return True
elif type(variate) == type("str"):
type1 = "str"
return True
elif type(variate) == type(12.3):
type1 = "float"
return True
elif type(variate) == type([1]):
type1 = "list"
return True
elif type(variate) == type(()):
type1 = "tuple"
return True
elif type(variate) == type({"key1": "123"}):
type1 = "dict"
return True
elif type(variate) == type({"key1"}):
type1 = "set"
return True
return False
@staticmethod
def getType(data):
"""
        Determines the type of the data; currently two types are recognized:
        1. JSON type -> "json"
        2. text type -> "text"
        :param data: the data to inspect
        :return: one of the types above
"""
data=str(data)
        if data.startswith('{') and data.endswith("}"):
            try:
                data = eval(data)
                if type(data) == type({}):
                    return "json"
                else:
                    return "text"
            except Exception:
                return "text"
else:
return "text"
if __name__ == '__main__':
tool().print("你好哦")
print(222)
tool().print("你好哦")
tool().print("你好哦")
print(111)
tool().print("你好哦")
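    # A couple of extra illustrative calls; the color names are keys accepted by printColor()
    tool().printColor("warning text", fontColor="yellow")
    tool().printColor("error text", fontColor="red")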
|
hsms_if.py
|
# _*_ coding: utf-8 _*_
#@Time : 2020/8/14 08:26 AM
#@Author : cherish_peng
#@Email : 1058386071@qq.com
#@File : hsms_if.py
#@Software : PyCharm
from .item import Item
from .secsmessage import SecsMessage
from .errsecsmsg import ErrSecsMsg
import re
from .systembytegenerator import SystemByteGenerator
from .analysissml import sml
import threading
import time
from pathlib import Path
class Hsms_If:
def __init__(self,path,callback):
file = Path(path)
if not file.is_file():
            raise Exception("the file ({0}) doesn't exist".format(path))
self.callback = callback
self.sml =sml(path)
self.msgdic, self.namesfdic,self.sfwdic,self.iflist,self.trigger= self.sml.read_analysis_sml()
ErrSecsMsg.init_ErrSecsMsg(normsglist=list(self.sfwdic.keys()))
def everytriggerevent(self):
for name in self.trigger.keys():
if name not in self.msgdic:
                raise Exception("triggerevent error: name {0} doesn't exist".format(name))
sf = self.namesfdic[name]
result = re.match(r'^S(\d{1,3})F(\d{1,3})$',sf)
if not result:
                raise Exception('failed to get the Stream and Function values')
s = int(result.groups()[0])
f = int(result.groups()[1])
everytime = int(self.trigger[name])
item = Item.dictoitem({name:self.msgdic[name]})
replyexpected = self.sfwdic[self.namesfdic[name]]
msg = SecsMessage(item=item,name=name,s=s,f=f,replyexpected=replyexpected)
thread = threading.Thread(target=self.senddatamessage,args=(msg,everytime))
            thread.daemon = True
thread.start()
def iftriggerevent(self,msg:SecsMessage):
for condition in self.iflist:
if self.istriggerevent(msg=msg,condition=condition):
if condition[1] in self.msgdic.keys():
result = re.match(r'^S(\d{1,2})F(\d{1,2})$', self.namesfdic[condition[1]])
if not result:
return
s = int(result.groups()[0])
f = int(result.groups()[1])
item = Item.dictoitem({condition[1]:self.msgdic[condition[1]]})
replyexpected = self.sfwdic[self.namesfdic[condition[1]]]
if replyexpected:
self.callback(SecsMessage(item=item, name=condition[1], s=s, f=f, replyexpected=replyexpected))
else:
self.callback(SecsMessage(item=item, name=condition[1], s=s, f=f, replyexpected=replyexpected,systembyte=msg.systembyte))
def istriggerevent(self,msg:SecsMessage,condition):
conditionstr = condition[0]
result = re.match(r'(.*?) *\( *(\d{0,2}) *\) *(<|>|==|<=|>=) *<(.*?) +(.*?)>', conditionstr)
if result:
tup = result.groups()
sf = tup[0].upper()
if sf not in self.sfwdic:
return False
#raise Exception('the condition({0}) of iftriggerevent err'.format(conditionstr))
if sf != 'S{0}F{1}'.format(msg.s,msg.f):
return False
index = int(tup[1])
cal = tup[2]
value = None
if 'F' in tup[3]:
value = round(float(tup[4]),5)
elif re.match(r'I\d.*?|U\d.*?', tup[3]):
value = int(tup[4])
elif re.match(r'A.*?|Boolean.*?|B.*?', tup[3]):
value = tup[4].strip('"').strip("'")
if cal == '==':
if value == msg.get_itemvaluebyindex(index):
return True
elif cal == '<':
if value < msg.get_itemvaluebyindex(index):
return True
elif cal == '>':
if value > msg.get_itemvaluebyindex(index):
return True
elif cal == '<=':
if value <= msg.get_itemvaluebyindex(index):
return True
elif cal == '>=':
if value >= msg.get_itemvaluebyindex(index):
return True
else:
conditionstr = conditionstr.upper()
if conditionstr not in self.sfwdic:
return False
#raise Exception('the condition({0}) of iftriggerevent err'.format(conditionstr))
if conditionstr == 'S{0}F{1}'.format(msg.s,msg.f):
return True
return False
def senddatamessage(self,msg:SecsMessage,everytime):
while True:
try:
msg = SecsMessage(item=msg.secsitem, name=msg.name, s=msg.s, f=msg.f, replyexpected=msg.replyexpected)
self.callback(msg)
except Exception as e:
raise e
time.sleep(everytime)
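# Hypothetical construction sketch: Hsms_If needs a path to an SML file and a callback that
# receives outgoing SecsMessage objects. The file name and the transport send function are assumptions.
def _example_setup(transport_send):
    def on_message(msg: SecsMessage):
        transport_send(msg)  # hand the message to whatever HSMS transport layer is in use
    hsms = Hsms_If("equipment.sml", on_message)
    hsms.everytriggerevent()  # start the periodic trigger threads defined in the SML file
    return hsms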
|
keepkey.py
|
from binascii import hexlify, unhexlify
import traceback
import sys
from typing import NamedTuple, Any, Optional, Dict, Union, List, Tuple, TYPE_CHECKING
from electrum_dash.util import bfh, bh2u, UserCancelled, UserFacingException
from electrum_dash.bip32 import BIP32Node
from electrum_dash import constants
from electrum_dash.dash_tx import to_varbytes, serialize_extra_payload
from electrum_dash.i18n import _
from electrum_dash.transaction import Transaction, PartialTransaction, PartialTxInput, PartialTxOutput
from electrum_dash.keystore import Hardware_KeyStore
from electrum_dash.plugin import Device, runs_in_hwd_thread
from electrum_dash.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import (is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data,
get_xpubs_and_der_suffixes_from_txinout)
if TYPE_CHECKING:
import usb1
from .client import KeepKeyClient
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class KeepKey_KeyStore(Hardware_KeyStore):
hw_type = 'keepkey'
device = 'KeepKey'
plugin: 'KeepKeyPlugin'
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))
@runs_in_hwd_thread
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation_prefix() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
@runs_in_hwd_thread
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
for txin in tx.inputs():
tx_hash = txin.prevout.txid.hex()
if txin.utxo is None:
raise UserFacingException(_('Missing previous tx for legacy input.'))
prev_tx[tx_hash] = txin.utxo
self.plugin.sign_transaction(self, tx, prev_tx)
class KeepKeyPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, ckd_public, types, HidTransport
firmware_URL = 'https://www.keepkey.com'
libraries_URL = 'https://github.com/keepkey/python-keepkey'
minimum_firmware = (1, 0, 0)
keystore_class = KeepKey_KeyStore
SUPPORTED_XTYPES = ('standard', )
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
try:
from . import client
import keepkeylib
import keepkeylib.ckd_public
import keepkeylib.transport_hid
import keepkeylib.transport_webusb
self.client_class = client.KeepKeyClient
self.ckd_public = keepkeylib.ckd_public
self.types = keepkeylib.client.types
self.DEVICE_IDS = (keepkeylib.transport_hid.DEVICE_IDS +
keepkeylib.transport_webusb.DEVICE_IDS)
# only "register" hid device id:
self.device_manager().register_devices(keepkeylib.transport_hid.DEVICE_IDS, plugin=self)
# for webusb transport, use custom enumerate function:
self.device_manager().register_enumerate_func(self.enumerate)
self.libraries_available = True
except ImportError:
self.libraries_available = False
@runs_in_hwd_thread
def enumerate(self):
from keepkeylib.transport_webusb import WebUsbTransport
results = []
for dev in WebUsbTransport.enumerate():
path = self._dev_to_str(dev)
results.append(Device(path=path,
interface_number=-1,
id_=path,
product_key=(dev.getVendorID(), dev.getProductID()),
usage_page=0,
transport_ui_string=f"webusb:{path}"))
return results
@staticmethod
def _dev_to_str(dev: "usb1.USBDevice") -> str:
return ":".join(str(x) for x in ["%03i" % (dev.getBusNumber(),)] + dev.getPortNumberList())
@runs_in_hwd_thread
def hid_transport(self, pair):
from keepkeylib.transport_hid import HidTransport
return HidTransport(pair)
@runs_in_hwd_thread
def webusb_transport(self, device):
from keepkeylib.transport_webusb import WebUsbTransport
for dev in WebUsbTransport.enumerate():
if device.path == self._dev_to_str(dev):
return WebUsbTransport(dev)
@runs_in_hwd_thread
def _try_hid(self, device):
self.logger.info("Trying to connect over USB...")
if device.interface_number == 1:
pair = [None, device.path]
else:
pair = [device.path, None]
try:
return self.hid_transport(pair)
except BaseException as e:
# see fdb810ba622dc7dbe1259cbafb5b28e19d2ab114
# raise
self.logger.info(f"cannot connect at {device.path} {e}")
return None
@runs_in_hwd_thread
def _try_webusb(self, device):
self.logger.info("Trying to connect over WebUSB...")
try:
return self.webusb_transport(device)
except BaseException as e:
self.logger.info(f"cannot connect at {device.path} {e}")
return None
@runs_in_hwd_thread
def create_client(self, device, handler):
if device.product_key[1] == 2:
transport = self._try_webusb(device)
else:
transport = self._try_hid(device)
if not transport:
self.logger.info("cannot connect to device")
return
self.logger.info(f"connected to device at {device.path}")
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.logger.info(f"ping failed {e}")
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
self.logger.info(msg)
if handler:
handler.show_error(msg)
else:
raise UserFacingException(msg)
return None
return client
@runs_in_hwd_thread
def get_client(self, keystore, force_pair=True, *,
devices=None, allow_user_interaction=True) -> Optional['KeepKeyClient']:
client = super().get_client(keystore, force_pair,
devices=devices,
allow_user_interaction=allow_user_interaction)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Dash Testnet" if constants.net.TESTNET else "Dash"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, self.device)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
            t.daemon = True
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
self.logger.exception('')
handler.show_error(repr(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
@runs_in_hwd_thread
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection = settings
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if not client:
raise Exception(_("The device was disconnected."))
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language)
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
def _make_node_path(self, xpub, address_n):
bip32node = BIP32Node.from_xkey(xpub)
node = self.types.HDNodeType(
depth=bip32node.depth,
fingerprint=int.from_bytes(bip32node.fingerprint, 'big'),
child_num=int.from_bytes(bip32node.child_number, 'big'),
chain_code=bip32node.chaincode,
public_key=bip32node.eckey.get_public_key_bytes(compressed=True),
)
return self.types.HDNodePathType(node=node, address_n=address_n)
def setup_device(self, device_info, wizard, purpose):
device_id = device_info.device.id_
client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
wizard.run_task_without_blocking_gui(
task=lambda: client.get_xpub("m", 'standard'))
client.used()
return client
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_keepkey_input_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2pkh', ):
return self.types.SPENDADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.SPENDMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_keepkey_output_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2pkh', ):
return self.types.PAYTOADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.PAYTOMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
@runs_in_hwd_thread
def sign_transaction(self, keystore, tx: PartialTransaction, prev_tx):
self.prev_tx = prev_tx
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, for_sig=True, keystore=keystore)
outputs = self.tx_outputs(tx, keystore=keystore)
signatures = client.sign_tx(self.get_coin_name(), inputs, outputs,
lock_time=tx.locktime, version=tx.version)[0]
signatures = [(bh2u(x) + '01') for x in signatures]
tx.update_signatures(signatures)
@runs_in_hwd_thread
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
client = self.get_client(keystore)
if not client.atleast_version(1, 3):
keystore.handler.show_error(_("Your device firmware is too old"))
return
deriv_suffix = wallet.get_address_index(address)
derivation = keystore.get_derivation_prefix()
address_path = "%s/%d/%d"%(derivation, *deriv_suffix)
address_n = client.expand_path(address_path)
script_type = self.get_keepkey_input_script_type(wallet.txin_type)
# prepare multisig, if available:
xpubs = wallet.get_master_public_keys()
if len(xpubs) > 1:
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pairs = sorted(zip(pubkeys, xpubs))
multisig = self._make_multisig(
wallet.m,
[(xpub, deriv_suffix) for pubkey, xpub in sorted_pairs])
else:
multisig = None
client.get_address(self.get_coin_name(), address_n, True, multisig=multisig, script_type=script_type)
def tx_inputs(self, tx: Transaction, *, for_sig=False, keystore: 'KeepKey_KeyStore' = None):
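        # Convert Electrum transaction inputs into device TxInputType messages; when
        # signing, attach the script type, multisig data and this keystore's
        # derivation path for each input it owns.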
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin.is_coinbase_input():
prev_hash = b"\x00"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
assert isinstance(tx, PartialTransaction)
assert isinstance(txin, PartialTxInput)
assert keystore
if len(txin.pubkeys) > 1:
xpubs_and_deriv_suffixes = get_xpubs_and_der_suffixes_from_txinout(tx, txin)
multisig = self._make_multisig(txin.num_sig, xpubs_and_deriv_suffixes)
else:
multisig = None
script_type = self.get_keepkey_input_script_type(txin.script_type)
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig)
my_pubkey, full_path = keystore.find_my_pubkey_in_txinout(txin)
if full_path:
txinputtype.address_n.extend(full_path)
prev_hash = txin.prevout.txid
prev_index = txin.prevout.out_idx
if txin.value_sats() is not None:
txinputtype.amount = txin.value_sats()
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.script_sig is not None:
txinputtype.script_sig = txin.script_sig
txinputtype.sequence = txin.nsequence
inputs.append(txinputtype)
return inputs
def _make_multisig(self, m, xpubs):
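        # Build a MultisigRedeemScriptType from (xpub, derivation suffix) pairs;
        # a single key means the script is not multisig, so return None.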
if len(xpubs) == 1:
return None
pubkeys = [self._make_node_path(xpub, deriv) for xpub, deriv in xpubs]
return self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * len(pubkeys),
m=m)
def tx_outputs(self, tx: PartialTransaction, *, keystore: 'KeepKey_KeyStore'):
def create_output_by_derivation():
script_type = self.get_keepkey_output_script_type(txout.script_type)
if len(txout.pubkeys) > 1:
xpubs_and_deriv_suffixes = get_xpubs_and_der_suffixes_from_txinout(tx, txout)
multisig = self._make_multisig(txout.num_sig, xpubs_and_deriv_suffixes)
else:
multisig = None
my_pubkey, full_path = keystore.find_my_pubkey_in_txinout(txout)
assert full_path
txoutputtype = self.types.TxOutputType(
multisig=multisig,
amount=txout.value,
address_n=full_path,
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = txout.value
if address:
txoutputtype.script_type = self.types.PAYTOADDRESS
txoutputtype.address = address
else:
txoutputtype.script_type = self.types.PAYTOOPRETURN
txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(txout)
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for txout in tx.outputs():
address = txout.address
use_create_by_derivation = False
if txout.is_mine and not txout.is_ps_ks and not has_change:
# prioritise hiding outputs on the 'change' branch from user
# because no more than one change address allowed
if txout.is_change == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation()
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx: Optional[Transaction]):
t = self.types.TransactionType()
if tx is None:
# probably for segwit input and we don't need this prev txn
return t
tx.deserialize()
t.version = tx.version
t.lock_time = tx.locktime
inputs = self.tx_inputs(tx)
t.inputs.extend(inputs)
for out in tx.outputs():
o = t.bin_outputs.add()
o.amount = out.value
o.script_pubkey = out.scriptpubkey
if t.version > 2:
tx_type = tx.tx_type
if tx_type:
t.extra_data = to_varbytes(serialize_extra_payload(tx))
t.version |= tx_type << 16
return t
# This function is called from the TREZOR libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
|
http.py
|
import base64
import json
import sys
import threading
import web
import nia as NIA
urls = (
'/', 'index',
'/get_steps', 'get_steps'
)
# global scope stuff
nia = None
nia_data = None
class index:
def GET(self):
render = web.template.render("templates/")
return render.index()
class get_steps:
def GET(self):
web.header("Content-Type", "application/json")
data = {
"brain_fingers": web.brain_fingers
}
return json.dumps(data)
class Updater:
def update(self):
while True:
# kick-off processing data from the NIA
data_thread = threading.Thread(target=nia_data.get_data)
data_thread.start()
# get the fourier data from the NIA
data, steps = nia_data.fourier(nia_data)
web.brain_fingers = steps
# wait for the next batch of data to come in
data_thread.join()
# exit if we cannot read data from the device
if nia_data.AccessDeniedError:
sys.exit(1)
if __name__ == "__main__":
app = web.application(urls, globals())
# open the NIA, or exit with a failure code
nia = NIA.NIA()
if not nia.open():
sys.exit(1)
# start collecting data
milliseconds = 50
nia_data = NIA.NiaData(nia, milliseconds)
# kick-off processing data from the NIA
updater = Updater()
update_thread = threading.Thread(target=updater.update)
update_thread.start()
# run the app
app.run()
# when web.py exits, close out the NIA and exit gracefully
nia.close()
sys.exit(0)
|
test_h5store.py
|
# Copyright (c) 2018 The Regents of the University of Michigan
# All rights reserved.
# This software is licensed under the BSD 3-Clause License.
import os
import sys
import pytest
import random
import string
import subprocess
import platform
from itertools import chain
from array import array
from contextlib import contextmanager
from time import time
from functools import partial
from platform import python_implementation
from multiprocessing.pool import ThreadPool
from contextlib import closing
from tempfile import TemporaryDirectory
from collections.abc import Mapping
from signac.core.h5store import H5Store, H5StoreClosedError, H5StoreAlreadyOpenError
from signac.errors import InvalidKeyError
PYPY = 'PyPy' in platform.python_implementation()
try:
import h5py # noqa
H5PY = True
except ImportError:
H5PY = False
try:
import pandas # noqa
import tables # noqa
PANDAS_AND_TABLES = True
except ImportError:
PANDAS_AND_TABLES = False
try:
import numpy # noqa
NUMPY = True
except ImportError:
NUMPY = False
FN_STORE = 'signac_test_h5store.h5'
WINDOWS = (sys.platform == 'win32')
@pytest.mark.skipif(not H5PY, reason='test requires the h5py package')
@pytest.mark.skipif(PYPY, reason='h5py not reliable on PyPy platform')
class TestH5StoreBase():
@pytest.fixture(autouse=True)
def setUp_base_h5Store(self, request):
self._tmp_dir = TemporaryDirectory(prefix='signac_test_h5store_')
request.addfinalizer(self._tmp_dir.cleanup)
self._fn_store = os.path.join(self._tmp_dir.name, FN_STORE)
self._fn_store_other = os.path.join(self._tmp_dir.name, 'other_' + FN_STORE)
def get_h5store(self, **kwargs):
return H5Store(filename=self._fn_store, **kwargs)
@contextmanager
def open_h5store(self, **kwargs):
with self.get_h5store(**kwargs) as h5s:
yield h5s
def get_other_h5store(self, **kwargs):
return H5Store(filename=self._fn_store_other, **kwargs)
@contextmanager
def open_other_h5store(self, **kwargs):
with self.get_other_h5store(**kwargs) as h5s:
yield h5s
def get_testdata(self, size=None):
if size is None:
size = 1024
return ''.join([random.choice(string.ascii_lowercase) for i in range(size)])
def assertEqual(self, a, b):
if hasattr(a, 'shape'):
if not NUMPY:
raise pytest.skip("This test requires the numpy package.")
numpy.testing.assert_array_equal(a, b)
else:
assert a == b
class TestH5StoreOpen(TestH5StoreBase):
def test_open(self):
h5s = self.get_h5store()
h5s.open()
h5s.close()
def test_open_read_only(self):
with self.open_h5store() as h5s:
h5s['foo'] = 'bar'
with self.open_h5store(mode='r') as h5s:
assert 'foo' in h5s
self.assertEqual(h5s['foo'], 'bar')
def test_open_write_only(self):
with self.open_h5store(mode='w') as h5s:
h5s['foo'] = 'bar'
assert 'foo' in h5s
self.assertEqual(h5s['foo'], 'bar')
def test_open_write_and_read_only(self):
with self.open_h5store(mode='w') as h5s_w:
with self.open_h5store(mode='r') as h5s_r:
assert 'foo' not in h5s_r
assert 'foo' not in h5s_w
h5s_w['foo'] = 'bar'
assert 'foo' in h5s_r
self.assertEqual(h5s_r['foo'], 'bar')
assert 'foo' in h5s_w
self.assertEqual(h5s_r['foo'], 'bar')
class TestH5Store(TestH5StoreBase):
valid_types = {
'int': 123,
'float': 123.456,
'string': 'foobar',
'none': None,
'float_array': array('f', [-1.5, 0, 1.5]),
'double_array': array('d', [-1.5, 0, 1.5]),
'int_array': array('i', [-1, 0, 1]),
'uint_array': array('I', [0, 1, 2]),
'dict': {
'a': 1,
'b': None,
'c': 'test',
},
}
if NUMPY:
valid_types.update({
'numpy_float_array': numpy.array([-1.5, 0, 1.5], dtype=float),
'numpy_int_array': numpy.array([-1, 0, 1], dtype=int),
})
def test_init(self):
self.get_h5store()
def test_invalid_filenames(self):
with pytest.raises(ValueError):
H5Store(None)
with pytest.raises(ValueError):
H5Store('')
with pytest.raises(ValueError):
H5Store(123)
def test_set_get(self):
with self.open_h5store() as h5s:
key = 'setget'
d = self.get_testdata()
h5s.clear()
assert not bool(h5s)
assert len(h5s) == 0
assert key not in h5s
with pytest.raises(KeyError):
h5s[key]
d_ = h5s[key] = d
self.assertEqual(d_, d)
assert bool(h5s)
assert len(h5s) == 1
assert key in h5s
self.assertEqual(h5s[key], d)
self.assertEqual(h5s.get(key), d)
assert h5s.get('nonexistent', 'default') == 'default'
def test_set_get_explicit_nested(self):
with self.open_h5store() as h5s:
key = 'setgetexplicitnested'
d = self.get_testdata()
assert 'a' not in h5s
ret = h5s.setdefault('a', dict())
assert 'a' in h5s
self.assertEqual(ret, h5s['a'])
assert hasattr(ret, '_store') # is an H5Group object
child1 = h5s['a']
child2 = h5s['a']
self.assertEqual(child1, child2)
assert type(child1) == type(child2)
assert not child1
assert not child2
child1[key] = d
assert child1
assert child2
assert key in child1
assert key in child2
self.assertEqual(child1, child2)
self.assertEqual(child1[key], d)
self.assertEqual(child2[key], d)
def test_repr(self):
with self.open_h5store() as h5s:
key = 'test_repr'
assert repr(h5s) == repr(eval(repr(h5s)))
h5s[key] = self.get_testdata()
assert repr(h5s) == repr(eval(repr(h5s)))
def test_str(self):
with self.open_h5store() as h5s:
key = 'test_repr'
h5s[key] = self.get_testdata()
str(h5s) # open
str(h5s) # closed
def test_len(self):
h5s = self.get_h5store()
assert len(h5s) == 0
h5s['test_len'] = True
assert len(h5s) == 1
def test_contains(self):
h5s = self.get_h5store()
assert 'test_contains' not in h5s
h5s['test_contains'] = True
assert 'test_contains' in h5s
def test_copy_value(self):
with self.open_h5store() as h5s:
key = 'copy_value'
key2 = 'copy_value2'
d = self.get_testdata()
h5s[key] = d
assert key in h5s
self.assertEqual(h5s[key], d)
assert key2 not in h5s
h5s[key2] = h5s[key]
assert key in h5s
self.assertEqual(h5s[key], d)
assert key2 in h5s
self.assertEqual(h5s[key2], d)
def test_iter(self):
with self.open_h5store() as h5s:
key1 = 'iter1'
key2 = 'iter2'
d1 = self.get_testdata()
d2 = self.get_testdata()
d = {key1: d1, key2: d2}
h5s.update(d)
assert key1 in h5s
assert key2 in h5s
for i, key in enumerate(h5s):
assert key in d
self.assertEqual(d[key], h5s[key])
assert i == 1
def test_delete(self):
with self.open_h5store() as h5s:
key = 'delete'
d = self.get_testdata()
h5s[key] = d
assert len(h5s) == 1
self.assertEqual(h5s[key], d)
del h5s[key]
assert len(h5s) == 0
with pytest.raises(KeyError):
h5s[key]
def test_update(self):
with self.open_h5store() as h5s:
key = 'update'
d = {key: self.get_testdata()}
h5s.update(d)
assert len(h5s) == 1
self.assertEqual(h5s[key], d[key])
def test_clear(self):
with self.open_h5store() as h5s:
h5s.clear()
key = 'clear'
d = self.get_testdata()
h5s[key] = d
assert len(h5s) == 1
self.assertEqual(h5s[key], d)
h5s.clear()
assert len(h5s) == 0
def test_reopen(self):
with self.open_h5store() as h5s:
key = 'reopen'
d = self.get_testdata()
h5s[key] = d
with self.open_h5store() as h5s:
assert len(h5s) == 1
self.assertEqual(h5s[key], d)
def test_open_twice(self):
h5s = self.get_h5store()
h5s.open()
try:
with pytest.raises(H5StoreAlreadyOpenError):
h5s.open()
finally:
h5s.close()
def test_open_reentry(self):
with self.open_h5store() as h5s:
with h5s:
pass
def test_reopen_explicit_open_close(self):
h5s = self.get_h5store().open()
key = 'reopen'
d = self.get_testdata()
h5s[key] = d
h5s.close()
h5s.open()
assert len(h5s) == 1
self.assertEqual(h5s[key], d)
h5s.close()
def test_write_valid_types(self):
with self.open_h5store() as h5s:
for k, v in self.valid_types.items():
h5s[k] = v
self.assertEqual(h5s[k], v)
def test_assign_valid_types_within_identical_file(self):
with self.open_h5store() as h5s:
for k, v in self.valid_types.items():
h5s[k] = v
self.assertEqual(h5s[k], v)
h5s[k] = h5s[k]
self.assertEqual(h5s[k], v)
k_other = k + '-other'
h5s[k_other] = h5s[k]
self.assertEqual(h5s[k], v)
self.assertEqual(h5s[k_other], v)
self.assertEqual(h5s[k], h5s[k_other])
def test_assign_valid_types_within_same_file(self):
with self.open_h5store() as h5s:
with self.open_h5store() as same_h5s:
for k, v in self.valid_types.items():
# First, store v under k
h5s[k] = v
self.assertEqual(h5s[k], v)
# Assign the same value under the same key.
try:
same_h5s[k] = h5s[k]
except H5StoreClosedError:
pass
self.assertEqual(h5s[k], v)
self.assertEqual(same_h5s[k], v)
self.assertEqual(h5s[k], same_h5s[k])
# Assign the same value, under a different key.
other_key = k + '-other'
try:
same_h5s[other_key] = h5s[k]
except H5StoreClosedError:
pass
self.assertEqual(h5s[other_key], v)
self.assertEqual(same_h5s[other_key], v)
self.assertEqual(h5s[k], same_h5s[other_key])
# Deleting the value assigned to the alternative key should have
# no effect on the value stored under the original key, regardless
# whether it was copied by reference (hard-link) or copied by
# value (true copy).
del same_h5s[other_key]
self.assertEqual(h5s[k], v)
self.assertEqual(same_h5s[k], v)
self.assertEqual(same_h5s[k], h5s[k])
def test_assign_valid_types_between_files(self):
with self.open_h5store() as h5s:
with self.open_other_h5store() as other_h5s:
for k, v in self.valid_types.items():
h5s[k] = v
self.assertEqual(h5s[k], v)
try:
other_h5s[k] = h5s[k]
except (OSError, RuntimeError) as error:
# Type of error may depend on platform or software versions
assert str(
error) == "Unable to create link (interfile hard links are not allowed)"
assert isinstance(v, (array, numpy.ndarray))
other_h5s[k] = h5s[k][()]
self.assertEqual(h5s[k], v)
self.assertEqual(other_h5s[k], v)
self.assertEqual(other_h5s[k], h5s[k])
def test_write_invalid_type(self):
class Foo(object):
pass
with self.open_h5store() as h5s:
key = 'write_invalid_type'
d = self.get_testdata()
h5s[key] = d
assert len(h5s) == 1
self.assertEqual(h5s[key], d)
d2 = Foo()
with pytest.raises(TypeError):
h5s[key + '2'] = d2
assert len(h5s) == 1
self.assertEqual(h5s[key], d)
def test_keys_with_dots(self):
with pytest.raises(InvalidKeyError):
with self.open_h5store() as h5s:
key = 'a.b'
d = self.get_testdata()
h5s[key] = d
self.assertEqual(h5s[key], d)
def test_keys_with_slashes(self):
# HDF5 uses slashes for nested keys internally
with self.open_h5store() as h5s:
key = 'a/b'
d = self.get_testdata()
h5s[key] = d
self.assertEqual(h5s[key], d)
self.assertEqual(h5s['a']['b'], d)
def test_value_none(self):
with self.get_h5store() as h5s:
key = 'a'
d = None
h5s[key] = d
self.assertEqual(h5s[key], d)
def test_set_get_attr_sync(self):
with self.get_h5store() as h5s:
assert len(h5s) == 0
assert 'a' not in h5s
with pytest.raises(AttributeError):
h5s.a
a = 0
h5s.a = a
assert len(h5s) == 1
assert 'a' in h5s
self.assertEqual(h5s.a, a)
self.assertEqual(h5s['a'], a)
a = 1
h5s.a = a
assert len(h5s) == 1
assert 'a' in h5s
self.assertEqual(h5s.a, a)
self.assertEqual(h5s['a'], a)
def check_nested(a, b):
assert len(h5s) == 1
assert len(h5s.a) == 1
assert 'a' in h5s
assert 'b' in h5s.a
self.assertEqual(h5s.a, a)
self.assertEqual(h5s['a']['b'], b)
self.assertEqual(h5s.a.b, b)
self.assertEqual(h5s['a'], a)
h5s.a = {'b': 0}
check_nested({'b': 0}, 0)
h5s.a.b = 1
check_nested({'b': 1}, 1)
h5s['a'] = {'b': 2}
check_nested({'b': 2}, 2)
h5s['a']['b'] = 3
check_nested({'b': 3}, 3)
def test_modify_nested(self):
with self.get_h5store() as h5s:
h5s.a = dict(b=True)
a = h5s.a
a['b'] = False
assert not h5s.a['b']
def test_invalid_attr(self):
h5s = self.get_h5store()
with pytest.raises(AttributeError):
h5s.a
with pytest.raises(AttributeError):
h5s._a
with pytest.raises(AttributeError):
h5s.__a__
def test_attr_reference_modification(self):
with self.get_h5store() as h5s:
assert len(h5s) == 0
assert 'a' not in h5s
with pytest.raises(AttributeError):
h5s.a
pairs = [(0, 1), (0.0, 1.0), ('0', '1'), (False, True)]
dict_pairs = [(dict(c=a), dict(c=b)) for a, b in pairs]
for A, B in chain(pairs, dict_pairs):
h5s.a = A
a = h5s.a
self.assertEqual(a, A)
self.assertEqual(h5s.a, A)
a = B
self.assertEqual(a, B)
self.assertEqual(h5s.a, A)
a = h5s['a']
self.assertEqual(a, A)
self.assertEqual(h5s.a, A)
a = B
self.assertEqual(a, B)
self.assertEqual(h5s.a, A)
# with nested values
h5s['a'] = dict(b=A)
self.assertEqual(h5s.a.b, A)
b = h5s.a.b
self.assertEqual(b, A)
self.assertEqual(h5s.a.b, A)
b = B
self.assertEqual(b, B)
self.assertEqual(h5s.a.b, A)
b = h5s['a']['b']
self.assertEqual(b, A)
self.assertEqual(h5s.a.b, A)
b = B
self.assertEqual(b, B)
self.assertEqual(h5s.a.b, A)
b = h5s['a'].b
self.assertEqual(b, A)
self.assertEqual(h5s.a.b, A)
b = B
self.assertEqual(b, B)
self.assertEqual(h5s.a.b, A)
class TestH5StoreNestedData(TestH5Store):
def get_testdata(self, size=None):
return dict(a=super(TestH5StoreNestedData, self).get_testdata(size))
def test_repr(self):
from signac.core.h5store import H5Store, H5Group # noqa:F401
with self.open_h5store() as h5s:
key = 'test_repr'
assert repr(h5s) == repr(eval(repr(h5s)))
h5s[key] = self.get_testdata()
assert repr(h5s[key]) == repr(eval(repr(h5s[key])))
assert repr(h5s) == repr(eval(repr(h5s)))
class TestH5StoreBytesData(TestH5Store):
def get_testdata(self, size=None):
return super(TestH5StoreBytesData, self).get_testdata(size=size).encode()
class TestH5StoreClosed(TestH5Store):
valid_types = {
'int': 123,
'float': 123.456,
'string': 'foobar',
'none': None,
'dict': {
'a': 1,
'b': None,
'c': 'test',
},
}
@contextmanager
def open_h5store(self, **kwargs):
yield self.get_h5store(**kwargs)
class TestH5StoreNestedDataClosed(TestH5StoreNestedData, TestH5StoreClosed):
pass
@pytest.mark.skipif(not PANDAS_AND_TABLES, reason='requires pandas and pytables')
@pytest.mark.skipif(not NUMPY, reason='requires numpy package')
class TestH5StorePandasData(TestH5Store):
def get_testdata(self, size=None):
if size is None:
size = 1024
return pandas.DataFrame(
numpy.random.rand(8, size), index=[string.ascii_letters[i] for i in range(8)])
def assertEqual(self, a, b):
if isinstance(a, Mapping):
assert isinstance(b, Mapping)
super(TestH5StorePandasData, self).assertEqual(a.keys(), b.keys())
for key in a:
self.assertEqual(a[key], b[key])
else:
try:
return (a == b).all()
except (AttributeError, ValueError):
return super(TestH5StorePandasData, self).assertEqual(a, b)
else:
assert isinstance(a, pandas.DataFrame)
@pytest.mark.skipif(not PANDAS_AND_TABLES, reason='requires pandas and pytables')
@pytest.mark.skipif(not NUMPY, reason='requires numpy package')
class TestH5StoreNestedPandasData(TestH5StorePandasData):
def get_testdata(self, size=None):
if size is None:
size = 1024
return dict(df=pandas.DataFrame(
numpy.random.rand(8, size), index=[string.ascii_letters[i] for i in range(8)]))
class TestH5StoreMultiThreading(TestH5StoreBase):
@pytest.mark.skip(reason="This test fails randomly on CI. "
"See https://github.com/glotzerlab/signac/pull/307")
def test_multithreading(self):
def set_x(x):
self.get_h5store()['x'] = x
with closing(ThreadPool(2)) as pool:
pool.map(set_x, range(100))
pool.join()
assert self.get_h5store()['x'] in set(range(100))
@pytest.mark.skip(reason="This test fails randomly on CI. "
"See https://github.com/glotzerlab/signac/pull/307")
def test_multithreading_with_error(self):
def set_x(x):
self.get_h5store()['x'] = x
if x == 50:
raise RuntimeError()
with pytest.raises(RuntimeError):
with closing(ThreadPool(2)) as pool:
pool.map(set_x, range(100))
pool.join()
assert self.get_h5store()['x'] in set(range(100))
def _read_from_h5store(filename, **kwargs):
from signac.core.h5store import H5Store
with H5Store(filename, **kwargs) as h5s:
list(h5s)
class TestH5StoreMultiProcessing(TestH5StoreBase):
def test_single_writer_multiple_reader_same_process(self):
with self.open_h5store() as writer:
with self.open_h5store(): # second writer
with self.open_h5store(mode='r') as reader1:
with self.open_h5store(mode='r') as reader2:
writer['test'] = True
assert writer['test']
assert reader1['test']
assert reader2['test']
@pytest.mark.skipif(WINDOWS, reason='This test fails for an unknown reason on Windows.')
def test_single_writer_multiple_reader_same_instance(self):
from multiprocessing import Process
def read():
p = Process(target=_read_from_h5store, args=(self._fn_store,), kwargs=(dict(mode='r')))
p.start()
p.join()
# Ensure the process succeeded
assert p.exitcode == 0
with self.open_h5store() as writer:
read()
writer['test'] = True
read()
def test_multiple_reader_different_process_no_swmr(self):
read_cmd = (r'python -c "from signac.core.h5store import H5Store; '
r'h5s = H5Store({}, mode=\"r\"); list(h5s); '
r'h5s.close()"').format(repr(self._fn_store))
with self.open_h5store():
pass # create file
try:
with self.open_h5store(mode='r'): # single reader
subprocess.check_output(read_cmd, shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as error:
print('\n', error.output.decode(), file=sys.stderr)
raise
def test_single_writer_multiple_reader_different_process_no_swmr(self):
read_cmd = (r'python -c "from signac.core.h5store import H5Store; '
r'h5s = H5Store({}, mode=\"r\"); list(h5s); '
r'h5s.close()"').format(repr(self._fn_store))
with self.open_h5store(): # single writer
with pytest.raises(subprocess.CalledProcessError):
subprocess.check_output(read_cmd, shell=True, stderr=subprocess.DEVNULL)
@pytest.mark.skipif(python_implementation() != 'CPython', reason='SWMR mode not available.')
def test_single_writer_multiple_reader_different_process_swmr(self):
read_cmd = (r'python -c "from signac.core.h5store import H5Store; '
r'h5s = H5Store({}, mode=\"r\", swmr=True); list(h5s); '
r'h5s.close()"').format(repr(self._fn_store))
with self.open_h5store(libver='latest') as writer:
with pytest.raises(subprocess.CalledProcessError):
subprocess.check_output(read_cmd, shell=True, stderr=subprocess.DEVNULL)
try:
with self.open_h5store(libver='latest') as writer:
writer.file.swmr_mode = True
subprocess.check_output(read_cmd, shell=True, stderr=subprocess.STDOUT)
writer['test'] = True
subprocess.check_output(read_cmd, shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as error:
print('\n', error.output.decode(), file=sys.stderr)
raise
@pytest.mark.skipif(not NUMPY, reason='requires numpy package')
@pytest.mark.skipif(python_implementation() != 'CPython', reason='Optimized for CPython.')
class TestH5StorePerformance(TestH5StoreBase):
max_slowdown_vs_native_factor = 1.25
@pytest.fixture
def setUp(self, setUp_base_h5Store):
value = self.get_testdata()
times = numpy.zeros(200)
for i in range(len(times)):
start = time()
with h5py.File(self._fn_store, mode='a') as h5file:
if i:
del h5file['_baseline']
h5file.create_dataset('_baseline', data=value, shape=None)
times[i] = time() - start
self.baseline_time = times
def assertSpeed(self, times):
msg = "\n{:>10}\t{:>8}\t{:>8}\t{:>4}\n".format("", "Measurement", "Benchmark", "Factor")
def format_row(text, reducer):
return "{:<10}\t{:.2e}\t{:.2e}\t{:.3}\n".format(
text, reducer(times), reducer(self.baseline_time),
reducer(times) / reducer(self.baseline_time))
msg += format_row('mean', numpy.mean)
msg += format_row('median', numpy.median)
msg += format_row('25 percentile', partial(numpy.percentile, q=25))
msg += format_row('75 percentile', partial(numpy.percentile, q=75))
assert numpy.percentile(times, 25) / numpy.percentile(self.baseline_time, 75) < \
self.max_slowdown_vs_native_factor, msg
@pytest.mark.skipif(WINDOWS, reason='This test fails for an unknown reason on Windows.')
@pytest.mark.skip(reason="This test fails randomly on CI. "
"See https://github.com/glotzerlab/signac/pull/307")
def test_speed_get(self, setUp):
times = numpy.zeros(200)
key = 'test_speed_get'
value = self.get_testdata()
self.get_h5store()[key] = value
self.assertEqual(self.get_h5store()[key], value) # sanity check
for i in range(len(times)):
start = time()
self.get_h5store()[key]
times[i] = time() - start
self.assertSpeed(times)
@pytest.mark.skipif(WINDOWS, reason='This test fails for an unknown reason on Windows.')
@pytest.mark.skip(reason="This test fails randomly on CI. "
"See https://github.com/glotzerlab/signac/pull/307")
def test_speed_set(self, setUp):
times = numpy.zeros(200)
key = 'test_speed_set'
value = self.get_testdata()
for i in range(len(times)):
start = time()
self.get_h5store()[key] = value
times[i] = time() - start
self.assertEqual(self.get_h5store()[key], value) # sanity check
self.assertSpeed(times)
class TestH5StorePerformanceNestedData(TestH5StorePerformance):
max_slowdown_vs_native_factor = 1.75
def get_testdata(self, size=None):
return dict(a=super(TestH5StorePerformanceNestedData, self).get_testdata(size))
@pytest.fixture
def setUp(self, setUp_base_h5Store):
value = TestH5StorePerformance.get_testdata(self)
times = numpy.zeros(200)
for i in range(len(times)):
start = time()
with h5py.File(self._fn_store, mode='a') as h5file:
if i:
del h5file['_basegroup']
h5file.create_group('_basegroup').create_dataset(
'_baseline', data=value, shape=None)
times[i] = time() - start
self.baseline_time = times
|
minicap.py
|
# -*- coding: utf-8 -*-
from Banner import Banner
from queue import Queue
import socket
import threading
#from itsdangerous import bytes_to_int
class Stream(object):
__instance = None
__mutex = threading.Lock()
def __init__(self, ip="127.0.0.1", port=1313, queue=Queue()):
self.IP = ip
self.PORT = port
self.banner = Banner()
self.ReadImageStreamTask = None
self.picture = queue
@staticmethod
def getBuilder(ip, port, queue):
"""Return a single instance of TestBuilder object """
if (Stream.__instance == None):
Stream.__mutex.acquire()
if (Stream.__instance == None):
Stream.__instance = Stream(ip, port, queue)
Stream.__mutex.release()
return Stream.__instance
def run(self):
        self.minicapSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # create a TCP socket for network communication with the minicap service
self.minicapSocket.connect((self.IP,self.PORT))
        self.ReadImageStreamTask = threading.Thread(target=self.ReadImageStream)
        self.ReadImageStreamTask.start()
def ReadImageStream(self):
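        # minicap stream format: a global banner (version, banner length, pid,
        # real/virtual display size, orientation, quirks) followed by frames, each
        # prefixed with a 4-byte little-endian body length.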
readBannerBytes = 0
bannerLength = 2
readFrameBytes = 0
frameBodyLength = 0
frameBody = b""
while True:
chunk = self.minicapSocket.recv(1024)
chunk_len = len(chunk)
if not chunk_len:
continue
cursor = 0
while cursor < chunk_len:
if readBannerBytes < bannerLength:
if readBannerBytes == 0:
self.banner.Version = chunk[cursor]
elif readBannerBytes == 1:
bannerLength = chunk[cursor]
print(bannerLength)
self.banner.Length = bannerLength
elif readBannerBytes in (2, 3, 4, 5):
self.banner.Pid += chunk[cursor] << (readBannerBytes - 2) * 8
elif readBannerBytes in (6, 7 ,8, 9):
self.banner.RealWidth += chunk[cursor] << (readBannerBytes - 6) * 8
elif readBannerBytes in (10, 11, 12, 13):
self.banner.RealHeight += chunk[cursor] << (readBannerBytes - 10) * 8
elif readBannerBytes in (14, 15, 16, 17):
self.banner.VirtualWidth += chunk[cursor] << (readBannerBytes - 14) * 8
elif readBannerBytes in (18, 19, 20, 21):
self.banner.VirtualHeight += chunk[cursor] << (readBannerBytes - 18) * 8
elif readBannerBytes == 22:
self.banner.Orientation += chunk[cursor]
elif readBannerBytes == 23:
self.banner.Quirks = chunk[cursor]
cursor += 1
readBannerBytes += 1
if readBannerBytes == bannerLength:
print(self.banner.toString())
elif readFrameBytes < 4:
frameBodyLength += chunk[cursor] << readFrameBytes * 8
cursor += 1
readFrameBytes += 1
else:
if chunk_len - cursor >= frameBodyLength:
frameBody += chunk[cursor:cursor + frameBodyLength]
cursor += frameBodyLength
self.picture.put(frameBody)
frameBodyLength = readFrameBytes = 0
frameBody = b""
else:
frameBody += chunk[cursor:chunk_len]
frameBodyLength -= chunk_len - cursor
readFrameBytes += chunk_len - cursor
cursor = chunk_len
# print a.picture
|
astra.py
|
import argparse
import os
import sys
import base64
import json
import requests
import time
import ast
import utils.logger as logger
import utils.logs as logs
from urllib.parse import urlparse
import hashlib
import webbrowser
import re
from core.zapscan import *
from core.parsers import *
from utils.logger import *
from core.login import APILogin
from utils.logger import logger
from utils.config import update_value,get_value,get_allvalues
from modules.cors import cors_main
from modules.auth import auth_check
from modules.rate_limit import rate_limit
from modules.csrf import csrf_check
from modules.jwt_attack import jwt_check
from modules.sqli import sqli_check
from modules.xss import xss_check
from modules.redirect import open_redirect_check
from modules.xxe import xxe_scan
from modules.crlf import crlf_check
from modules.security_headers_missing import security_headers_missing
from core.zap_config import zap_start
from multiprocessing import Process
from utils.db import Database_update
from utils.email_cron import email_start_cron
if os.getcwd().split('/')[-1] != 'API':
from API.api import main
dbupdate = Database_update()
def parse_collection(collection_name,collection_type):
if collection_type == 'Postman':
parse_data.postman_parser(collection_name)
else:
print("[-]Failed to Parse collection")
sys.exit(1)
def scan_postman_collection(file_name,scanid,new_url=None):
# Read and parse postman collection file
try:
parse_data = PostmanParser()
parse_data.postman_parser(file_name)
for data in parse_data.api_lst:
try:
url = data['url']['raw']
except:
url = data['url']
headers,method,body = data['headers'],data['method'],''
if headers:
try:
headers = add_headers(headers)
except:
pass
if data['body'] != '':
body = json.loads(base64.b64decode(data['body']))
if new_url != None and new_url != "NA":
uri = url[[m.start() for m in re.finditer('/',url)][2]: ]
new_url = new_url+uri
else:
new_url = url
p = Process(target=modules_scan,args=(new_url,method,headers,body,scanid),name='module-scan')
p.start()
email_start_cron()
return True
except:
return False
def scan_complete():
print("[+]Scan has been completed")
webbrowser.open("http://127.0.0.1:8094/reports.html#"+scanid)
while True:
pass
def generate_scanid():
global scanid
    scanid = hashlib.md5(str(time.time()).encode()).hexdigest()
return scanid
def add_headers(headers):
    # This function deals with adding custom headers and the auth value.
auth_type = get_value('config.property','login','auth_type')
if auth_type == 'cookie':
cookie = get_value('config.property','login','cookie')
if cookie:
cookie_dict = ast.literal_eval(cookie)
cookie_header = {'Cookie': cookie_dict['cookie']}
headers.update(cookie_header)
else:
auth_success = get_value('config.property','login','auth_success')
if auth_success == 'Y':
auth_success_token = get_value('config.property','login','auth_success_token')
#auth_request_header = get_value('config.property','login','auth_request_token')
auth_success_param = get_value('config.property','login','auth_success_param')
auth_header = {auth_success_param: auth_success_token }
headers.update(auth_header)
try:
custom_header = get_value('config.property','login','headers')
custom_header = ast.literal_eval(custom_header)
headers.update(custom_header)
except:
pass
return headers
def read_scan_policy():
try:
scan_policy = get_value('scan.property','scan-policy','attack')
attack = ast.literal_eval(scan_policy)
except Exception as e:
print(e)
return attack
def update_scan_status(scanid, module_name=None, count=None):
#Update scanning status and total scan of module into DB.
time.sleep(3)
if count != None:
dbupdate.update_scan_record({"scanid": scanid}, {"$set": {"total_scan": count}})
else:
dbupdate.update_scan_record({"scanid": scanid}, {"$set": {module_name: "Y"}})
def modules_scan(url,method,headers,body,scanid=None):
'''Scanning API using different engines '''
attack = read_scan_policy()
if attack is None:
print("Failed to start scan.")
sys.exit(1)
if scanid is None:
scanid = generate_scanid()
count = 0
for key,value in attack.items():
if value == 'Y' or value =='y':
count += 1
update_scan_status(scanid,"",count)
if attack['zap'] == "Y" or attack['zap'] == "y":
api_scan = zap_scan()
status = zap_start()
if status is True:
api_scan.start_scan(url,method,headers,body,scanid)
# Custom modules scan
if attack['cors'] == 'Y' or attack['cors'] == 'y':
handleException(lambda: cors_main(url,method,headers,body,scanid), "CORS")
update_scan_status(scanid, "cors")
if attack['Broken auth'] == 'Y' or attack['Broken auth'] == 'y':
handleException(lambda: auth_check(url,method,headers,body,scanid), "Authentication")
update_scan_status(scanid, "auth")
if attack['Rate limit'] == 'Y' or attack['Rate limit'] == 'y':
handleException(lambda: rate_limit(url,method,headers,body,scanid), "Rate limit")
update_scan_status(scanid, "Rate limit")
if attack['csrf'] == 'Y' or attack['csrf'] == 'y':
        handleException(lambda: csrf_check(url,method,headers,body,scanid), "CSRF")
update_scan_status(scanid, "csrf")
if attack['jwt'] == 'Y' or attack['jwt'] == 'y':
handleException(lambda: jwt_check(url,method,headers,body,scanid), "JWT")
update_scan_status(scanid, "jwt")
if attack['sqli'] == 'Y' or attack['sqli'] == 'y':
handleException(lambda: sqli_check(url,method,headers,body,scanid), "SQL injection")
update_scan_status(scanid, "sqli")
if attack['xss'] == 'Y' or attack['xss'] == 'y':
handleException(lambda: xss_check(url,method,headers,body,scanid), "XSS")
update_scan_status(scanid, "xss")
if attack['open-redirection'] == 'Y' or attack['open-redirection'] == 'y':
handleException(lambda: open_redirect_check(url,method,headers,body,scanid), "Open redirect")
update_scan_status(scanid, "open-redirection")
if attack['xxe'] == 'Y' or attack['xxe'] == 'y':
xxe = xxe_scan()
handleException(lambda: xxe.xxe_test(url,method,headers,body,scanid), "XXE")
update_scan_status(scanid, "xxe")
if attack['crlf'] == 'Y' or attack['crlf'] == 'y':
handleException(lambda: crlf_check(url,method,headers,body,scanid), "CRLF")
update_scan_status(scanid, "crlf")
if attack['security_headers'] == 'Y' or attack['security_headers'] == 'y':
handleException(lambda: security_headers_missing(url,method,headers,body,scanid), "security_headers")
update_scan_status(scanid, "security_headers")
def handleException(method, module_name):
try:
#raise Exception("handle exception")
method()
except Exception:
print("exception in", module_name)
def validate_data(url,method):
''' Validate HTTP request data and return boolean value'''
    validate_url = urlparse(url)
    http_method = ['GET','POST','DEL','DELETE','OPTIONS','PUT']
if method in http_method and bool(validate_url.scheme) is True:
validate_result = True
else:
validate_result = False
return validate_result
def scan_single_api(url, method, headers, body, api, scanid=None):
''' This function deals with scanning a single API. '''
if headers is None or headers == '':
headers = {'Content-Type': 'application/json'}
try:
# Convert header and body in dict format
if type(headers) != dict:
headers = ast.literal_eval(headers)
if body:
if type(body) != dict:
body = ast.literal_eval(body)
except:
return False
if method == '':
method = 'GET'
result = validate_data(url, method)
if result is False:
print("[-]Invalid Arguments")
return False
if api == "Y":
p = Process(target=modules_scan,args=(url,method,headers,body,scanid),name='module-scan')
p.start()
if api == "Y":
return True
else:
modules_scan(url,method,headers,body,scanid)
def scan_core(collection_type,collection_name,url,headers,method,body,loginurl,loginheaders,logindata,login_require):
''' Scan API through different engines '''
scanid = generate_scanid()
if collection_type and collection_name != None:
parse_collection(collection_name,collection_type)
if login_require is True:
api_login.verify_login(parse_data.api_lst)
for data in parse_data.api_lst:
try:
url = data['url']['raw']
except:
url = data['url']
headers,method,body = data['headers'],data['method'],''
if headers:
try:
headers = add_headers(headers)
except:
pass
if data['body'] != '':
body = json.loads(base64.b64decode(data['body']))
modules_scan(url,method,headers,body,scanid)
else:
print("%s [-]Invalid Collection. Please recheck collection Type/Name %s" %(api_logger.G, api_logger.W))
def get_arg(args=None):
parser = argparse.ArgumentParser(description='Astra - REST API Security testing Framework')
parser.add_argument('-c', '--collection_type',
help='Type of API collection',
default='Postman')
parser.add_argument('-n', '--collection_name',
help='Type of API collection')
parser.add_argument('-u', '--url',
help='URL of target API')
parser.add_argument('-headers', '--headers',
                        help='Custom headers. Example: {"token": "123"}')
parser.add_argument('-method', '--method',
help='HTTP request method',
default='GET',choices=('GET', 'POST', 'PUT','DELETE'))
parser.add_argument('-b', '--body',
help='Request body of API')
parser.add_argument('-l', '--loginurl',
help='URL of login API')
parser.add_argument('-H', '--loginheaders',
help='Headers should be in a dictionary format. Example: {"accesstoken": "axzvbqdadf"}')
parser.add_argument('-d', '--logindata',
help='login data of API')
results = parser.parse_args(args)
if len(args) == 0:
print("%sAt least one argument is needed to procced.\nFor further information check help: %spython astra.py --help%s"% (api_logger.R, api_logger.G, api_logger.W))
sys.exit(1)
return (results.collection_type,
results.collection_name,
results.url,
results.headers,
results.method,
results.body,
results.loginurl,
results.loginheaders,
results.logindata,
)
def main():
collection_type,collection_name,url,headers,method,body,loginurl,loginheaders,logindata = get_arg(sys.argv[1: ])
    if loginheaders is None:
        loginheaders = {'Content-Type': 'application/json'}
    # get_arg() exposes no separate login-method option, so assume POST for the login API
    # to keep the branches below from referencing an undefined name.
    loginmethod = 'POST'
    if collection_type and collection_name and loginurl and loginmethod and logindata:
# Login data is given as an input.
api_login.fetch_logintoken(loginurl,loginmethod,loginheaders,logindata)
login_require = False
elif collection_type and collection_name and loginurl:
# This will first find the given loginurl from collection and it will fetch auth token.
parse_collection(collection_name,collection_type)
try:
            loginurl, loginheaders, loginmethod, logindata = api_login.parse_logindata(loginurl)
except:
print("[-]%s Failed to detect login API from collection %s " %(api_logger.R, api_logger.W))
sys.exit(1)
api_login.fetch_logintoken(loginurl,loginmethod,loginheaders,logindata)
login_require = False
elif loginurl and loginmethod:
api_login.fetch_logintoken(loginurl,loginmethod,loginheaders,logindata)
login_require = False
elif collection_type and collection_name and headers:
#Custom headers
update_value('login','header',headers)
login_require = False
elif url and collection_name and headers:
#Custom headers
update_value('login','header',headers)
login_require = False
elif url:
if headers is None:
headers = {'Content-Type': 'application/json'}
if method is None:
method = "GET"
login_require = False
else:
login_require = True
if body:
body = ast.literal_eval(body)
# Configuring ZAP before starting a scan
get_auth = get_value('config.property','login','auth_type')
if collection_type and collection_name != None:
scan_core(collection_type,collection_name,url,headers,method,body,loginurl,loginheaders,logindata,login_require)
else:
scanid = generate_scanid()
scan_single_api(url, method, headers, body, "F", scanid)
scan_complete()
if __name__ == '__main__':
api_login = APILogin()
parse_data = PostmanParser()
api_logger = logger()
api_logger.banner()
main()
|
problem_20_3_synchronize_two_threads.py
|
from threading import Lock, Thread, Event
lock = Lock()
event = Event()
def is_odd_turn(event_state):
return event_state is False
def is_even_turn(event_state):
return event_state is True
def switch_to_even(event_object):
event_object.set()
def switch_to_odd(event_object):
event_object.clear()
def odd_thread():
count = 1
while count < 100:
if is_odd_turn(event.is_set()):
print(count, 'odd thread')
count += 2
switch_to_even(event)
def even_thread():
count = 0
while count < 100:
if is_even_turn(event.is_set()):
print(count, 'even thread')
count += 2
switch_to_odd(event)
def solution():
oddThread = Thread(target=odd_thread, daemon=True)
evenThread = Thread(target=even_thread, daemon=True)
event.set()
evenThread.start()
oddThread.start()
while oddThread.is_alive() or evenThread.is_alive():
pass
print('all threads finished!')
solution()
|
keras_rl.py
|
# -*- coding: utf-8 -*-
import os
import sys
import gym
import numpy as np
import random
import keras
from keras.models import Sequential
from keras.layers import Dense, Embedding, LSTM, Bidirectional
from keras.optimizers import Adam
from collections import deque
from src.coreutils_gym_env import CoreutilsEnv, CoreutilsInfo
from src.utils.config import uroboros_env, instrs_size, max_ops_len
from coreutils_callable_env import *
import src.utils.log as log
from multiprocessing import Process, Pipe
from time import sleep
def sub_process_env_step(env, conn):
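    # Child-process loop: receive an action over the pipe, step the environment and
    # send the (state, reward, done, info) result back until the episode finishes.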
done = False
while not done:
action = conn.recv()
res = env.step(action)
done = res[2]
conn.send(res)
conn.close()
class DQN:
def __init__(self, env, model_path=None):
self.env = env
self.memory = deque(maxlen=1000)
self.gamma = 1.0
self.epsilon = 1.0
self.epsilon_min = 0.01
self.epsilon_decay = 0.995
self.learning_rate = 0.0001
self.min_lr = 0.00001
self.learning_decay = 0.5
self.tau = .125
self._action_space = self.env.action_space
if model_path:
self.load_model(model_path)
else:
self.init_model()
def init_model(self):
self.model = self.create_model()
self.target_model = self.create_model()
def load_model(self, model_path):
try:
self.model = keras.models.load_model(model_path)
self.target_model = self.create_model()
#self.target_model = keras.models.load_model(model_path)
self.set_lr(self.learning_rate)
except Exception as e:
log.log('cannot load model from %s' % model_path, log.LogType.ERROR)
log.log(str(e), log.LogType.ERROR)
self.init_model()
def create_model(self):
model = Sequential()
state_shape = self.env.observation_space.shape
#model.add(Embedding(input_dim=instrs_size, output_dim=2, input_length=state_shape[0]))
model.add(Bidirectional(LSTM(units=512, return_sequences=True)))
model.add(LSTM(units=512))
model.add(Dense(units=512, activation='relu'))
model.add(Dense(units=self._action_space.n))
model.compile(loss="mean_squared_error",
optimizer=Adam(lr=self.learning_rate))
return model
def set_lr(self, lr):
current_lr = keras.backend.eval(self.model.optimizer.lr)
log.log('set learning rate from %f to %f' % (current_lr, lr), log.LogType.WARNING)
self.learning_rate = lr
keras.backend.set_value(self.model.optimizer.lr, self.learning_rate)
keras.backend.set_value(self.target_model.optimizer.lr, self.learning_rate)
def reduce_lr(self):
if self.learning_rate == self.min_lr:
return
new_learning_rate = self.learning_rate * self.learning_decay
if new_learning_rate < self.min_lr:
new_learning_rate = self.min_lr
log.log('reduce learning rate from %f to %f' % (self.learning_rate, new_learning_rate))
self.learning_rate = new_learning_rate
keras.backend.set_value(self.model.optimizer.lr, self.learning_rate)
keras.backend.set_value(self.target_model.optimizer.lr, self.learning_rate)
def act(self, state):
self.epsilon *= self.epsilon_decay
self.epsilon = max(self.epsilon_min, self.epsilon)
if np.random.random() < self.epsilon:
return self._action_space.sample()
return np.argmax(self.model.predict(state)[0])
def remember(self, state, action, reward, new_state, done):
self.memory.append([state, action, reward, new_state, done])
def replay(self):
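        # Experience replay: sample a random minibatch from memory, build Q-learning
        # targets with the target network, and fit the online model on them.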
batch_size = 128
if len(self.memory) < batch_size:
return
# batch_size = len(self.memory)
samples = random.sample(self.memory, batch_size)
X = []
y = []
for sample in samples:
state, action, reward, new_state, done = sample
target = self.target_model.predict(state)
if done:
target[0][action] = reward
else:
Q_future = max(self.target_model.predict(new_state)[0])
target[0][action] = reward + Q_future * self.gamma
X.append(state)
y.append(target)
#X = np.array(X).reshape(batch_size, instrs_size, max_ops_len)
X = np.array(X).reshape(batch_size, 1, max_ops_len)
y = np.array(y).reshape(batch_size, self._action_space.n)
log.log('training ...', log.LogType.INFO)
self.model.fit(X, y, epochs=1, verbose=0)
def target_train(self):
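        # Soft (Polyak) update: blend the online weights into the target network
        # using the mixing factor tau.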
weights = self.model.get_weights()
target_weights = self.target_model.get_weights()
for i in range(len(target_weights)):
target_weights[i] = weights[i] * self.tau + target_weights[i] * (1 - self.tau)
self.target_model.set_weights(target_weights)
def save_model(self, fn, iteration):
if not os.path.isdir(fn):
os.mkdir(fn)
file_path = os.path.join(fn, 'dqn_model-%d' % iteration)
self.model.save_weights(file_path)
def finish_an_episode(envs, dqn_agent):
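    # Run one episode in every environment at once: each env steps in its own child
    # process, actions are exchanged over pipes, and the resulting transitions are
    # stored in the agent's replay memory, interleaved with training updates.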
num = len(envs)
finished = [False for _ in range(num)]
#cur_states = [env.reset().reshape(1, instrs_size, max_ops_len) for env in envs]
cur_states = [env.reset().reshape(1, 1, max_ops_len) for env in envs]
conns = [Pipe() for _ in range(num)]
processes = [Process(target=sub_process_env_step, args=(envs[idx], conns[idx][1])) for idx in range(num)]
for p in processes:
p.start()
while True:
has_active = False
actions = [-1 for _ in range(num)]
for idx in range(num):
if not finished[idx]:
has_active = True
action = dqn_agent.act(cur_states[idx])
conns[idx][0].send(action)
sleep(0.5)
actions[idx] = action
if not has_active:
break
for idx in range(num):
if not finished[idx]:
new_state, reward, done, log_dict = conns[idx][0].recv()
#new_state = new_state.reshape(1, instrs_size, max_ops_len)
new_state = new_state.reshape(1, 1, max_ops_len)
if log_dict['reset']: # error process
finished[idx] = True
continue
else:
finished[idx] = done
if done:
conns[idx][0].close()
dqn_agent.remember(cur_states[idx], actions[idx], reward, new_state, done)
cur_states[idx] = new_state
for _ in range(3):
dqn_agent.replay() # internally iterates default (prediction) model
dqn_agent.target_train() # iterates target mode
for p in processes:
p.join()
def main():
num_iterations = 100
# assign the minimum _original_cycles_cost to all envs
min_original_cost = 1000000000000.0
for env in train_envs:
min_original_cost = min(env._original_cycles_cost, min_original_cost)
for env in train_envs:
env._original_cycles_cost = min_original_cost
# updateTargetNetwork = 1000
dqn_agent = DQN(env=train_envs[0], model_path=None)
for iteration in range(1, num_iterations+1):
for env in train_envs:
env.set_episode_count(iteration)
finish_an_episode(train_envs, dqn_agent)
if iteration % 5 == 0:
dqn_agent.save_model('./checkpoints', iteration)
if iteration % 20 == 0:
dqn_agent.reduce_lr()
if __name__ == "__main__":
main()
|
data_provider.py
|
import Queue
import random
import threading
import numpy
__all__ = [
'DataProvider',
'Disk', 'Batch',
]
class DataProvider(object):
def get(self, size):
        raise NotImplementedError
class Memory(DataProvider):
def __init__(self, filename, max_epoch=None, shuffle=False):
self._filename = filename
self._shuffle = shuffle
self._buffer = numpy.load(filename)
if shuffle:
numpy.random.shuffle(self._buffer)
self._index = 0
def get(self, size):
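        # Return `size` consecutive samples, wrapping around to the start of the
        # buffer (and reshuffling it if enabled) once the end is reached.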
if self._index + size < self._buffer.shape[0]:
samples = self._buffer[self._index:self._index + size]
self._index += size
return samples
else:
remain = self._buffer.shape[0] - self._index
remain_samples = self._buffer[self._index:]
if self._shuffle:
numpy.random.shuffle(self._buffer)
self._index = size - remain
if remain > 0:
return numpy.concatenate([remain_samples, self._buffer[:self._index]])
else:
return self._buffer[:self._index]
class Disk(DataProvider):
def __init__(self, filenames, max_epoch=None, shuffle=False, capacity=4):
self._filenames = filenames
self._shuffle = shuffle
self._capacity = capacity
self._max_epoch = max_epoch
self._queue = Queue.Queue(maxsize=capacity)
self._empty = False
self._process = threading.Thread(target=self._worker)
self._process.setDaemon(True)
self._process.start()
self._buffer = self._queue.get()
self._index = 0
def _worker(self):
for _ in xrange(self._max_epoch) if self._max_epoch is not None else iter(int, 1):
if self._shuffle:
random.shuffle(self._filenames)
for filename in self._filenames:
self._queue.put(numpy.load(filename))
self._empty = True
def _queue_get(self):
if not self._empty or not self._queue.empty():
return self._queue.get()
else:
raise StopIteration
def get(self, size):
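        # Collect `size` samples, pulling further file buffers from the background
        # worker's queue whenever the current buffer is exhausted.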
sample_buffers = []
if self._buffer.shape[0] - self._index == 0:
self._buffer = self._queue_get()
self._index = 0
while self._buffer.shape[0] - self._index < size:
sample_buffers.append(self._buffer[self._index:])
size -= self._buffer.shape[0] - self._index
self._buffer = self._queue_get()
self._index = 0
if size > 0:
sample_buffers.append(self._buffer[self._index:self._index + size])
self._index += size
return numpy.concatenate(sample_buffers)
class Batch(DataProvider):
def __init__(self, provider, batch_size, x_preprocess=None, y_preprocess=None, capacity=4):
self._provider = provider
self._batch_size = batch_size
if x_preprocess is None:
self._x_preprocess = []
elif isinstance(x_preprocess, list):
self._x_preprocess = x_preprocess
else:
self._x_preprocess = [x_preprocess]
if y_preprocess is None:
self._y_preprocess = []
elif isinstance(y_preprocess, list):
self._y_preprocess = y_preprocess
else:
self._y_preprocess = [y_preprocess]
self._queue = Queue.Queue(maxsize=capacity)
self._empty = False
self._process = threading.Thread(target=self._worker)
self._process.setDaemon(True)
self._process.start()
def _worker(self):
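        # Background worker: repeatedly pull a batch from the wrapped provider, apply
        # the x/y preprocessing callables and enqueue the (x, y) pair until the
        # provider is exhausted.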
while True:
try:
samples = self._provider.get(self._batch_size)
x = numpy.asarray([samples[i, 0] for i in xrange(self._batch_size)])
y = numpy.asarray([samples[i, 1] for i in xrange(self._batch_size)])
for func in self._x_preprocess:
x = func(x)
for func in self._y_preprocess:
y = func(y)
self._queue.put((x, y))
except StopIteration:
break
self._empty = True
def _queue_get(self):
if not self._empty or not self._queue.empty():
return self._queue.get()
else:
raise StopIteration
def get(self, size=1):
if size == 1:
return self._queue_get()
else:
return [self._queue_get() for _ in xrange(size)]
|
process_parallel.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2016-10-08
# @Author : Anh Hoang (anhhoang.work.mail@gmail.com)
# @Project : FSCognitive
# @Version : 1.0
from multiprocessing import Process
class ProcessParallel(object):
"""
    Run the given functions in parallel processes.
"""
def __init__(self, *jobs):
"""
"""
self.jobs = jobs
self.processes = []
def fork_processes(self):
"""
        Creates the process objects for the given function delegates
"""
for job in self.jobs:
proc = Process(target=job)
self.processes.append(proc)
def fork_threads(self):
for job in self.jobs:
self.processes.append(job)
def start_all(self):
"""
        Starts all the function processes together.
"""
for proc in self.processes:
proc.start()
def join_all(self):
"""
        Waits until all the function processes have finished.
"""
for proc in self.processes:
proc.join()
|
ImageRunSockets_v3_B.py
|
import argparse
import logging
import csv
from tf_pose import common
import cv2
import numpy as np
from tf_pose.estimator import TfPoseEstimator
from tf_pose.networks import get_graph_path, model_wh
import os
import threading
import subprocess
import face_recognition
from PIL import Image
from struct import unpack
import time
import sys
import glob
from collections import deque
import sys
from twisted.python import log
from twisted.internet import reactor
log.startLogging(sys.stdout)
from autobahn.twisted.websocket import WebSocketServerFactory
from autobahn.twisted.websocket import WebSocketServerProtocol
class MyServerProtocol(WebSocketServerProtocol):
def __init__(self, process_pose=False, process_faces=False, saveLocation='D:/AimeeImageResults/PulledImages/'):
super().__init__()
self.process_pose = process_pose
self.process_faces = process_faces
#Deletes all previously stored images on the computer
self.resetNewLaunch()
#List of faces that have been seen before
self.faces = []
#List of images that will be processed
self.images = []
#Number of images required to run the processing (face recognition)
self.imageWait = 1
#Number of images that have not been processed
self.imageCount = 0
        #Dictionary of ips with a corresponding list of image names
self.names = {}
#List of people seen and the time at which they were seen
self.peopleSeen = open("D:/AimeeImageResults/peopleSeen.txt","w")
self.curr_file_name= {}
self.thread1 = threading.Thread(target = self.recognizeAndReset)
self.thread1.start()
#Location to save the images and results
self.saveLocation = saveLocation
def onConnect(self, request):
ip = request.peer
if ip not in self.names:
self.names[ip]=[]
if ip not in self.curr_file_name:
self.curr_file_name[ip]=None
def onMessage(self, payload, isBinary):
        ## handle an incoming file name (text frame) or image payload (binary frame)
print("message received")
ip = self.peer
        #If the string file name is received
if not isBinary:
print(payload.decode('utf8'))
self.curr_file_name[ip] = payload.decode('utf8')
#If the data for the image is sent
else:
imageName = ip.replace(":","") + self.curr_file_name[ip]
self.imageCount += 1
#Save image to the specified save location
imgFile = open(os.path.join(self.saveLocation, imageName), 'wb')
imgFile.write(payload)
imgFile.close()
self.names[ip].append(imageName)
self.curr_file_name[ip] = None
print('image data')
#If there are enough images to process, process them
self.recognizeAndReset()
def onClose(self, wasClean, code, reason):
pass
#print("WebSocket connection closed: {}".format(reason))
def onOpen(self):
print("connection opened")
def recognizeAndReset(self):
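        # Once at least `imageWait` unprocessed images have accumulated, run face
        # recognition and pose estimation over the batch, then clear the image list.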
if self.imageCount >= self.imageWait:
self.imageCount = 0
prepImages(self)
faceRecognize(self.images,self)
openpose(self.images,self)
resetImages(self)
    #Deletes any images left over from a previous run so a new launch starts fresh
def resetNewLaunch(self):
#Delete Images in image folder
for doc in glob.glob("D:/AimeeImageResults/Images/tcp*"):
os.remove(doc)
for doc in glob.glob("D:/AimeeImageResults/PulledImages/tcp*"):
os.remove(doc)
#Prepares images for processing
def prepImages(self):
for name in self.names:
more = True
while more:
if len(self.names[name]) > 0:
                    #take name off of the name list so that it isn't processed again
imageName = self.names[name].pop(0)
self.images.append(imageName)
                    #Move images from the pulled images folder to the images folder so openpose can process the whole folder
os.rename("D:/AimeeImageResults/PulledImages/" + imageName, "D:/AimeeImageResults/Images/" + imageName)
else:
more = False
#Removes and resets list for the image obtaining and processing procedure to begin again
def resetImages(self):
self.images = []
#Delete Images in image folder
for doc in glob.glob("D:/AimeeImageResults/Images/tcp*"):
os.remove(doc)
#Runs each image through openpose and saves skeletal data
def openpose(images,self):
logger = logging.getLogger('TfPoseEstimator-Video')
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
fps_time = 0
model = 'mobilenet_thin'
w = 432
h = 368
    e = TfPoseEstimator(get_graph_path(model), target_size=(w, h))
logger.debug('cam read+')
#cam = cv2.VideoCapture(args.camera)
with open('D:/AimeeImageResults/bodyparts.csv', 'a', newline='') as csvfile:
myFields = ['frameNumber', 'personNumber', 'partNumber', 'xCoordinate', 'yCoordinate', 'score']
partwriter = csv.DictWriter(csvfile, delimiter=' ', quotechar='|', quoting=csv.QUOTE_MINIMAL, fieldnames=myFields)
partwriter.writeheader()
frameNum = 0
imCount = 0
for imageName in images:
#Pull image from server
#im = Image.open("D:/AimeeImageResults/Images/"+ imageName)
image = common.read_imgfile("D:/AimeeImageResults/Images/"+ imageName, None, None)
if image is None:
                    logger.error('Image cannot be read, path=%s' % ("D:/AimeeImageResults/Images/" + imageName))
sys.exit(-1)
t = time.time()
                humans = e.inference(image, resize_to_default=(w > 0 and h > 0), upsample_size=4.0)
elapsed = time.time() - t
imCount += 1
                logger.info('inference image: %s in %.4f seconds.' % ("D:/AimeeImageResults/Images/" + imageName, elapsed))
for personNum in range(len(humans)):
for partKey, part in humans[personNum].body_parts.items():
partwriter.writerow({'frameNumber':imageName,
'personNumber':personNum,
'partNumber':partKey,
'xCoordinate':part.x,
'yCoordinate':part.y,
'score':part.score})
                logger.debug('show+')
## #image = np.zeros(image.shape)#this gets rid of background picture
## image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)
## cv2.putText(image, "FPS: %f" % (1.0 / (time.time() - fps_time)),(10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
## cv2.imshow('tf-pose-estimation result', image)
## cv2.waitKey(0)
fps_time = time.time()
cv2.destroyAllWindows()
#Finds a face in an image, if there is one, and determines if that face has been seen before
    def faceRecognize(self, images):
imCount = 0
for imageName in images:
#Pull image from server
time1 = time.time()
image = face_recognition.load_image_file("D:/AimeeImageResults/Images/" + imageName)
im = Image.open("D:/AimeeImageResults/Images/"+ imageName)
imCount += 1
peopleSeen = open("D:/AimeeImageResults/peopleSeen.txt",'a')
print("Image open and retrieval took " + str(time.time() - time1) + " seconds")
time3 = time.time()
faceLocations = face_recognition.face_locations(image)
print("Face Locations took " + str(time.time() - time3) + " seconds")
#Crop and store each face found in the image
for face in range(0,len(faceLocations)):
top, right, bottom, left = faceLocations[face]
cropSection = (left, top, right, bottom)
cropped = im.crop(cropSection)
time2 = time.time()
cropEncodings = face_recognition.face_encodings(image)
print("Face encoding took " + str(time.time() - time2) + " seconds")
#See if a similar face was already found
for unknownFace in cropEncodings:
found = False
for knownFace in range(0,len(self.faces)):
                        #Compare face to existing ones
                        time4 = time.time()
                        if face_recognition.compare_faces([self.faces[knownFace]], unknownFace)[0] and not found:
print("Face Comparison took " + str(time.time() - time4) + " seconds")
found = True
print("Person " + str(knownFace) + ": found in image")
#Write which face was seen and the image it was seen in (thus giving the time the image was taken)
peopleSeen.write(str(knownFace) + "found at " + imageName + "\n")
#If no face match in the database was found, add face to database
if not found:
print("New face added")
self.faces.append(unknownFace)
peopleSeen.write("Person " + str(len(self.faces)-1) + " found (for the first time) at " + imageName + "\n")
#cropped.save("C:/Users/TIILTMAINPC/Desktop/NoahImageResults/Faces/" + "Face " + str(face) + "-" + ips[j] + imageName)
if not(imCount == 0):
im.close()
if __name__ == "__main__":
factory = WebSocketServerFactory()
factory.protocol = MyServerProtocol
reactor.listenTCP(45000, factory)
reactor.run()
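# A minimal client sketch for the frame protocol handled in onMessage above (editor's
# illustration, not part of the original server): each image is sent as a text frame
# carrying the file name, followed by a binary frame carrying the raw image bytes.
# It assumes the third-party `websocket-client` package; any client that can send
# text and binary WebSocket frames to port 45000 would do.
#
#   import websocket
#
#   ws = websocket.create_connection("ws://127.0.0.1:45000")
#   with open("frame0001.jpg", "rb") as f:
#       data = f.read()
#   ws.send("frame0001.jpg")   # text frame -> stored as curr_file_name for this peer
#   ws.send_binary(data)       # binary frame -> written under saveLocation
#   ws.close()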
|
utils.py
|
# Copyright 2020 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Integration Test Utils"""
import logging
import os
import shutil
import subprocess
import threading
from typing import List, Optional
import time
import podman.tests.errors as errors
logger = logging.getLogger("podman.service")
class PodmanLauncher:
"""Podman service launcher"""
def __init__(
self,
socket_uri: str,
podman_path: Optional[str] = None,
timeout: int = 0,
privileged: bool = False,
log_level: int = logging.WARNING,
) -> None:
"""create a launcher and build podman command"""
podman_exe: str = podman_path
if not podman_exe:
podman_exe = shutil.which('podman')
if podman_exe is None:
raise errors.PodmanNotInstalled()
self.socket_file: str = socket_uri.replace('unix://', '')
self.log_level = log_level
self.proc = None
self.reference_id = hash(time.monotonic())
self.cmd: List[str] = []
if privileged:
self.cmd.append('sudo')
self.cmd.append(podman_exe)
self.cmd.append(f"--log-level={logging.getLevelName(log_level).lower()}")
if os.environ.get("container") == "oci":
self.cmd.append("--storage-driver=vfs")
self.cmd.extend(
[
"system",
"service",
f"--time={timeout}",
socket_uri,
]
)
process = subprocess.run(
[podman_exe, "--version"], check=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
self.version = str(process.stdout.decode("utf-8")).strip().split()[2]
def start(self, check_socket=True) -> None:
"""start podman service"""
logger.info(
"Launching(%s) %s refid=%s",
self.version,
' '.join(self.cmd),
self.reference_id,
)
def consume_lines(pipe, consume_fn):
with pipe:
for line in iter(pipe.readline, b""):
consume_fn(line.decode("utf-8"))
def consume(line: str):
logger.debug(line.strip("\n") + f" refid={self.reference_id}")
self.proc = subprocess.Popen(self.cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
threading.Thread(target=consume_lines, args=[self.proc.stdout, consume]).start()
if not check_socket:
return
# wait for socket to be created
timeout = time.monotonic() + 30
while not os.path.exists(self.socket_file):
if time.monotonic() > timeout:
raise subprocess.TimeoutExpired("podman service ", timeout)
time.sleep(0.2)
def stop(self) -> None:
"""stop podman service"""
if not self.proc:
return
self.proc.terminate()
try:
return_code = self.proc.wait(timeout=15)
except subprocess.TimeoutExpired:
self.proc.kill()
return_code = self.proc.wait()
self.proc = None
logger.info("Command return Code: %d refid=%s", return_code, self.reference_id)
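# Typical use in an integration test (editor's sketch; the socket path below is
# illustrative, not mandated by this module):
#
#   launcher = PodmanLauncher("unix:///tmp/podman-itest.sock", timeout=60)
#   launcher.start()   # spawns `podman system service` and waits for the socket file
#   try:
#       ...            # exercise the API through the unix socket
#   finally:
#       launcher.stop()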
|
browser.py
|
#!/usr/bin/python3.7
from selenium.webdriver import Firefox,FirefoxProfile
from selenium.webdriver.firefox.options import Options
from user_agent import generate_user_agent
from core.color import *
from core.module_utils import *
from core import Settings
import os, pickle, json, time, threading, functools, traceback
# core/sessions.json holds metadata for every captured session (id, module name, url, session type, useragent) plus the path to the cookie/localStorage dump that was saved with pickle
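# Illustrative entry (editor's sketch mirroring the structure written by
# save_cookie/save_localstorage; the values below are placeholders, not real data):
#
#   {
#     "0": {
#       "name": "example_module",
#       "web_url": "https://example.com/login",
#       "session_type": "cookie",
#       "useragent": "Mozilla/5.0 ...",
#       "session_path": "sessions/Mon-Jan--1-00:00:00-2024.session"
#     }
#   }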
def generate_profile(useragent="(default)"):
profile = FirefoxProfile()
if useragent.strip().lower()=="(default)":
status("Using the default useragent")
return profile
elif useragent.strip().lower()=="(random)":
random_useragent = generate_user_agent(os=('mac', 'linux'))
profile.set_preference("general.useragent.override", random_useragent) # To make our useragent random
status("Using random useragent "+random_useragent)
return profile
else:
profile.set_preference("general.useragent.override", useragent)
status("Using useragent "+useragent)
return profile
def Run_inside_thread(thread_name):
def hook(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
thread = threading.Thread(target=func, name=thread_name, args=args, kwargs=kwargs)
thread.daemon = True
thread.start()
return wrapper
return hook
class headless_browsers:
    # Here we create invisible browsers, fast and in an organized way, without repeating browsers for the same module
def __init__(self):
self.opts = Options()
self.opts.add_argument("--headless") # To make firefox invisible of course (Headless)
self.browsers = {} # Here we save all the browsers we create so we can control and use later
self.useragent = ""
self.sessions_file = os.path.join("core","sessions.json")
def new_session(self, module_name, url, useragent="(random)"):
if self.browsers!={} and module_name in list(self.browsers.keys()) and self.browsers[module_name]["Status"]:
return {"Status":"Duplicate"}
else:
new_headless = {module_name:{"host":"","port":""}}
new_headless[module_name]["url"] = url
            if not useragent.strip(): # This if condition is useless because the module won't let the useragent be empty, but I will leave it just in case...
return {"Status":"Invalid useragent"}
else:
profile = generate_profile(useragent)
try:
#TODO
new_headless[module_name]["Controller"] = None
if Settings.debug:
new_headless[module_name]["Controller"] = Firefox(profile)#options=self.opts) # Inserting the browser object
else:
new_headless[module_name]["Controller"] = Firefox(profile, options=self.opts) # Inserting the browser object
except Exception as e:
if Settings.debug:
print(" Exception: "+str(e))
print(" Trackback: ")
traceback.print_exc()
return {"Status":"Failed"}
else:
new_headless[module_name]["Status"] = "Success"
self.browsers.update(new_headless)
new_headless[module_name]["Controller"].get(url)
self.useragent = new_headless[module_name]["Controller"].execute_script("return navigator.userAgent;")
return new_headless[module_name]
@Run_inside_thread("Sessions catcher thread")
def create_listener(self, module_name, change_identifier, session_type):
# If I used another function to run this one as thread, python would be upset :D
# So I'm using a decorator and also it looks cooler :D
try:
status(f"Waiting for sessions on {module_name}")
controller = self.browsers[module_name]["Controller"]
if controller:
while self.browsers[module_name]["Status"] == "Success":
null = controller.find_elements_by_xpath(change_identifier)
if not null:
# If we got here then that means we got session
print()
status(f"Got session on {module_name} module")
if session_type.lower() == "localstorage":
self.save_localstorage(module_name)
else:
self.save_cookie(module_name)
if Settings.verbose:
status("Reseting browser cookies and localStorage to start over..")
#self.restart_session(self.browsers[module_name])
controller.delete_all_cookies()
controller.execute_script("window.localStorage.clear()")
controller.refresh()
if Settings.verbose:
status("Session reset successfully")
time.sleep(5)
else:
time.sleep(5)
else:
error(f"Browser controller hasn't been created [{module_name}]")
except:
return
@Run_inside_thread("QR updater thread")
def website_qr(self, module_name, img_xpath):
# Always download the QR image from the site to use it in the webserver
status(f"Running a thread to keep the QR image [{module_name}]")
controller = self.browsers[module_name]["Controller"]
if controller:
while self.browsers[module_name]["Status"] == "Success":
try:
misc.Screenshot(controller, img_xpath, module_name)
#if Settings.verbose: status(f"QR code image updated! [{module_name}]")
time.sleep(3)
except:
time.sleep(1)
else:
error(f"Browser controller hasn't been created [{module_name}]")
@Run_inside_thread("Idle detector thread")
def check_img(self, module_name, button_xpath):
# Checks if QR image got blocked by a reloading button and click it
status(f"Running a thread to detect Idle once it happens then click the QR reload button [{module_name}]")
controller = self.browsers[module_name]["Controller"]
if controller:
while self.browsers[module_name]["Status"] == "Success":
try:
btn = controller.find_element_by_xpath(button_xpath) # now it should work
# If we got here then that means we got the button
if Settings.verbose: status(f"Idle detected, Reloading QR code image [{module_name}]")
btn.click()
time.sleep(5)
except:
time.sleep(1) # Yeah we need to be fast
else:
error(f"Browser controller hasn't been created [{module_name}]")
@Run_inside_thread("Webserver manager thread")
def serve_module(self, module_name, host, port):
# Start a webserver for module and automatically close it when module closed
status(f"Initializing webserver... [{module_name}]")
self.browsers[module_name]["host"] = "http://"+host
self.browsers[module_name]["port"] = str(port)
webserver = server(name=module_name,port=port)
webserver.start_serving(host)
while self.browsers[module_name]["Status"] == "Success":
time.sleep(1)
# Well, the module got stopped
webserver.stop_web_server()
def save_localstorage(self,module_name):
browser = self.browsers[module_name]["Controller"]
session_file_name = os.path.join( "sessions",time.ctime().replace(" ","-") )+".session"
session_file = open(session_file_name,"wb")
pickle.dump( browser.execute_script("return localStorage"), session_file)
session_file.close()
if Settings.debug:
status("localStorage data saved in "+session_file_name)
# Now let's save session details into sessions file
with open( self.sessions_file ) as f:
try:
sessions = json.load(f)
except:
sessions = {}
for i in range(0,1000):
if str(i) not in list(sessions.keys()):
session_id = str(i)
break
session = {
session_id:{
"name":module_name,
"web_url":self.browsers[module_name]["url"],
"session_type":"localStorage",
"useragent":self.useragent,
"session_path":session_file_name
}
}
sessions.update(session)
f = open( self.sessions_file,"w" )
json.dump(sessions, f, indent=2)
f.close()
status("Session saved successfully")
def save_cookie(self,module_name):
# First let's save the browser cookies before anything
browser = self.browsers[module_name]["Controller"]
session_file_name = os.path.join( "sessions",time.ctime().replace(" ","-") )+".session"
session_file = open(session_file_name,"wb")
pickle.dump( browser.get_cookies(), session_file)
session_file.close()
if Settings.debug:
status("Cookies saved in "+session_file_name)
# Now let's save session details into sessions file
with open( self.sessions_file ) as f:
try:
sessions = json.load(f)
except:
sessions = {}
for i in range(0,1000):
if str(i) not in list(sessions.keys()):
session_id = str(i)
break
session = {
session_id:{
"name":module_name,
"web_url":self.browsers[module_name]["url"],
"session_type":"cookie",
"useragent":self.useragent,
"session_path":session_file_name
}
}
sessions.update(session)
f = open( self.sessions_file,"w" )
json.dump(sessions, f, indent=2)
f.close()
status("Session saved successfully")
def close_all(self):
        if self.browsers!={}: # I'm using this comparison because it's faster than comparing with the keys length btw
for module_name in list(self.browsers.keys()):
try:
self.browsers[module_name]["Controller"].close() # To close the browser
                except: # Someone played with the browser so it lost control lol
                    pass
                self.browsers[module_name]["Controller"] = None # Resetting the browser controller
self.browsers[module_name]["Status"] = None # To close any listener working on this browser
def close_job(self, module_name):
if self.browsers!={}:
if module_name in list(self.browsers.keys()):
try:
self.browsers[module_name]["Controller"].close() # To close the browser
                except: # Someone played with the browser so it lost control lol
                    pass
                self.browsers[module_name]["Controller"] = None # Resetting the browser controller
self.browsers[module_name]["Status"] = None # To close any listener working on this browser
class visible_browsers:
# Here we open sessions for user with cookies we already have from sessions
def __init__(self):
self.browsers = []
self.sessions_file = os.path.join("core","sessions.json")
def load_localstorage(self, session_id):
sessions = json.load(open( self.sessions_file ))
storage_path = sessions[str(session_id)]["session_path"]
url = sessions[str(session_id)]["web_url"]
# Setting useragent to the same one the session saved with
useragent = sessions[str(session_id)]["useragent"]
profile = FirefoxProfile()
profile.set_preference("general.useragent.override", useragent )
localStorage = pickle.load(open(storage_path, "rb"))
try:
browser = Firefox(profile)
except:
error("Couldn't open browser to view session!")
return
browser.get(url)
browser.delete_all_cookies()
browser.execute_script("window.localStorage.clear()") # clear the current localStorage
for key,value in localStorage.items():
browser.execute_script("window.localStorage.setItem(arguments[0], arguments[1]);", key, value)
status(f"Session {session_id} loaded")
browser.refresh()
self.browsers.append(browser)
def load_cookie(self, session_id):
sessions = json.load(open( self.sessions_file ))
cookie_path = sessions[str(session_id)]["session_path"]
url = sessions[str(session_id)]["web_url"]
# Setting useragent to the same one the session saved with
useragent = sessions[str(session_id)]["useragent"]
profile = FirefoxProfile()
profile.set_preference("general.useragent.override", useragent )
cookies = pickle.load(open(cookie_path, "rb"))
try:
browser = Firefox(profile)
except:
error("Couldn't open browser to view session!")
return
browser.get(url)
browser.delete_all_cookies()
browser.execute_script("window.localStorage.clear()") # clear the current localStorage
for cookie in cookies:
browser.add_cookie(cookie)
status(f"Session {session_id} loaded")
browser.refresh()
self.browsers.append(browser)
|
test_currentthreadscheduler.py
|
import pytest
import unittest
import threading
from datetime import timedelta
from time import sleep
from rx.scheduler import CurrentThreadScheduler
from rx.internal.basic import default_now
class TestCurrentThreadScheduler(unittest.TestCase):
def test_currentthread_singleton(self):
scheduler = [
CurrentThreadScheduler(),
CurrentThreadScheduler.singleton(),
CurrentThreadScheduler.singleton()
]
assert scheduler[0] is not scheduler[1]
assert scheduler[1] is scheduler[2]
gate = [threading.Semaphore(0), threading.Semaphore(0)]
scheduler = [None, None]
def run(idx):
scheduler[idx] = CurrentThreadScheduler.singleton()
gate[idx].release()
for idx in (0, 1):
threading.Thread(target=run, args=(idx,)).start()
gate[idx].acquire()
assert scheduler[0] is not None
assert scheduler[1] is not None
assert scheduler[0] is not scheduler[1]
def test_currentthread_extend(self):
class MyScheduler(CurrentThreadScheduler):
pass
scheduler = [
MyScheduler(),
MyScheduler.singleton(),
MyScheduler.singleton(),
CurrentThreadScheduler.singleton(),
]
assert scheduler[0] is not scheduler[1]
assert scheduler[1] is scheduler[2]
assert scheduler[1] is not scheduler[3]
def test_currentthread_now(self):
scheduler = CurrentThreadScheduler()
diff = scheduler.now - default_now()
assert abs(diff) < timedelta(milliseconds=5)
def test_currentthread_now_units(self):
scheduler = CurrentThreadScheduler()
diff = scheduler.now
sleep(1.1)
diff = scheduler.now - diff
assert timedelta(milliseconds=1000) < diff < timedelta(milliseconds=1300)
def test_currentthread_schedule(self):
scheduler = CurrentThreadScheduler()
ran = False
def action(scheduler, state=None):
nonlocal ran
ran = True
scheduler.schedule(action)
assert ran is True
def test_currentthread_schedule_block(self):
scheduler = CurrentThreadScheduler()
ran = False
def action(scheduler, state=None):
nonlocal ran
ran = True
t = scheduler.now
scheduler.schedule_relative(0.2, action)
t = scheduler.now - t
assert ran is True
assert t >= timedelta(seconds=0.2)
def test_currentthread_schedule_error(self):
scheduler = CurrentThreadScheduler()
class MyException(Exception):
pass
def action(scheduler, state=None):
raise MyException()
with pytest.raises(MyException):
scheduler.schedule(action)
def test_currentthread_schedule_nested(self):
scheduler = CurrentThreadScheduler()
ran = False
def action(scheduler, state=None):
def inner_action(scheduler, state=None):
nonlocal ran
ran = True
return scheduler.schedule(inner_action)
scheduler.schedule(action)
assert ran is True
def test_currentthread_schedule_nested_order(self):
scheduler = CurrentThreadScheduler()
tests = []
def outer(scheduler, state=None):
def action1(scheduler, state=None):
tests.append(1)
def action2(scheduler, state=None):
tests.append(2)
CurrentThreadScheduler().schedule(action2)
CurrentThreadScheduler().schedule(action1)
def action3(scheduler, state=None):
tests.append(3)
CurrentThreadScheduler().schedule(action3)
scheduler.ensure_trampoline(outer)
assert tests == [1, 2, 3]
def test_currentthread_singleton_schedule_nested_order(self):
scheduler = CurrentThreadScheduler.singleton()
tests = []
def outer(scheduler, state=None):
def action1(scheduler, state=None):
tests.append(1)
def action2(scheduler, state=None):
tests.append(2)
scheduler.schedule(action2)
scheduler.schedule(action1)
def action3(scheduler, state=None):
tests.append(3)
scheduler.schedule(action3)
scheduler.ensure_trampoline(outer)
assert tests == [1, 3, 2]
def test_currentthread_ensuretrampoline(self):
scheduler = CurrentThreadScheduler()
ran1, ran2 = False, False
        def outer_action(scheduler, state=None):
def action1(scheduler, state=None):
nonlocal ran1
ran1 = True
scheduler.schedule(action1)
def action2(scheduler, state=None):
nonlocal ran2
ran2 = True
return scheduler.schedule(action2)
scheduler.ensure_trampoline(outer_action)
assert ran1 is True
assert ran2 is True
def test_currentthread_ensuretrampoline_nested(self):
scheduler = CurrentThreadScheduler()
ran1, ran2 = False, False
def outer_action(scheduler, state):
def inner_action1(scheduler, state):
nonlocal ran1
ran1 = True
scheduler.schedule(inner_action1)
def inner_action2(scheduler, state):
nonlocal ran2
ran2 = True
return scheduler.schedule(inner_action2)
scheduler.ensure_trampoline(outer_action)
assert ran1 is True
assert ran2 is True
def test_currentthread_ensuretrampoline_and_cancel(self):
scheduler = CurrentThreadScheduler()
ran1, ran2 = False, False
def outer_action(scheduler, state):
def inner_action1(scheduler, state):
nonlocal ran1
ran1 = True
def inner_action2(scheduler, state):
nonlocal ran2
ran2 = True
d = scheduler.schedule(inner_action2)
d.dispose()
return scheduler.schedule(inner_action1)
scheduler.ensure_trampoline(outer_action)
assert ran1 is True
assert ran2 is False
def test_currentthread_ensuretrampoline_and_canceltimed(self):
scheduler = CurrentThreadScheduler()
ran1, ran2 = False, False
def outer_action(scheduler, state):
def inner_action1(scheduler, state):
nonlocal ran1
ran1 = True
def inner_action2(scheduler, state):
nonlocal ran2
ran2 = True
t = scheduler.now + timedelta(seconds=0.5)
d = scheduler.schedule_absolute(t, inner_action2)
d.dispose()
return scheduler.schedule(inner_action1)
scheduler.ensure_trampoline(outer_action)
assert ran1 is True
assert ran2 is False
|
filerapp.py
|
"""
Module implementing simple application for a simple P2P network.
"""
import sys
import threading
import random
import traceback
import tkinter as tk
from btfiler import *
class BTGui(tk.Frame):
def __init__(self, serverhost, serverport, firstpeer=None, hops=2, maxpeers=5, master=None):
tk.Frame.__init__(self, master)
self.pack()
self.createWidgets()
self.master.title('File Sharing App - {}:{}'.format(serverhost, serverport))
self.btpeer = FilePeer(maxpeers=maxpeers, serverhost=serverhost, serverport=serverport)
self.bind("<Destroy>", self.__onDestroy)
if firstpeer is not None:
host, port = firstpeer.split(':')
self.btpeer.buildpeers(host, int(port), hops=hops)
self.update_peer_list()
t = threading.Thread(target=self.btpeer.main_loop)
t.setDaemon(True)
t.start()
self.btpeer.stabilizer = self.btpeer.check_live_peers
self.btpeer.startstabilizer(3)
self.after(3000, self.onTimer)
def onTimer(self):
self.onRefresh()
self.after(3000, self.onTimer)
def __onDestroy(self, event):
self.btpeer.shutdown = True
def update_peer_list(self):
if self.peerList.size() > 0:
self.peerList.delete(0, self.peerList.size() - 1)
for p in self.btpeer.peers:
self.peerList.insert(tk.END, p)
def update_file_list(self):
if self.fileList.size() > 0:
self.fileList.delete(0, self.fileList.size() - 1)
for f, p in self.btpeer.files.items():
if p is None:
p = '(local)'
self.fileList.insert(tk.END, '{}:{}'.format(f, p))
def createWidgets(self):
fileFrame = tk.Frame(self)
peerFrame = tk.Frame(self)
rebuildFrame = tk.Frame(self)
searchFrame = tk.Frame(self)
addfileFrame = tk.Frame(self)
pbFrame = tk.Frame(self)
fileFrame.grid(row=0, column=0, sticky=tk.N+tk.S)
peerFrame.grid(row=0, column=1, sticky=tk.N+tk.S)
pbFrame.grid(row=2, column=1)
addfileFrame.grid(row=3)
searchFrame.grid(row=4)
rebuildFrame.grid(row=3, column=1)
tk.Label(fileFrame, text='Available Files').grid()
tk.Label(peerFrame, text='Peer List').grid()
fileListFrame = tk.Frame(fileFrame)
fileListFrame.grid(row=1, column=0)
fileScroll = tk.Scrollbar(fileListFrame, orient=tk.VERTICAL)
fileScroll.grid(row=0, column=1, sticky=tk.N+tk.S)
self.fileList = tk.Listbox(fileListFrame, height=5, yscrollcommand=fileScroll.set)
self.fileList.grid(row=0, column=0, sticky=tk.N+tk.S)
fileScroll["command"] = self.fileList.yview
self.fetchButton = tk.Button(fileFrame, text='Fetch',
command=self.onFetch)
self.fetchButton.grid()
self.addfileEntry = tk.Entry(addfileFrame, width=25)
self.addfileButton = tk.Button(addfileFrame, text='Add',
command=self.onAdd)
self.addfileEntry.grid(row=0, column=0)
self.addfileButton.grid(row=0, column=1)
self.searchEntry = tk.Entry(searchFrame, width=25)
self.searchButton = tk.Button(searchFrame, text='Search',
command=self.onSearch)
self.searchEntry.grid(row=0, column=0)
self.searchButton.grid(row=0, column=1)
peerListFrame = tk.Frame(peerFrame)
peerListFrame.grid(row=1, column=0)
peerScroll = tk.Scrollbar(peerListFrame, orient=tk.VERTICAL)
peerScroll.grid(row=0, column=1, sticky=tk.N+tk.S)
self.peerList = tk.Listbox(peerListFrame, height=5,
yscrollcommand=peerScroll.set)
self.peerList.grid(row=0, column=0, sticky=tk.N+tk.S)
peerScroll["command"] = self.peerList.yview
self.removeButton = tk.Button(pbFrame, text='Remove',
command=self.onRemove)
self.refreshButton = tk.Button(pbFrame, text = 'Refresh',
command=self.onRefresh)
self.rebuildEntry = tk.Entry(rebuildFrame, width=25)
self.rebuildButton = tk.Button(rebuildFrame, text = 'Rebuild',
command=self.onRebuild)
self.removeButton.grid(row=0, column=0)
self.refreshButton.grid(row=0, column=1)
self.rebuildEntry.grid(row=0, column=0)
self.rebuildButton.grid(row=0, column=1)
def onAdd(self):
file = self.addfileEntry.get()
filename = file.strip()
if filename:
self.btpeer.add_local_file(filename)
self.addfileEntry.delete(0, len(file))
self.update_file_list()
def onSearch(self):
key = self.searchEntry.get()
self.searchEntry.delete(0, len(key))
ttl = 4
msgdata = '{} {} {}'.format(self.btpeer.myid, key, ttl)
for p in self.btpeer.peers:
self.btpeer.send2peer(p, QUERY.signal_name, msgdata)
def onFetch(self):
sels = self.fileList.curselection()
if len(sels) == 1:
sel = self.fileList.get(sels[0]).split(':')
if len(sel) > 2: # fname:host:port
fname, host, port = sel
                resp = self.btpeer.connect_and_send(host, int(port), FILEGET.signal_name, fname)
if len(resp) and resp[0][0] == REPLY.signal_name:
with open(fname, 'w', encoding='utf-8') as f:
f.write(resp[0][1])
self.btpeer.files[fname] = None # it's local now.
def onRemove(self):
sels = self.peerList.curselection()
if len(sels) == 1:
peerid = self.peerList.get(sels[0])
self.btpeer.send2peer(peerid, PEERQUIT.signal_name, self.btpeer.myid)
self.btpeer.removepeer(peerid)
def onRefresh(self):
self.update_peer_list()
self.update_file_list()
def onRebuild(self):
if not self.btpeer.maxpeer_searched():
peerid = self.rebuildEntry.get()
self.rebuildEntry.delete(0, len(peerid))
peerid = peerid.strip()
try:
host, port = peerid.split(':')
                self.btpeer.buildpeers(host, int(port), hops=3)
except:
traceback.print_exc()
def main():
if len(sys.argv) < 3:
        print('Syntax: server-host server-port [max-peers] [first-peer-host:first-peer-port]')
sys.exit(1)
serverhost = sys.argv[1]
serverport = int(sys.argv[2])
maxpeers = 5
peerid = None
if len(sys.argv) >= 4:
maxpeers = int(sys.argv[3])
if len(sys.argv) >= 5:
peerid = sys.argv[4]
app = BTGui(
serverhost=serverhost,
serverport=serverport,
maxpeers=maxpeers,
firstpeer=peerid,
)
app.mainloop()
if __name__ == '__main__':
main()
|
_keyhandler.py
|
import asyncio
import string
import sys
import threading
from itertools import chain
from precept._tools import is_windows
class Key: # pragma: no cover
def __init__(self, value, clean=None):
self.value = value
self.clean = clean
def __str__(self):
return self.clean or self.value
def __eq__(self, other):
if isinstance(other, Key):
return other.value == self.value
if isinstance(other, str):
return other == self.value
return False
def __hash__(self):
return hash(self.value)
def __repr__(self):
return f"<Key '{self.clean or self.value}'>"
class Keys: # pragma: no cover
SPACE = Key(' ', 'space')
BACKSPACE = Key('\x7f', 'backspace')
ENTER = Key('\r', 'enter')
ESCAPE = Key('\x1b', 'escape')
INSERT = Key('\x1b[2~', 'insert')
END = Key('\x1b[F', 'end')
HOME = Key('\x1b[H', 'home')
DELETE = Key('\x1b[3~', 'delete')
DOWN = Key('\x1b[B', 'down')
UP = Key('\x1b[A', 'up')
LEFT = Key('\x1b[D', 'left')
RIGHT = Key('\x1b[C', 'right')
F1 = Key('\x1bOP', 'F1')
F2 = Key('\x1bOQ', 'F2')
F3 = Key('\x1bOR', 'F3')
F4 = Key('\x1bOS', 'F4')
F5 = Key('\x1bO15~', 'F5')
F6 = Key('\x1bO17~', 'F6')
F7 = Key('\x1bO18~', 'F7')
F8 = Key('\x1bO19~', 'F8')
F9 = Key('\x1bO20~', 'F9')
F10 = Key('\x1bO21~', 'F10')
F11 = Key('\x1bO23~', 'F11')
F12 = Key('\x1bO24~', 'F12')
CTRL_C = Key('\x03', 'ctrl-c')
CTRL_A = Key('\x01', 'ctrl-a')
CTRL_ALT_A = Key('\x1b\x01', 'ctrl-alt-a')
CTRL_ALT_DEL = Key('\x1b[3^', 'ctrl-alt-del')
CTRL_B = Key('\x02', 'ctrl-b')
CTRL_D = Key('\x04', 'ctrl-d')
CTRL_E = Key('\x05', 'ctrl-e')
CTRL_F = Key('\x06', 'ctrl-f')
CTRL_Z = Key('\x1a', 'ctrl-z')
SPECIAL_KEYS = (
SPACE, BACKSPACE, ENTER, ESCAPE, INSERT, END, HOME,
DELETE, DOWN, UP, LEFT, RIGHT,
F1, F2, F3, F4, F5, F6, F7, F8, F9, F10, F11, F12,
CTRL_C, CTRL_A, CTRL_ALT_A, CTRL_ALT_DEL, CTRL_B,
CTRL_D, CTRL_E, CTRL_F, CTRL_Z
)
keys = {
x: Key(x) for x in chain(string.ascii_letters, string.digits)
}
keys.update({
x.value: x for x in SPECIAL_KEYS
})
@classmethod
def get_key(cls, value, default=None):
return cls.keys.get(value) or default
class GetChar:
def __init__(self): # pragma: no cover
if is_windows():
# pylint: disable=import-error
import msvcrt
self.get_char = msvcrt.getwch
else:
import termios
import tty
def get_char():
# pylint: disable=assignment-from-no-return
fileno = sys.stdin.fileno()
old = termios.tcgetattr(fileno)
try:
tty.setraw(fileno)
raw = termios.tcgetattr(fileno)
raw[1] = old[1]
termios.tcsetattr(fileno, termios.TCSADRAIN, raw)
char = sys.stdin.read(1)
finally:
termios.tcsetattr(fileno, termios.TCSADRAIN, old)
return char
self.get_char = get_char
def __call__(self): # pragma: no cover
return self.get_char()
async def deferred(self, executor): # pragma: no cover
return await executor.execute(self.get_char)
getch = GetChar()
class KeyHandler: # pragma: no cover
def __init__(self, handlers, loop=None, default_handler=None): # pragma: no cover # noqa: E501
self.handlers = handlers
self.handlers.update({
Keys.CTRL_C: lambda _, stop: stop(),
})
self.default_handler = default_handler
self.loop = loop or asyncio.get_event_loop()
self.queue = asyncio.Queue(loop=self.loop)
self.stop_event = asyncio.Event(loop=self.loop)
self._consumer = None
self._producer = None
def stop(self): # pragma: no cover
self.stop_event.set()
def read(self): # pragma: no cover
# Make non-blocking.
while not self.stop_event.is_set():
char = getch()
asyncio.ensure_future(self.queue.put(char), loop=self.loop)
async def handle(self): # pragma: no cover
while not self.stop_event.is_set():
await asyncio.sleep(0.00001)
try:
msg = self.queue.get_nowait()
except asyncio.QueueEmpty:
pass
else:
handler = self.handlers.get(msg)
if handler:
handler(msg, self.stop)
elif self.default_handler:
self.default_handler(msg, self.stop)
async def __aenter__(self): # pragma: no cover
self._producer = threading.Thread(target=self.read)
self._producer.daemon = True
self._producer.start()
self._consumer = asyncio.ensure_future(self.handle(), loop=self.loop)
return self
async def __aexit__(self, exc_type, exc_val, exc_tb): # pragma: no cover
self.stop()
await self._consumer
def print_keys(self, file=sys.stdout): # pragma: no cover
for k, v in self.handlers.items():
doc = getattr(v, '__doc__', getattr(v, '__name__', ''))
clean_key = Keys.get_key(k, k)
if k == Keys.CTRL_C:
continue
print(f'{clean_key}: {doc}', file=file)
if __name__ == '__main__': # pragma: no cover
main_loop = asyncio.get_event_loop()
async def main():
namespace = {
'i': 0
}
def hand(msg, stop):
namespace['i'] += 1
if namespace['i'] >= 10:
stop()
print(repr(msg), file=sys.stderr)
async with KeyHandler({}, default_handler=hand) as k:
k.print_keys()
print('Type 10 chars')
while not k.stop_event.is_set():
print('.', end='', flush=True)
await asyncio.sleep(1)
main_loop.run_until_complete(main())
|
mainwindow.py
|
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
Spyder, the Scientific Python Development Environment
=====================================================
Developed and maintained by the Spyder Project
Contributors
Copyright © Spyder Project Contributors
Licensed under the terms of the MIT License
(see spyder/__init__.py for details)
"""
# =============================================================================
# Stdlib imports
# =============================================================================
from __future__ import print_function
from collections import OrderedDict
from enum import Enum
import errno
import gc
import logging
import os
import os.path as osp
import shutil
import signal
import socket
import sys
import threading
import traceback
#==============================================================================
# Check requirements before proceeding
#==============================================================================
from spyder import requirements
requirements.check_path()
requirements.check_qt()
requirements.check_spyder_kernels()
#==============================================================================
# Third-party imports
#==============================================================================
from qtpy.compat import from_qvariant
from qtpy.QtCore import (QCoreApplication, Qt, QTimer, Signal, Slot,
qInstallMessageHandler)
from qtpy.QtGui import QColor, QKeySequence, QIcon
from qtpy.QtWidgets import (QApplication, QMainWindow, QMenu, QMessageBox,
QShortcut, QStyleFactory)
# Avoid a "Cannot mix incompatible Qt library" error on Windows platforms
from qtpy import QtSvg # analysis:ignore
# Avoid a bug in Qt: https://bugreports.qt.io/browse/QTBUG-46720
from qtpy import QtWebEngineWidgets # analysis:ignore
from qtawesome.iconic_font import FontError
#==============================================================================
# Local imports
# NOTE: Move (if possible) import's of widgets and plugins exactly where they
# are needed in MainWindow to speed up perceived startup time (i.e. the time
# from clicking the Spyder icon to showing the splash screen).
#==============================================================================
from spyder import __version__
from spyder import dependencies
from spyder.app.utils import (
create_application, create_splash_screen, create_window,
delete_debug_log_files, qt_message_handler, set_links_color, setup_logging,
set_opengl_implementation)
from spyder.api.plugin_registration.registry import PLUGIN_REGISTRY
from spyder.config.base import (_, DEV, get_conf_path, get_debug_level,
get_home_dir, get_module_source_path,
is_pynsist, running_in_mac_app,
running_under_pytest, STDERR)
from spyder.config.gui import is_dark_font_color
from spyder.config.main import OPEN_FILES_PORT
from spyder.config.manager import CONF
from spyder.config.utils import IMPORT_EXT, is_gtk_desktop
from spyder.otherplugins import get_spyderplugins_mods
from spyder.py3compat import configparser as cp, PY3, to_text_string
from spyder.utils import encoding, programs
from spyder.utils.icon_manager import ima
from spyder.utils.misc import (select_port, getcwd_or_home,
get_python_executable)
from spyder.utils.palette import QStylePalette
from spyder.utils.qthelpers import (create_action, add_actions, file_uri,
qapplication, start_file)
from spyder.utils.stylesheet import APP_STYLESHEET
from spyder.app.find_plugins import find_external_plugins, find_internal_plugins
# Spyder API Imports
from spyder.api.exceptions import SpyderAPIError
from spyder.api.plugins import (
Plugins, SpyderPlugin, SpyderPluginV2, SpyderDockablePlugin,
SpyderPluginWidget)
#==============================================================================
# Windows only local imports
#==============================================================================
set_attached_console_visible = None
is_attached_console_visible = None
set_windows_appusermodelid = None
if os.name == 'nt':
from spyder.utils.windows import (set_attached_console_visible,
set_windows_appusermodelid)
#==============================================================================
# Constants
#==============================================================================
# Module logger
logger = logging.getLogger(__name__)
# Keeping a reference to the original sys.exit before patching it
ORIGINAL_SYS_EXIT = sys.exit
# Get the cwd before initializing WorkingDirectory, which sets it to the one
# used in the last session
CWD = getcwd_or_home()
#==============================================================================
# Install Qt message handler
#==============================================================================
qInstallMessageHandler(qt_message_handler)
#==============================================================================
# Main Window
#==============================================================================
class MainWindow(QMainWindow):
"""Spyder main window"""
DOCKOPTIONS = (
QMainWindow.AllowTabbedDocks | QMainWindow.AllowNestedDocks |
QMainWindow.AnimatedDocks
)
SPYDER_PATH = get_conf_path('path')
SPYDER_NOT_ACTIVE_PATH = get_conf_path('not_active_path')
DEFAULT_LAYOUTS = 4
# Signals
restore_scrollbar_position = Signal()
sig_setup_finished = Signal()
all_actions_defined = Signal()
# type: (OrderedDict, OrderedDict)
sig_pythonpath_changed = Signal(object, object)
sig_main_interpreter_changed = Signal()
sig_open_external_file = Signal(str)
sig_resized = Signal("QResizeEvent")
sig_moved = Signal("QMoveEvent")
sig_layout_setup_ready = Signal(object) # Related to default layouts
# ---- Plugin handling methods
# ------------------------------------------------------------------------
def get_plugin(self, plugin_name, error=True):
"""
Return a plugin instance by providing the plugin class.
"""
if plugin_name in PLUGIN_REGISTRY:
return PLUGIN_REGISTRY.get_plugin(plugin_name)
if error:
raise SpyderAPIError(f'Plugin "{plugin_name}" not found!')
return None
def get_dockable_plugins(self):
"""Get a list of all dockable plugins."""
dockable_plugins = []
for plugin_name in PLUGIN_REGISTRY:
plugin = PLUGIN_REGISTRY.get_plugin(plugin_name)
if isinstance(plugin, (SpyderDockablePlugin, SpyderPluginWidget)):
dockable_plugins.append((plugin_name, plugin))
return dockable_plugins
def is_plugin_enabled(self, plugin_name):
"""Determine if a given plugin is going to be loaded."""
return PLUGIN_REGISTRY.is_plugin_enabled(plugin_name)
def is_plugin_available(self, plugin_name):
"""Determine if a given plugin is going to be loaded."""
return PLUGIN_REGISTRY.is_plugin_available(plugin_name)
def show_status_message(self, message, timeout):
"""
Show a status message in Spyder Main Window.
"""
status_bar = self.statusBar()
if status_bar.isVisible():
status_bar.showMessage(message, timeout)
def show_plugin_compatibility_message(self, message):
"""
Show a compatibility message.
"""
messageBox = QMessageBox(self)
messageBox.setWindowModality(Qt.NonModal)
messageBox.setAttribute(Qt.WA_DeleteOnClose)
messageBox.setWindowTitle(_('Compatibility Check'))
messageBox.setText(message)
messageBox.setStandardButtons(QMessageBox.Ok)
messageBox.show()
def add_plugin(self, plugin, external=False):
"""
Add plugin to plugins dictionary.
"""
self._PLUGINS[plugin.NAME] = plugin
if external:
self._EXTERNAL_PLUGINS[plugin.NAME] = plugin
else:
self._INTERNAL_PLUGINS[plugin.NAME] = plugin
def register_plugin(self, plugin_name, external=False, omit_conf=False):
"""
Register a plugin in Spyder Main Window.
"""
plugin = PLUGIN_REGISTRY.get_plugin(plugin_name)
self.set_splash(_("Loading {}...").format(plugin.get_name()))
logger.info("Loading {}...".format(plugin.NAME))
# Check plugin compatibility
is_compatible, message = plugin.check_compatibility()
plugin.is_compatible = is_compatible
plugin.get_description()
if not is_compatible:
            self.show_plugin_compatibility_message(message)
return
# Connect Plugin Signals to main window methods
plugin.sig_exception_occurred.connect(self.handle_exception)
plugin.sig_free_memory_requested.connect(self.free_memory)
plugin.sig_quit_requested.connect(self.close)
plugin.sig_redirect_stdio_requested.connect(
self.redirect_internalshell_stdio)
plugin.sig_status_message_requested.connect(self.show_status_message)
if isinstance(plugin, SpyderDockablePlugin):
plugin.sig_focus_changed.connect(self.plugin_focus_changed)
plugin.sig_switch_to_plugin_requested.connect(
self.switch_to_plugin)
plugin.sig_update_ancestor_requested.connect(
lambda: plugin.set_ancestor(self))
# Connect Main window Signals to plugin signals
self.sig_moved.connect(plugin.sig_mainwindow_moved)
self.sig_resized.connect(plugin.sig_mainwindow_resized)
# Register plugin
plugin._register(omit_conf=omit_conf)
if isinstance(plugin, SpyderDockablePlugin):
# Add dockwidget
self.add_dockwidget(plugin)
# Update margins
margin = 0
if CONF.get('main', 'use_custom_margin'):
margin = CONF.get('main', 'custom_margin')
plugin.update_margins(margin)
self.add_plugin(plugin, external=external)
if plugin_name == Plugins.Shortcuts:
for action, context, action_name in self.shortcut_queue:
self.register_shortcut(action, context, action_name)
self.shortcut_queue = []
logger.info("Registering shortcuts for {}...".format(plugin.NAME))
for action_name, action in plugin.get_actions().items():
context = (getattr(action, 'shortcut_context', plugin.NAME)
or plugin.NAME)
if getattr(action, 'register_shortcut', True):
if isinstance(action_name, Enum):
action_name = action_name.value
if Plugins.Shortcuts in PLUGIN_REGISTRY:
self.register_shortcut(action, context, action_name)
else:
self.shortcut_queue.append((action, context, action_name))
if isinstance(plugin, SpyderDockablePlugin):
try:
context = '_'
name = 'switch to {}'.format(plugin.CONF_SECTION)
shortcut = CONF.get_shortcut(context, name,
plugin_name=plugin.CONF_SECTION)
except (cp.NoSectionError, cp.NoOptionError):
shortcut = None
sc = QShortcut(QKeySequence(), self,
lambda: self.switch_to_plugin(plugin))
sc.setContext(Qt.ApplicationShortcut)
plugin._shortcut = sc
if Plugins.Shortcuts in PLUGIN_REGISTRY:
self.register_shortcut(sc, context, name)
self.register_shortcut(
plugin.toggle_view_action, context, name)
else:
self.shortcut_queue.append((sc, context, name))
self.shortcut_queue.append(
(plugin.toggle_view_action, context, name))
def unregister_plugin(self, plugin):
"""
Unregister a plugin from the Spyder Main Window.
"""
logger.info("Unloading {}...".format(plugin.NAME))
# Disconnect all slots
signals = [
plugin.sig_quit_requested,
plugin.sig_redirect_stdio_requested,
plugin.sig_status_message_requested,
]
for sig in signals:
try:
sig.disconnect()
except TypeError:
pass
# Unregister shortcuts for actions
logger.info("Unregistering shortcuts for {}...".format(plugin.NAME))
for action_name, action in plugin.get_actions().items():
context = (getattr(action, 'shortcut_context', plugin.NAME)
or plugin.NAME)
self.shortcuts.unregister_shortcut(action, context, action_name)
# Unregister switch to shortcut
shortcut = None
try:
context = '_'
name = 'switch to {}'.format(plugin.CONF_SECTION)
shortcut = CONF.get_shortcut(context, name,
plugin_name=plugin.CONF_SECTION)
except Exception:
pass
if shortcut is not None:
self.shortcuts.unregister_shortcut(
plugin._shortcut,
context,
"Switch to {}".format(plugin.CONF_SECTION),
)
# Remove dockwidget
logger.info("Removing {} dockwidget...".format(plugin.NAME))
self.remove_dockwidget(plugin)
plugin.unregister()
plugin._unregister()
def create_plugin_conf_widget(self, plugin):
"""
Create configuration dialog box page widget.
"""
config_dialog = self.prefs_dialog_instance
if plugin.CONF_WIDGET_CLASS is not None and config_dialog is not None:
conf_widget = plugin.CONF_WIDGET_CLASS(plugin, config_dialog)
conf_widget.initialize()
return conf_widget
@property
def last_plugin(self):
"""
Get last plugin with focus if it is a dockable widget.
If a non-dockable plugin has the focus this will return by default
the Editor plugin.
"""
# Needed to prevent errors with the old API at
# spyder/plugins/base::_switch_to_plugin
return self.layouts.get_last_plugin()
def maximize_dockwidget(self, restore=False):
"""
This is needed to prevent errors with the old API at
spyder/plugins/base::_switch_to_plugin.
See spyder-ide/spyder#15164
Parameters
----------
restore : bool, optional
If the current dockwidget needs to be restored to its unmaximized
state. The default is False.
"""
self.layouts.maximize_dockwidget(restore=restore)
def switch_to_plugin(self, plugin, force_focus=None):
"""
Switch to this plugin.
Notes
-----
This operation unmaximizes the current plugin (if any), raises
this plugin to view (if it's hidden) and gives it focus (if
possible).
"""
last_plugin = self.last_plugin
try:
# New API
if (last_plugin is not None
and last_plugin.get_widget().is_maximized
and last_plugin is not plugin):
self.layouts.maximize_dockwidget()
except AttributeError:
# Old API
if (last_plugin is not None and self.last_plugin._ismaximized
and last_plugin is not plugin):
self.layouts.maximize_dockwidget()
try:
# New API
if not plugin.toggle_view_action.isChecked():
plugin.toggle_view_action.setChecked(True)
plugin.get_widget().is_visible = False
except AttributeError:
# Old API
if not plugin._toggle_view_action.isChecked():
plugin._toggle_view_action.setChecked(True)
plugin._widget._is_visible = False
plugin.change_visibility(True, force_focus=force_focus)
def remove_dockwidget(self, plugin):
"""
Remove a plugin QDockWidget from the main window.
"""
self.removeDockWidget(plugin.dockwidget)
try:
self.widgetlist.remove(plugin)
except ValueError:
pass
def tabify_plugins(self, first, second):
"""Tabify plugin dockwigdets."""
self.tabifyDockWidget(first.dockwidget, second.dockwidget)
def tabify_plugin(self, plugin, default=None):
"""
Tabify the plugin using the list of possible TABIFY options.
Only do this if the dockwidget does not have more dockwidgets
in the same position and if the plugin is using the New API.
"""
def tabify_helper(plugin, next_to_plugins):
for next_to_plugin in next_to_plugins:
try:
self.tabify_plugins(next_to_plugin, plugin)
break
except SpyderAPIError as err:
logger.error(err)
# If TABIFY not defined use the [default]
tabify = getattr(plugin, 'TABIFY', [default])
if not isinstance(tabify, list):
next_to_plugins = [tabify]
else:
next_to_plugins = tabify
# Check if TABIFY is not a list with None as unique value or a default
# list
if tabify in [[None], []]:
return False
# Get the actual plugins from the names
next_to_plugins = [self.get_plugin(p) for p in next_to_plugins]
# First time plugin starts
if plugin.get_conf('first_time', True):
if (isinstance(plugin, SpyderDockablePlugin)
and plugin.NAME != Plugins.Console):
logger.info(
"Tabify {} dockwidget for the first time...".format(
plugin.NAME))
tabify_helper(plugin, next_to_plugins)
plugin.set_conf('enable', True)
plugin.set_conf('first_time', False)
else:
# This is needed to ensure new plugins are placed correctly
# without the need for a layout reset.
logger.info("Tabify {} dockwidget...".format(plugin.NAME))
# Check if plugin has no other dockwidgets in the same position
if not bool(self.tabifiedDockWidgets(plugin.dockwidget)):
tabify_helper(plugin, next_to_plugins)
return True
def handle_exception(self, error_data):
"""
This method will call the handle exception method of the Console
plugin. It is provided as a signal on the Plugin API for convenience,
        so that plugins do not need to explicitly call the Console plugin.
Parameters
----------
error_data: dict
The dictionary containing error data. The expected keys are:
>>> error_data= {
"text": str,
"is_traceback": bool,
"repo": str,
"title": str,
"label": str,
"steps": str,
}
Notes
-----
The `is_traceback` key indicates if `text` contains plain text or a
Python error traceback.
The `title` and `repo` keys indicate how the error data should
customize the report dialog and Github error submission.
The `label` and `steps` keys allow customizing the content of the
error dialog.
"""
if self.console:
self.console.handle_exception(error_data)
def __init__(self, splash=None, options=None):
QMainWindow.__init__(self)
qapp = QApplication.instance()
if running_under_pytest():
self._proxy_style = None
else:
from spyder.utils.qthelpers import SpyderProxyStyle
# None is needed, see: https://bugreports.qt.io/browse/PYSIDE-922
self._proxy_style = SpyderProxyStyle(None)
# Enabling scaling for high dpi
qapp.setAttribute(Qt.AA_UseHighDpiPixmaps)
# Set Windows app icon to use .ico file
if os.name == "nt":
qapp.setWindowIcon(ima.get_icon("windows_app_icon"))
self.default_style = str(qapp.style().objectName())
self.init_workdir = options.working_directory
self.profile = options.profile
self.multithreaded = options.multithreaded
self.new_instance = options.new_instance
if options.project is not None and not running_in_mac_app():
self.open_project = osp.normpath(osp.join(CWD, options.project))
else:
self.open_project = None
self.window_title = options.window_title
logger.info("Start of MainWindow constructor")
def signal_handler(signum, frame=None):
"""Handler for signals."""
sys.stdout.write('Handling signal: %s\n' % signum)
sys.stdout.flush()
QApplication.quit()
if os.name == "nt":
try:
import win32api
win32api.SetConsoleCtrlHandler(signal_handler, True)
except ImportError:
pass
else:
signal.signal(signal.SIGTERM, signal_handler)
if not DEV:
                # Make spyder quit when pressing ctrl+C in the console
# In DEV Ctrl+C doesn't quit, because it helps to
# capture the traceback when spyder freezes
signal.signal(signal.SIGINT, signal_handler)
# Use a custom Qt stylesheet
if sys.platform == 'darwin':
spy_path = get_module_source_path('spyder')
img_path = osp.join(spy_path, 'images')
mac_style = open(osp.join(spy_path, 'app', 'mac_stylesheet.qss')).read()
mac_style = mac_style.replace('$IMAGE_PATH', img_path)
self.setStyleSheet(mac_style)
# Shortcut management data
self.shortcut_data = []
self.shortcut_queue = []
# Handle Spyder path
self.path = ()
self.not_active_path = ()
self.project_path = ()
# New API
self._APPLICATION_TOOLBARS = OrderedDict()
self._STATUS_WIDGETS = OrderedDict()
self._PLUGINS = OrderedDict()
self._EXTERNAL_PLUGINS = OrderedDict()
self._INTERNAL_PLUGINS = OrderedDict()
        # Mapping of new plugin identifiers vs old attribute
        # names given for plugins or to prevent collisions with other
        # attributes, i.e. layout (Qt) vs layout (SpyderPluginV2)
self._INTERNAL_PLUGINS_MAPPING = {
'console': Plugins.Console,
'maininterpreter': Plugins.MainInterpreter,
'outlineexplorer': Plugins.OutlineExplorer,
'variableexplorer': Plugins.VariableExplorer,
'ipyconsole': Plugins.IPythonConsole,
'workingdirectory': Plugins.WorkingDirectory,
'projects': Plugins.Projects,
'findinfiles': Plugins.Find,
'layouts': Plugins.Layout,
}
self.thirdparty_plugins = []
# File switcher
self.switcher = None
# Preferences
self.prefs_dialog_size = None
self.prefs_dialog_instance = None
# Actions
self.undo_action = None
self.redo_action = None
self.copy_action = None
self.cut_action = None
self.paste_action = None
self.selectall_action = None
# Menu bars
self.edit_menu = None
self.edit_menu_actions = []
self.search_menu = None
self.search_menu_actions = []
self.source_menu = None
self.source_menu_actions = []
self.run_menu = None
self.run_menu_actions = []
self.debug_menu = None
self.debug_menu_actions = []
# TODO: Move to corresponding Plugins
self.main_toolbar = None
self.main_toolbar_actions = []
self.file_toolbar = None
self.file_toolbar_actions = []
self.run_toolbar = None
self.run_toolbar_actions = []
self.debug_toolbar = None
self.debug_toolbar_actions = []
self.menus = []
if running_under_pytest():
# Show errors in internal console when testing.
CONF.set('main', 'show_internal_errors', False)
self.CURSORBLINK_OSDEFAULT = QApplication.cursorFlashTime()
if set_windows_appusermodelid != None:
res = set_windows_appusermodelid()
logger.info("appusermodelid: %s", res)
# Setting QTimer if running in travis
test_app = os.environ.get('TEST_CI_APP')
if test_app is not None:
app = qapplication()
timer_shutdown_time = 30000
self.timer_shutdown = QTimer(self)
self.timer_shutdown.timeout.connect(app.quit)
self.timer_shutdown.start(timer_shutdown_time)
# Showing splash screen
self.splash = splash
if CONF.get('main', 'current_version', '') != __version__:
CONF.set('main', 'current_version', __version__)
# Execute here the actions to be performed only once after
# each update (there is nothing there for now, but it could
# be useful some day...)
# List of satellite widgets (registered in add_dockwidget):
self.widgetlist = []
# Flags used if closing() is called by the exit() shell command
self.already_closed = False
self.is_starting_up = True
self.is_setting_up = True
self.floating_dockwidgets = []
self.window_size = None
self.window_position = None
# To keep track of the last focused widget
self.last_focused_widget = None
self.previous_focused_widget = None
# Server to open external files on a single instance
# This is needed in order to handle socket creation problems.
# See spyder-ide/spyder#4132.
if os.name == 'nt':
try:
self.open_files_server = socket.socket(socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_TCP)
except OSError:
self.open_files_server = None
QMessageBox.warning(None, "Spyder",
_("An error occurred while creating a socket needed "
"by Spyder. Please, try to run as an Administrator "
"from cmd.exe the following command and then "
"restart your computer: <br><br><span "
"style=\'color: {color}\'><b>netsh winsock reset "
"</b></span><br>").format(
color=QStylePalette.COLOR_BACKGROUND_4))
else:
self.open_files_server = socket.socket(socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_TCP)
# Apply main window settings
self.apply_settings()
# To set all dockwidgets tabs to be on top (in case we want to do it
# in the future)
# self.setTabPosition(Qt.AllDockWidgetAreas, QTabWidget.North)
logger.info("End of MainWindow constructor")
# ---- Window setup
def _update_shortcuts_in_panes_menu(self, show=True):
"""
Display the shortcut for the "Switch to plugin..." on the toggle view
action of the plugins displayed in the Help/Panes menu.
Notes
-----
SpyderDockablePlugins provide two actions that function as a single
action. The `Switch to Plugin...` action has an assignable shortcut
via the shortcut preferences. The `Plugin toggle View` in the `View`
application menu, uses a custom `Toggle view action` that displays the
shortcut assigned to the `Switch to Plugin...` action, but is not
triggered by that shortcut.
"""
for plugin_name in PLUGIN_REGISTRY:
plugin = PLUGIN_REGISTRY.get_plugin(plugin_name)
if isinstance(plugin, SpyderDockablePlugin):
try:
# New API
action = plugin.toggle_view_action
except AttributeError:
# Old API
action = plugin._toggle_view_action
if show:
section = plugin.CONF_SECTION
try:
context = '_'
name = 'switch to {}'.format(section)
shortcut = CONF.get_shortcut(
context, name, plugin_name=section)
except (cp.NoSectionError, cp.NoOptionError):
shortcut = QKeySequence()
else:
shortcut = QKeySequence()
action.setShortcut(shortcut)
def setup(self):
"""Setup main window."""
PLUGIN_REGISTRY.sig_plugin_ready.connect(
lambda plugin_name, omit_conf: self.register_plugin(
plugin_name, omit_conf=omit_conf))
# TODO: Remove circular dependency between help and ipython console
# and remove this import. Help plugin should take care of it
from spyder.plugins.help.utils.sphinxify import CSS_PATH, DARK_CSS_PATH
logger.info("*** Start of MainWindow setup ***")
logger.info("Updating PYTHONPATH")
path_dict = self.get_spyder_pythonpath_dict()
self.update_python_path(path_dict)
logger.info("Applying theme configuration...")
ui_theme = CONF.get('appearance', 'ui_theme')
color_scheme = CONF.get('appearance', 'selected')
if ui_theme == 'dark':
if not running_under_pytest():
# Set style proxy to fix combobox popup on mac and qdark
qapp = QApplication.instance()
qapp.setStyle(self._proxy_style)
dark_qss = str(APP_STYLESHEET)
self.setStyleSheet(dark_qss)
self.statusBar().setStyleSheet(dark_qss)
css_path = DARK_CSS_PATH
elif ui_theme == 'light':
if not running_under_pytest():
# Set style proxy to fix combobox popup on mac and qdark
qapp = QApplication.instance()
qapp.setStyle(self._proxy_style)
light_qss = str(APP_STYLESHEET)
self.setStyleSheet(light_qss)
self.statusBar().setStyleSheet(light_qss)
css_path = CSS_PATH
elif ui_theme == 'automatic':
if not is_dark_font_color(color_scheme):
if not running_under_pytest():
# Set style proxy to fix combobox popup on mac and qdark
qapp = QApplication.instance()
qapp.setStyle(self._proxy_style)
dark_qss = str(APP_STYLESHEET)
self.setStyleSheet(dark_qss)
self.statusBar().setStyleSheet(dark_qss)
css_path = DARK_CSS_PATH
else:
light_qss = str(APP_STYLESHEET)
self.setStyleSheet(light_qss)
self.statusBar().setStyleSheet(light_qss)
css_path = CSS_PATH
# Set css_path as a configuration to be used by the plugins
CONF.set('appearance', 'css_path', css_path)
# Status bar
status = self.statusBar()
status.setObjectName("StatusBar")
status.showMessage(_("Welcome to Spyder!"), 5000)
# Switcher instance
logger.info("Loading switcher...")
self.create_switcher()
message = _(
"Spyder Internal Console\n\n"
"This console is used to report application\n"
"internal errors and to inspect Spyder\n"
"internals with the following commands:\n"
" spy.app, spy.window, dir(spy)\n\n"
"Please don't use it to run your code\n\n"
)
CONF.set('internal_console', 'message', message)
CONF.set('internal_console', 'multithreaded', self.multithreaded)
CONF.set('internal_console', 'profile', self.profile)
CONF.set('internal_console', 'commands', [])
CONF.set('internal_console', 'namespace', {})
CONF.set('internal_console', 'show_internal_errors', True)
# Working directory initialization
CONF.set('workingdir', 'init_workdir', self.init_workdir)
# Load and register internal and external plugins
external_plugins = find_external_plugins()
internal_plugins = find_internal_plugins()
all_plugins = external_plugins.copy()
all_plugins.update(internal_plugins.copy())
# Determine 'enable' config for the plugins that have it
enabled_plugins = {}
for plugin in all_plugins.values():
plugin_name = plugin.NAME
plugin_main_attribute_name = (
self._INTERNAL_PLUGINS_MAPPING[plugin_name]
if plugin_name in self._INTERNAL_PLUGINS_MAPPING
else plugin_name)
try:
if CONF.get(plugin_main_attribute_name, "enable"):
enabled_plugins[plugin_name] = plugin
PLUGIN_REGISTRY.set_plugin_enabled(plugin_name)
except (cp.NoOptionError, cp.NoSectionError):
enabled_plugins[plugin_name] = plugin
PLUGIN_REGISTRY.set_plugin_enabled(plugin_name)
# Instantiate internal Spyder 5 plugins
for plugin_name in internal_plugins:
if plugin_name in enabled_plugins:
PluginClass = internal_plugins[plugin_name]
if issubclass(PluginClass, SpyderPluginV2):
PLUGIN_REGISTRY.register_plugin(self, PluginClass,
external=False)
# Instantiate internal Spyder 4 plugins
for plugin_name in internal_plugins:
if plugin_name in enabled_plugins:
PluginClass = internal_plugins[plugin_name]
if issubclass(PluginClass, SpyderPlugin):
if plugin_name == Plugins.IPythonConsole:
plugin_instance = PLUGIN_REGISTRY.register_plugin(
self, PluginClass, external=False)
plugin_instance.sig_exception_occurred.connect(
self.handle_exception)
else:
plugin_instance = PLUGIN_REGISTRY.register_plugin(
self, PluginClass, external=False)
self.preferences.register_plugin_preferences(
plugin_instance)
# Instantiate external Spyder 5 plugins
for plugin_name in external_plugins:
if plugin_name in enabled_plugins:
PluginClass = external_plugins[plugin_name]
try:
plugin_instance = PLUGIN_REGISTRY.register_plugin(
self, PluginClass, external=True)
# These attributes come from spyder.app.find_plugins to
# add plugins to the dependencies dialog
module = PluginClass._spyder_module_name
package_name = PluginClass._spyder_package_name
version = PluginClass._spyder_version
description = plugin_instance.get_description()
dependencies.add(module, package_name, description,
version, None, kind=dependencies.PLUGIN)
except Exception as error:
print("%s: %s" % (PluginClass, str(error)), file=STDERR)
traceback.print_exc(file=STDERR)
self.set_splash(_("Loading old third-party plugins..."))
for mod in get_spyderplugins_mods():
try:
plugin = PLUGIN_REGISTRY.register_plugin(self, mod,
external=True)
if plugin.check_compatibility()[0]:
if hasattr(plugin, 'CONFIGWIDGET_CLASS'):
self.preferences.register_plugin_preferences(plugin)
if not hasattr(plugin, 'COMPLETION_PROVIDER_NAME'):
self.thirdparty_plugins.append(plugin)
# Add to dependencies dialog
module = mod.__name__
name = module.replace('_', '-')
if plugin.DESCRIPTION:
description = plugin.DESCRIPTION
else:
description = plugin.get_plugin_title()
dependencies.add(module, name, description,
'', None, kind=dependencies.PLUGIN)
except TypeError:
# Fixes spyder-ide/spyder#13977
pass
except Exception as error:
print("%s: %s" % (mod, str(error)), file=STDERR)
traceback.print_exc(file=STDERR)
# Set window title
self.set_window_title()
# Menus
# TODO: Remove when all menus are migrated to use the Main Menu Plugin
logger.info("Creating Menus...")
from spyder.plugins.mainmenu.api import (
ApplicationMenus, ToolsMenuSections, FileMenuSections)
mainmenu = self.mainmenu
self.edit_menu = mainmenu.get_application_menu("edit_menu")
self.search_menu = mainmenu.get_application_menu("search_menu")
self.source_menu = mainmenu.get_application_menu("source_menu")
self.source_menu.aboutToShow.connect(self.update_source_menu)
self.run_menu = mainmenu.get_application_menu("run_menu")
self.debug_menu = mainmenu.get_application_menu("debug_menu")
# Switcher shortcuts
self.file_switcher_action = create_action(
self,
_('File switcher...'),
icon=ima.icon('filelist'),
tip=_('Fast switch between files'),
triggered=self.open_switcher,
context=Qt.ApplicationShortcut,
id_='file_switcher')
self.register_shortcut(self.file_switcher_action, context="_",
name="File switcher")
self.symbol_finder_action = create_action(
self, _('Symbol finder...'),
icon=ima.icon('symbol_find'),
tip=_('Fast symbol search in file'),
triggered=self.open_symbolfinder,
context=Qt.ApplicationShortcut,
id_='symbol_finder')
self.register_shortcut(self.symbol_finder_action, context="_",
name="symbol finder", add_shortcut_to_tip=True)
def create_edit_action(text, tr_text, icon):
textseq = text.split(' ')
method_name = textseq[0].lower()+"".join(textseq[1:])
action = create_action(self, tr_text,
icon=icon,
triggered=self.global_callback,
data=method_name,
context=Qt.WidgetShortcut)
self.register_shortcut(action, "Editor", text)
return action
self.undo_action = create_edit_action('Undo', _('Undo'),
ima.icon('undo'))
self.redo_action = create_edit_action('Redo', _('Redo'),
ima.icon('redo'))
self.copy_action = create_edit_action('Copy', _('Copy'),
ima.icon('editcopy'))
self.cut_action = create_edit_action('Cut', _('Cut'),
ima.icon('editcut'))
self.paste_action = create_edit_action('Paste', _('Paste'),
ima.icon('editpaste'))
self.selectall_action = create_edit_action("Select All",
_("Select All"),
ima.icon('selectall'))
self.edit_menu_actions += [self.undo_action, self.redo_action,
None, self.cut_action, self.copy_action,
self.paste_action, self.selectall_action,
None] + self.editor.edit_menu_actions
switcher_actions = [
self.file_switcher_action,
self.symbol_finder_action
]
for switcher_action in switcher_actions:
mainmenu.add_item_to_application_menu(
switcher_action,
menu_id=ApplicationMenus.File,
section=FileMenuSections.Switcher,
before_section=FileMenuSections.Restart)
self.set_splash("")
# Toolbars
# TODO: Remove after finishing the migration
logger.info("Creating toolbars...")
toolbar = self.toolbar
self.file_toolbar = toolbar.get_application_toolbar("file_toolbar")
self.run_toolbar = toolbar.get_application_toolbar("run_toolbar")
self.debug_toolbar = toolbar.get_application_toolbar("debug_toolbar")
self.main_toolbar = toolbar.get_application_toolbar("main_toolbar")
# Tools + External Tools (some of this depends on the Application
# plugin)
logger.info("Creating Tools menu...")
spyder_path_action = create_action(
self,
_("PYTHONPATH manager"),
None, icon=ima.icon('pythonpath'),
triggered=self.show_path_manager,
tip=_("PYTHONPATH manager"),
id_='spyder_path_action')
from spyder.plugins.application.container import (
ApplicationActions, WinUserEnvDialog)
winenv_action = None
if WinUserEnvDialog:
winenv_action = ApplicationActions.SpyderWindowsEnvVariables
mainmenu.add_item_to_application_menu(
spyder_path_action,
menu_id=ApplicationMenus.Tools,
section=ToolsMenuSections.Tools,
before=winenv_action,
before_section=ToolsMenuSections.External
)
# Main toolbar
from spyder.plugins.toolbar.api import (
ApplicationToolbars, MainToolbarSections)
self.toolbar.add_item_to_application_toolbar(
spyder_path_action,
toolbar_id=ApplicationToolbars.Main,
section=MainToolbarSections.ApplicationSection
)
self.set_splash(_("Setting up main window..."))
# TODO: Migrate to use the MainMenu Plugin instead of list of actions
# Filling out menu/toolbar entries:
add_actions(self.edit_menu, self.edit_menu_actions)
add_actions(self.search_menu, self.search_menu_actions)
add_actions(self.source_menu, self.source_menu_actions)
add_actions(self.run_menu, self.run_menu_actions)
add_actions(self.debug_menu, self.debug_menu_actions)
# Emitting the signal notifying plugins that main window menu and
# toolbar actions are all defined:
self.all_actions_defined.emit()
def __getattr__(self, attr):
"""
Redefinition of __getattr__ to enable access to plugins.
        Loaded plugins can be accessed as attributes of the main window
        as before, e.g. self.console or self.main.console, preserving the
        same accessors as in previous versions.
"""
        # Mapping of new plugin identifiers to the old attribute
        # names used for plugins
if attr in self._INTERNAL_PLUGINS_MAPPING.keys():
return self.get_plugin(self._INTERNAL_PLUGINS_MAPPING[attr])
try:
return self.get_plugin(attr)
except SpyderAPIError:
pass
return super().__getattr__(attr)
def pre_visible_setup(self):
"""
Actions to be performed before the main window is visible.
The actions here are related with setting up the main window.
"""
logger.info("Setting up window...")
# Create external plugins before loading the layout to include them in
# the window restore state after restarts.
for plugin, plugin_instance in self._EXTERNAL_PLUGINS.items():
self.tabify_plugin(plugin_instance, Plugins.Console)
if isinstance(plugin_instance, SpyderDockablePlugin):
plugin_instance.get_widget().toggle_view(False)
for plugin_name in PLUGIN_REGISTRY:
plugin_instance = PLUGIN_REGISTRY.get_plugin(plugin_name)
try:
plugin_instance.before_mainwindow_visible()
except AttributeError:
pass
if self.splash is not None:
self.splash.hide()
# Menu about to show
for child in self.menuBar().children():
if isinstance(child, QMenu):
try:
child.aboutToShow.connect(self.update_edit_menu)
child.aboutToShow.connect(self.update_search_menu)
except TypeError:
pass
# Register custom layouts
for plugin, plugin_instance in self._PLUGINS.items():
if hasattr(plugin_instance, 'CUSTOM_LAYOUTS'):
if isinstance(plugin_instance.CUSTOM_LAYOUTS, list):
for custom_layout in plugin_instance.CUSTOM_LAYOUTS:
self.layouts.register_layout(
self, custom_layout)
else:
logger.info(
'Unable to load custom layouts for {}. '
'Expecting a list of layout classes but got {}'
.format(plugin, plugin_instance.CUSTOM_LAYOUTS)
)
self.layouts.update_layout_menu_actions()
logger.info("*** End of MainWindow setup ***")
self.is_starting_up = False
def post_visible_setup(self):
"""
Actions to be performed only after the main window's `show` method
is triggered.
"""
# Process pending events and hide splash before loading the
# previous session.
QApplication.processEvents()
if self.splash is not None:
self.splash.hide()
# Call on_mainwindow_visible for all plugins.
for plugin_name in PLUGIN_REGISTRY:
plugin = PLUGIN_REGISTRY.get_plugin(plugin_name)
try:
plugin.on_mainwindow_visible()
QApplication.processEvents()
except AttributeError:
pass
self.restore_scrollbar_position.emit()
# Workaround for spyder-ide/spyder#880.
# QDockWidget objects are not painted if restored as floating
# windows, so we must dock them before showing the mainwindow,
# then set them again as floating windows here.
for widget in self.floating_dockwidgets:
widget.setFloating(True)
# Server to maintain just one Spyder instance and open files in it if
# the user tries to start other instances with
# $ spyder foo.py
if (CONF.get('main', 'single_instance') and not self.new_instance
and self.open_files_server):
t = threading.Thread(target=self.start_open_files_server)
            t.daemon = True
t.start()
# Connect the window to the signal emitted by the previous server
# when it gets a client connected to it
self.sig_open_external_file.connect(self.open_external_file)
# Hide Internal Console so that people don't use it instead of
# the External or IPython ones
if self.console.dockwidget.isVisible() and DEV is None:
self.console.toggle_view_action.setChecked(False)
self.console.dockwidget.hide()
# Show Help and Consoles by default
plugins_to_show = [self.ipyconsole]
if self.help is not None:
plugins_to_show.append(self.help)
for plugin in plugins_to_show:
if plugin.dockwidget.isVisible():
plugin.dockwidget.raise_()
# Update plugins toggle actions to show the "Switch to" plugin shortcut
self._update_shortcuts_in_panes_menu()
# Load project, if any.
# TODO: Remove this reference to projects once we can send the command
# line options to the plugins.
if self.open_project:
if not running_in_mac_app():
self.projects.open_project(
self.open_project, workdir=self.init_workdir
)
else:
# Load last project if a project was active when Spyder
# was closed
self.projects.reopen_last_project()
# If no project is active, load last session
if self.projects.get_active_project() is None:
self.editor.setup_open_files(close_previous_files=False)
# Raise the menuBar to the top of the main window widget's stack
# Fixes spyder-ide/spyder#3887.
self.menuBar().raise_()
# To avoid regressions. We shouldn't have loaded the modules
# below at this point.
if DEV is not None:
assert 'pandas' not in sys.modules
assert 'matplotlib' not in sys.modules
# Notify that the setup of the mainwindow was finished
self.is_setting_up = False
self.sig_setup_finished.emit()
def set_window_title(self):
"""Set window title."""
if DEV is not None:
title = u"Spyder %s (Python %s.%s)" % (__version__,
sys.version_info[0],
sys.version_info[1])
elif running_in_mac_app() or is_pynsist():
title = "Spyder"
else:
title = u"Spyder (Python %s.%s)" % (sys.version_info[0],
sys.version_info[1])
if get_debug_level():
title += u" [DEBUG MODE %d]" % get_debug_level()
if self.window_title is not None:
title += u' -- ' + to_text_string(self.window_title)
# TODO: Remove self.projects reference once there's an API for setting
# window title.
if self.projects is not None:
path = self.projects.get_active_project_path()
if path:
path = path.replace(get_home_dir(), u'~')
title = u'{0} - {1}'.format(path, title)
self.base_title = title
self.setWindowTitle(self.base_title)
# TODO: To be removed after all actions are moved to their corresponding
# plugins
def register_shortcut(self, qaction_or_qshortcut, context, name,
add_shortcut_to_tip=True, plugin_name=None):
self.shortcuts.register_shortcut(
qaction_or_qshortcut,
context,
name,
add_shortcut_to_tip=add_shortcut_to_tip,
plugin_name=plugin_name,
)
# --- Other
def update_source_menu(self):
"""Update source menu options that vary dynamically."""
# This is necessary to avoid an error at startup.
# Fixes spyder-ide/spyder#14901
try:
self.editor.refresh_formatter_name()
except AttributeError:
pass
def free_memory(self):
"""Free memory after event."""
gc.collect()
def plugin_focus_changed(self):
"""Focus has changed from one plugin to another"""
self.update_edit_menu()
self.update_search_menu()
def show_shortcuts(self, menu):
"""Show action shortcuts in menu."""
menu_actions = menu.actions()
for action in menu_actions:
if getattr(action, '_shown_shortcut', False):
# This is a SpyderAction
if action._shown_shortcut is not None:
action.setShortcut(action._shown_shortcut)
elif action.menu() is not None:
                # This is a submenu, so we need to call this again
self.show_shortcuts(action.menu())
else:
# We don't need to do anything for other elements
continue
def hide_shortcuts(self, menu):
"""Hide action shortcuts in menu."""
menu_actions = menu.actions()
for action in menu_actions:
if getattr(action, '_shown_shortcut', False):
# This is a SpyderAction
if action._shown_shortcut is not None:
action.setShortcut(QKeySequence())
elif action.menu() is not None:
                # This is a submenu, so we need to call this again
self.hide_shortcuts(action.menu())
else:
# We don't need to do anything for other elements
continue
def hide_options_menus(self):
"""Hide options menu when menubar is pressed in macOS."""
for plugin in self.widgetlist + self.thirdparty_plugins:
if plugin.CONF_SECTION == 'editor':
editorstack = self.editor.get_current_editorstack()
editorstack.menu.hide()
else:
try:
# New API
plugin.options_menu.hide()
except AttributeError:
# Old API
plugin._options_menu.hide()
def get_focus_widget_properties(self):
"""Get properties of focus widget
Returns tuple (widget, properties) where properties is a tuple of
booleans: (is_console, not_readonly, readwrite_editor)"""
from spyder.plugins.editor.widgets.base import TextEditBaseWidget
from spyder.plugins.ipythonconsole.widgets import ControlWidget
widget = QApplication.focusWidget()
textedit_properties = None
if isinstance(widget, (TextEditBaseWidget, ControlWidget)):
console = isinstance(widget, ControlWidget)
not_readonly = not widget.isReadOnly()
readwrite_editor = not_readonly and not console
textedit_properties = (console, not_readonly, readwrite_editor)
return widget, textedit_properties
def update_edit_menu(self):
"""Update edit menu"""
widget, textedit_properties = self.get_focus_widget_properties()
if textedit_properties is None: # widget is not an editor/console
return
# !!! Below this line, widget is expected to be a QPlainTextEdit
# instance
console, not_readonly, readwrite_editor = textedit_properties
# Editor has focus and there is no file opened in it
if (not console and not_readonly and self.editor
and not self.editor.is_file_opened()):
return
# Disabling all actions to begin with
for child in self.edit_menu.actions():
child.setEnabled(False)
self.selectall_action.setEnabled(True)
# Undo, redo
        self.undo_action.setEnabled(
            readwrite_editor and widget.document().isUndoAvailable())
        self.redo_action.setEnabled(
            readwrite_editor and widget.document().isRedoAvailable())
# Copy, cut, paste, delete
has_selection = widget.has_selected_text()
self.copy_action.setEnabled(has_selection)
self.cut_action.setEnabled(has_selection and not_readonly)
self.paste_action.setEnabled(not_readonly)
# Comment, uncomment, indent, unindent...
if not console and not_readonly:
# This is the editor and current file is writable
if self.editor:
for action in self.editor.edit_menu_actions:
action.setEnabled(True)
def update_search_menu(self):
"""Update search menu"""
# Disabling all actions except the last one
# (which is Find in files) to begin with
for child in self.search_menu.actions()[:-1]:
child.setEnabled(False)
widget, textedit_properties = self.get_focus_widget_properties()
if textedit_properties is None: # widget is not an editor/console
return
# !!! Below this line, widget is expected to be a QPlainTextEdit
# instance
console, not_readonly, readwrite_editor = textedit_properties
# Find actions only trigger an effect in the Editor
if not console:
for action in self.search_menu.actions():
try:
action.setEnabled(True)
except RuntimeError:
pass
# Disable the replace action for read-only files
if len(self.search_menu_actions) > 3:
self.search_menu_actions[3].setEnabled(readwrite_editor)
def createPopupMenu(self):
return self.application.get_application_context_menu(parent=self)
def set_splash(self, message):
"""Set splash message"""
if self.splash is None:
return
if message:
logger.info(message)
self.splash.show()
self.splash.showMessage(message,
int(Qt.AlignBottom | Qt.AlignCenter |
Qt.AlignAbsolute),
QColor(Qt.white))
QApplication.processEvents()
def closeEvent(self, event):
"""closeEvent reimplementation"""
if self.closing(True):
event.accept()
else:
event.ignore()
def resizeEvent(self, event):
"""Reimplement Qt method"""
if not self.isMaximized() and not self.layouts.get_fullscreen_flag():
self.window_size = self.size()
QMainWindow.resizeEvent(self, event)
# To be used by the tour to be able to resize
self.sig_resized.emit(event)
def moveEvent(self, event):
"""Reimplement Qt method"""
if not self.isMaximized() and not self.layouts.get_fullscreen_flag():
self.window_position = self.pos()
QMainWindow.moveEvent(self, event)
# To be used by the tour to be able to move
self.sig_moved.emit(event)
def hideEvent(self, event):
"""Reimplement Qt method"""
try:
for plugin in (self.widgetlist + self.thirdparty_plugins):
# TODO: Remove old API
try:
# New API
if plugin.get_widget().isAncestorOf(
self.last_focused_widget):
plugin.change_visibility(True)
except AttributeError:
# Old API
if plugin.isAncestorOf(self.last_focused_widget):
plugin._visibility_changed(True)
QMainWindow.hideEvent(self, event)
except RuntimeError:
QMainWindow.hideEvent(self, event)
def change_last_focused_widget(self, old, now):
"""To keep track of to the last focused widget"""
if (now is None and QApplication.activeWindow() is not None):
QApplication.activeWindow().setFocus()
self.last_focused_widget = QApplication.focusWidget()
elif now is not None:
self.last_focused_widget = now
self.previous_focused_widget = old
def closing(self, cancelable=False):
"""Exit tasks"""
if self.already_closed or self.is_starting_up:
return True
if cancelable and CONF.get('main', 'prompt_on_exit'):
reply = QMessageBox.critical(self, 'Spyder',
'Do you really want to exit?',
QMessageBox.Yes, QMessageBox.No)
if reply == QMessageBox.No:
return False
if CONF.get('main', 'single_instance') and self.open_files_server:
self.open_files_server.close()
# Internal plugins
for plugin in (self.widgetlist + self.thirdparty_plugins):
# New API
try:
if isinstance(plugin, SpyderDockablePlugin):
plugin.close_window()
if not plugin.on_close(cancelable):
return False
except AttributeError:
pass
# Old API
try:
plugin._close_window()
if not plugin.closing_plugin(cancelable):
return False
except AttributeError:
pass
# New API: External plugins
for plugin_name, plugin in self._EXTERNAL_PLUGINS.items():
try:
if isinstance(plugin, SpyderDockablePlugin):
plugin.close_window()
if not plugin.on_close(cancelable):
return False
except AttributeError as e:
logger.error(str(e))
# Save window settings *after* closing all plugin windows, in order
# to show them in their previous locations in the next session.
# Fixes spyder-ide/spyder#12139
prefix = 'window' + '/'
self.layouts.save_current_window_settings(prefix)
self.already_closed = True
return True
def add_dockwidget(self, plugin):
"""
Add a plugin QDockWidget to the main window.
"""
try:
# New API
if plugin.is_compatible:
dockwidget, location = plugin.create_dockwidget(self)
self.addDockWidget(location, dockwidget)
self.widgetlist.append(plugin)
except AttributeError:
# Old API
if plugin._is_compatible:
dockwidget, location = plugin._create_dockwidget()
self.addDockWidget(location, dockwidget)
self.widgetlist.append(plugin)
@Slot()
def global_callback(self):
"""Global callback"""
widget = QApplication.focusWidget()
action = self.sender()
callback = from_qvariant(action.data(), to_text_string)
from spyder.plugins.editor.widgets.base import TextEditBaseWidget
from spyder.plugins.ipythonconsole.widgets import ControlWidget
if isinstance(widget, (TextEditBaseWidget, ControlWidget)):
getattr(widget, callback)()
else:
return
def redirect_internalshell_stdio(self, state):
if state:
self.console.redirect_stds()
else:
self.console.restore_stds()
def open_external_console(self, fname, wdir, args, interact, debug, python,
python_args, systerm, post_mortem=False):
"""Open external console"""
if systerm:
# Running script in an external system terminal
try:
if CONF.get('main_interpreter', 'default'):
executable = get_python_executable()
else:
executable = CONF.get('main_interpreter', 'executable')
programs.run_python_script_in_terminal(
fname, wdir, args, interact, debug, python_args,
executable)
except NotImplementedError:
QMessageBox.critical(self, _("Run"),
_("Running an external system terminal "
"is not supported on platform %s."
) % os.name)
def open_file(self, fname, external=False):
"""
Open filename with the appropriate application
Redirect to the right widget (txt -> editor, spydata -> workspace, ...)
or open file outside Spyder (if extension is not supported)
"""
fname = to_text_string(fname)
ext = osp.splitext(fname)[1]
if encoding.is_text_file(fname):
self.editor.load(fname)
elif self.variableexplorer is not None and ext in IMPORT_EXT:
self.variableexplorer.import_data(fname)
elif not external:
fname = file_uri(fname)
start_file(fname)
def open_external_file(self, fname):
"""
Open external files that can be handled either by the Editor or the
variable explorer inside Spyder.
"""
# Check that file exists
fname = encoding.to_unicode_from_fs(fname)
if osp.exists(osp.join(CWD, fname)):
fpath = osp.join(CWD, fname)
elif osp.exists(fname):
fpath = fname
else:
return
# Don't open script that starts Spyder at startup.
# Fixes issue spyder-ide/spyder#14483
if sys.platform == 'darwin' and 'bin/spyder' in fname:
return
if osp.isfile(fpath):
self.open_file(fpath, external=True)
elif osp.isdir(fpath):
QMessageBox.warning(
self, _("Error"),
_('To open <code>{fpath}</code> as a project with Spyder, '
'please use <code>spyder -p "{fname}"</code>.')
.format(fpath=osp.normpath(fpath), fname=fname)
)
# --- Path Manager
# ------------------------------------------------------------------------
def load_python_path(self):
"""Load path stored in Spyder configuration folder."""
if osp.isfile(self.SPYDER_PATH):
with open(self.SPYDER_PATH, 'r', encoding='utf-8') as f:
path = f.read().splitlines()
self.path = tuple(name for name in path if osp.isdir(name))
if osp.isfile(self.SPYDER_NOT_ACTIVE_PATH):
with open(self.SPYDER_NOT_ACTIVE_PATH, 'r',
encoding='utf-8') as f:
not_active_path = f.read().splitlines()
self.not_active_path = tuple(name for name in not_active_path
if osp.isdir(name))
def save_python_path(self, new_path_dict):
"""
Save path in Spyder configuration folder.
`new_path_dict` is an OrderedDict that has the new paths as keys and
the state as values. The state is `True` for active and `False` for
inactive.
"""
path = [p for p in new_path_dict]
not_active_path = [p for p in new_path_dict if not new_path_dict[p]]
try:
encoding.writelines(path, self.SPYDER_PATH)
encoding.writelines(not_active_path, self.SPYDER_NOT_ACTIVE_PATH)
except EnvironmentError as e:
logger.error(str(e))
CONF.set('main', 'spyder_pythonpath', self.get_spyder_pythonpath())
def get_spyder_pythonpath_dict(self):
"""
Return Spyder PYTHONPATH.
The returned ordered dictionary has the paths as keys and the state
as values. The state is `True` for active and `False` for inactive.
Example:
            OrderedDict([('/some/path', True), ('/some/other/path', False)])
"""
self.load_python_path()
path_dict = OrderedDict()
for path in self.path:
path_dict[path] = path not in self.not_active_path
for path in self.project_path:
path_dict[path] = True
return path_dict
def get_spyder_pythonpath(self):
"""
Return Spyder PYTHONPATH.
"""
path_dict = self.get_spyder_pythonpath_dict()
path = [k for k, v in path_dict.items() if v]
return path
def update_python_path(self, new_path_dict):
"""Update python path on Spyder interpreter and kernels."""
# Load previous path
path_dict = self.get_spyder_pythonpath_dict()
# Save path
if path_dict != new_path_dict:
# It doesn't include the project_path
self.save_python_path(new_path_dict)
# Load new path
new_path_dict_p = self.get_spyder_pythonpath_dict() # Includes project
# Update Spyder interpreter
for path in path_dict:
while path in sys.path:
sys.path.remove(path)
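        # Re-insert active paths at index 1 in reverse order, so the first
        # entry of the dict ends up closest to the front of sys.path and
        # sys.path[0] is left untouched.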
for path, active in reversed(new_path_dict_p.items()):
if active:
sys.path.insert(1, path)
# Any plugin that needs to do some work based on this signal should
# connect to it on plugin registration
self.sig_pythonpath_changed.emit(path_dict, new_path_dict_p)
@Slot()
def show_path_manager(self):
"""Show path manager dialog."""
from spyder.widgets.pathmanager import PathManager
read_only_path = tuple(self.projects.get_pythonpath())
dialog = PathManager(self, self.path, read_only_path,
self.not_active_path, sync=True)
self._path_manager = dialog
dialog.sig_path_changed.connect(self.update_python_path)
dialog.redirect_stdio.connect(self.redirect_internalshell_stdio)
dialog.show()
def pythonpath_changed(self):
"""Project's PYTHONPATH contribution has changed."""
self.project_path = tuple(self.projects.get_pythonpath())
path_dict = self.get_spyder_pythonpath_dict()
self.update_python_path(path_dict)
#---- Preferences
def apply_settings(self):
"""Apply main window settings."""
qapp = QApplication.instance()
# Set 'gtk+' as the default theme in Gtk-based desktops
# Fixes spyder-ide/spyder#2036.
if is_gtk_desktop() and ('GTK+' in QStyleFactory.keys()):
try:
qapp.setStyle('gtk+')
except:
pass
default = self.DOCKOPTIONS
if CONF.get('main', 'vertical_tabs'):
default = default|QMainWindow.VerticalTabs
self.setDockOptions(default)
self.apply_panes_settings()
if CONF.get('main', 'use_custom_cursor_blinking'):
qapp.setCursorFlashTime(
CONF.get('main', 'custom_cursor_blinking'))
else:
qapp.setCursorFlashTime(self.CURSORBLINK_OSDEFAULT)
def apply_panes_settings(self):
"""Update dockwidgets features settings."""
for plugin in (self.widgetlist + self.thirdparty_plugins):
features = plugin.dockwidget.FEATURES
plugin.dockwidget.setFeatures(features)
try:
# New API
margin = 0
if CONF.get('main', 'use_custom_margin'):
margin = CONF.get('main', 'custom_margin')
plugin.update_margins(margin)
except AttributeError:
# Old API
plugin._update_margins()
@Slot()
def show_preferences(self):
"""Edit Spyder preferences."""
self.preferences.open_dialog(self.prefs_dialog_size)
def set_prefs_size(self, size):
"""Save preferences dialog size."""
self.prefs_dialog_size = size
# -- Open files server
def start_open_files_server(self):
self.open_files_server.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
port = select_port(default_port=OPEN_FILES_PORT)
CONF.set('main', 'open_files_port', port)
self.open_files_server.bind(('127.0.0.1', port))
self.open_files_server.listen(20)
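        # Accept connections from other Spyder instances, read the file name
        # they send and ask the main window to open it.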
        while True:
try:
req, dummy = self.open_files_server.accept()
except socket.error as e:
# See spyder-ide/spyder#1275 for details on why errno EINTR is
# silently ignored here.
eintr = errno.WSAEINTR if os.name == 'nt' else errno.EINTR
# To avoid a traceback after closing on Windows
if e.args[0] == eintr:
continue
# handle a connection abort on close error
enotsock = (errno.WSAENOTSOCK if os.name == 'nt'
else errno.ENOTSOCK)
if e.args[0] in [errno.ECONNABORTED, enotsock]:
return
raise
fname = req.recv(1024)
fname = fname.decode('utf-8')
self.sig_open_external_file.emit(fname)
req.sendall(b' ')
# ---- Global Switcher
def open_switcher(self, symbol=False):
"""Open switcher dialog box."""
if self.switcher is not None and self.switcher.isVisible():
self.switcher.clear()
self.switcher.hide()
return
if symbol:
self.switcher.set_search_text('@')
else:
self.switcher.set_search_text('')
self.switcher.setup()
self.switcher.show()
        # Note: the extra 6 pixels at the top make it look better.
        # FIXME: Why is this using the toolbars menu? A: To avoid placing the
        # switcher on top of the toolbars.
        # Toolbars should probably only be taken into account for this 'delta'
        # when they are visible.
delta_top = (self.toolbar.toolbars_menu.geometry().height() +
self.menuBar().geometry().height() + 6)
self.switcher.set_position(delta_top)
def open_symbolfinder(self):
"""Open symbol list management dialog box."""
self.open_switcher(symbol=True)
def create_switcher(self):
"""Create switcher dialog instance."""
if self.switcher is None:
from spyder.widgets.switcher import Switcher
self.switcher = Switcher(self)
return self.switcher
# --- For OpenGL
def _test_setting_opengl(self, option):
"""Get the current OpenGL implementation in use"""
if option == 'software':
return QCoreApplication.testAttribute(Qt.AA_UseSoftwareOpenGL)
elif option == 'desktop':
return QCoreApplication.testAttribute(Qt.AA_UseDesktopOpenGL)
elif option == 'gles':
return QCoreApplication.testAttribute(Qt.AA_UseOpenGLES)
#==============================================================================
# Main
#==============================================================================
def main(options, args):
"""Main function"""
# **** For Pytest ****
if running_under_pytest():
if CONF.get('main', 'opengl') != 'automatic':
option = CONF.get('main', 'opengl')
set_opengl_implementation(option)
app = create_application()
window = create_window(MainWindow, app, None, options, None)
return window
# **** Handle hide_console option ****
if options.show_console:
print("(Deprecated) --show console does nothing, now the default "
" behavior is to show the console, use --hide-console if you "
"want to hide it")
if set_attached_console_visible is not None:
set_attached_console_visible(not options.hide_console
or options.reset_config_files
or options.reset_to_defaults
or options.optimize
or bool(get_debug_level()))
# **** Set OpenGL implementation to use ****
# This attribute must be set before creating the application.
# See spyder-ide/spyder#11227
if options.opengl_implementation:
option = options.opengl_implementation
set_opengl_implementation(option)
else:
if CONF.get('main', 'opengl') != 'automatic':
option = CONF.get('main', 'opengl')
set_opengl_implementation(option)
# **** Set high DPI scaling ****
# This attribute must be set before creating the application.
if hasattr(Qt, 'AA_EnableHighDpiScaling'):
QCoreApplication.setAttribute(Qt.AA_EnableHighDpiScaling,
CONF.get('main', 'high_dpi_scaling'))
# **** Set debugging info ****
if get_debug_level() > 0:
delete_debug_log_files()
setup_logging(options)
# **** Create the application ****
app = create_application()
# **** Create splash screen ****
splash = create_splash_screen()
if splash is not None:
splash.show()
splash.showMessage(
_("Initializing..."),
int(Qt.AlignBottom | Qt.AlignCenter | Qt.AlignAbsolute),
QColor(Qt.white)
)
QApplication.processEvents()
if options.reset_to_defaults:
# Reset Spyder settings to defaults
CONF.reset_to_defaults()
return
elif options.optimize:
# Optimize the whole Spyder's source code directory
import spyder
programs.run_python_script(module="compileall",
args=[spyder.__path__[0]], p_args=['-O'])
return
# **** Read faulthandler log file ****
faulthandler_file = get_conf_path('faulthandler.log')
previous_crash = ''
if osp.exists(faulthandler_file):
with open(faulthandler_file, 'r') as f:
previous_crash = f.read()
# Remove file to not pick it up for next time.
try:
dst = get_conf_path('faulthandler.log.old')
shutil.move(faulthandler_file, dst)
except Exception:
pass
CONF.set('main', 'previous_crash', previous_crash)
# **** Set color for links ****
set_links_color(app)
# **** Create main window ****
mainwindow = None
try:
if PY3 and options.report_segfault:
import faulthandler
with open(faulthandler_file, 'w') as f:
faulthandler.enable(file=f)
mainwindow = create_window(
MainWindow, app, splash, options, args
)
else:
mainwindow = create_window(MainWindow, app, splash, options, args)
except FontError:
QMessageBox.information(None, "Spyder",
"Spyder was unable to load the <i>Spyder 3</i> "
"icon theme. That's why it's going to fallback to the "
"theme used in Spyder 2.<br><br>"
"For that, please close this window and start Spyder again.")
CONF.set('appearance', 'icon_theme', 'spyder 2')
if mainwindow is None:
# An exception occurred
if splash is not None:
splash.hide()
return
ORIGINAL_SYS_EXIT()
if __name__ == "__main__":
main()
|
s3.py
|
from io import BytesIO
from threading import Thread
from django.conf import settings
try:
from minio import Minio, S3Error
from minio.deleteobjects import DeleteObject
from urllib3 import PoolManager
except ImportError:
    # Treat object storage as unconfigured when the optional
    # dependencies are not installed.
settings.S3_BUCKET = None
_client = None
def client():
if not settings.S3_BUCKET:
raise Exception("Object storage is not configured")
global _client
if _client is None:
_client = Minio(
settings.S3_ENDPOINT,
settings.S3_ACCESS_KEY,
settings.S3_SECRET_KEY,
region=settings.S3_REGION,
http_client=PoolManager(timeout=settings.S3_TIMEOUT),
)
return _client
ASCII_J = ord("j")
ASCII_Z = ord("z")
def enc(n):
"""Generates an object key in the "<sorting prefix>-<n>" form.
>>> [enc(i) for i in range(0, 5)]
['zj-0', 'zi-1', 'zh-2', 'zg-3', 'zf-4']
The purpose of the sorting prefix is to sort keys with smaller n values
last:
>>> sorted([enc(i) for i in range(0, 5)])
['zf-4', 'zg-3', 'zh-2', 'zi-1', 'zj-0']
This allows efficient lookup of objects with n
values below a specific threshold. For example, the following
    retrieves all keys at the bucket's root with n < 123:
>>> client.list_objects(bucket_name, start_after=enc(123))
"""
s = str(n)
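    # The first character encodes the inverted length: longer (hence larger)
    # numbers get an earlier letter, so they sort first. Each digit d is then
    # mapped to chr(ord('j') - d), so larger digits also sort earlier.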
len_inverted = chr(ASCII_Z - len(s) + 1)
inverted = "".join(chr(ASCII_J - int(c)) for c in s)
return len_inverted + inverted + "-" + s
def get_object(code, n):
if not settings.S3_BUCKET:
return None
key = "%s/%s" % (code, enc(n))
response = None
try:
response = client().get_object(settings.S3_BUCKET, key)
return response.read()
except S3Error:
return None
finally:
if response:
response.close()
response.release_conn()
def put_object(code, n, data):
key = "%s/%s" % (code, enc(n))
while True:
try:
client().put_object(settings.S3_BUCKET, key, BytesIO(data), len(data))
break
except S3Error as e:
if e.code != "InternalError":
raise e
print("InternalError, retrying...")
def _remove_objects(code, upto_n):
if upto_n <= 0:
return
prefix = "%s/" % code
start_after = prefix + enc(upto_n + 1)
q = client().list_objects(settings.S3_BUCKET, prefix, start_after=start_after)
delete_objs = [DeleteObject(obj.object_name) for obj in q]
if delete_objs:
errors = client().remove_objects(settings.S3_BUCKET, delete_objs)
for e in errors:
print("remove_objects error: ", e)
def remove_objects(check_code, upto_n):
"""Removes keys with n values below or equal to `upto_n`.
    The S3 API calls can take seconds to complete, so the removal code
    runs on a background thread.
"""
Thread(target=_remove_objects, args=(check_code, upto_n)).start()
|
test.py
|
import asyncio
import time
import re
import json
import sys
import threading
from queue import Queue
from multiprocessing import Process
from pyppeteer.launcher import connect
from threading import Thread
from lib.RedisUtil import RedisConf,RedisUtils
# from bs4 import BeautifulSoup as bs
from lib.headlesscrower import HeadlessCrawler
from lib.commons import TURL, hashmd5, get_basedomain, argsparse
from lib.MongoUtil import MongoConf, MongoUtils
from lib.UrlDeDuplicate import UrlPattern
from multi_process import AdvancedConcurrencyManager
#from pyppeteer.network_manager import Request
class SpiderWork(object):
'''
    This class crawls a single page and saves the collected links to MongoDB.
'''
def __init__(self, wsaddr, url, taskname, basedomain=1, cookie=None, goon=False, mongodb='spider', redisdb=1):
'''
        :param wsaddr: websocket address of the headless chrome instance
        :param url: url to spider
        :param taskname: task name, used as the redis key pattern
        :param cookie: path to the file that contains the cookie
        :param goon: whether to continue a previous crawl
        :param basedomain: the domain level to extract from the url
        :param mongodb: the mongodb database to save results to
'''
self.wsaddr = wsaddr
self.url = url
self.taskname = taskname
self.basedomain = basedomain
self.cookie = cookie
self.goon = goon
self.mongodb = mongodb
# host, port, db
mongoconf = MongoConf('127.0.0.1', 27017, mongodb)
self.mongoutil = MongoUtils(mongoconf)
redisconf = RedisConf(taskname, db=redisdb)
self.redisutil = RedisUtils(redisconf)
self.domain = get_basedomain(url, basedomain)
def fetchCookie(self):
cookie = None
try:
with open(self.cookie, 'r') as f:
cookie = json.load(f)
except Exception as e:
msg = '[fetchCookie] [Error] ' + repr(e)
print(msg)
cookie = None
return cookie
def sameOrigin(self, url, domain):
try:
turl = TURL(url)
#print("turl.netloc ={} domain={}".format(turl.netloc, domain))
            assert turl.netloc.find(domain) >= 0, '{} does not belong to {}'.format(url, domain)
            assert turl.is_block_host() == False, '{} is a blocked host'.format(url)
            assert turl.is_block_path() == False, '{} is a blocked path'.format(url)
            assert turl.is_ext_static() == False, '{} has a static file extension'.format(url)
return True
except Exception as e:
return False
async def _spider(self):
redisutil = self.redisutil
if not self.goon:
print("start from new url.....")
cookie = self.fetchCookie()
a = HeadlessCrawler(self.wsaddr, self.url, cookie=cookie)
await a.spider()
for url in a.collect_url:
u = url['url']
if not self.sameOrigin(u, self.domain):
continue
method = url['method']
pattern = UrlPattern(u).get_pattern()
pattern_md5 = hashmd5(pattern)
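                # URLs that carry a captured request are stored as results;
                # everything else is queued as a new crawl task unless its
                # normalized pattern has already been seen.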
if 'request' in url:
result = json.dumps(url)
redisutil.insert_result(result)
redisutil.set_url_scanned(method, pattern_md5)
else:
if redisutil.is_url_scanned(method, pattern_md5):
pass
else:
task = json.dumps(url)
redisutil.insert_one_task(task)
redisutil.set_url_scanned(method, pattern_md5)
self.mongoutil.save(url)
tasks = [asyncio.ensure_future(self.worker()) for i in range(10)]
# print(tasks)
await asyncio.wait(tasks)
async def worker(self):
redisutil = self.redisutil
cookie = self.fetchCookie()
mongoutil = self.mongoutil
if self.goon:
pass
while True:
if redisutil.task_counts == 0:
break
task = redisutil.fetch_one_task()
url = json.loads(task)
            # same-origin check
u = url['url']
print("=========================fetched Form Redis: {}==================".format(redisutil.result_counts))
depth = url['depth']
            if depth > 3:  # stop when deeper than four levels
print("---------------depth > 3-------------")
continue
a = HeadlessCrawler(self.wsaddr, u, cookie=cookie, depth=url['depth']+1)
await a.spider()
for url in a.collect_url:
u = url['url']
if not self.sameOrigin(u, self.domain):
continue
pattern = UrlPattern(u).get_pattern()
pattern_md5 = hashmd5(pattern)
method = url['method']
if 'request' in url:
result = json.dumps(url)
redisutil.insert_result(result)
redisutil.set_url_scanned(method, pattern_md5)
else:
if redisutil.is_url_scanned(method, pattern_md5):
pass
else:
task = json.dumps(url)
redisutil.insert_one_task(task)
redisutil.set_url_scanned(method, pattern_md5)
mongoutil.save(url)
async def _closePage(self):
'''
        After a timeout the page may not be closed; without closing pages
        automatically, too many of them accumulate.
'''
browser = await connect(browserWSEndpoint=self.wsaddr)
pages = await browser.pages()
for page in pages:
await page.close()
def spider(self):
start = time.time()
loop = asyncio.get_event_loop()
x = self._spider()
try:
tasks = [asyncio.ensure_future(x),]
loop.run_until_complete(asyncio.wait(tasks))
except KeyboardInterrupt as e:
# print(asyncio.Task.all_tasks())
for task in asyncio.Task.all_tasks():
# print(task.cancel())
task.cancel()
# close pages
closeTask = self._closePage()
tasks = [asyncio.ensure_future(closeTask),]
loop.run_until_complete(asyncio.wait(tasks))
loop.stop()
loop.run_forever()
finally:
loop.close()
print(time.time() - start)
async def worker(conf, wsaddr, cookie=None, domain='', goon=False, mongo=None):
redis_util = RedisUtils(conf)
mongoutil = mongo
if goon:
pass
# print("wsaddr={}\ndomain={}".format(wsaddr, domain))
while True:
        # Exit condition: with breadth-first traversal we can stop once the
        # depth reaches a limit (e.g. 4 levels), or once redis has no tasks left.
if redis_util.task_counts == 0:
break
# if unscan_queue.empty():
# break
task = redis_util.fetch_one_task()
# task = unscan_queue.get()
url = json.loads(task)
        # same-origin check
u = url['url']
print("=========================fetched Form Redis: {}==================".format(redis_util.result_counts))
depth = url['depth']
        if depth > 3:  # stop when deeper than four levels
print("---------------depth > 3-------------")
continue
a = HeadlessCrawler(wsaddr, u, cookie=cookie, depth=url['depth']+1)
await a.spider()
for url in a.collect_url:
u = url['url']
if not sameOrigin(u, domain):
continue
pattern = UrlPattern(u).get_pattern()
pattern_md5 = hashmd5(pattern)
method = url['method']
if 'request' in url:
result = json.dumps(url)
                # Store the result; later this could be written directly to Mongo.
redis_util.insert_result(result)
redis_util.set_url_scanned(method, pattern_md5)
else:
if redis_util.is_url_scanned(method, pattern_md5):
# print("[Pattern Found] [{}]".format(pattern))
pass
else:
task = json.dumps(url)
redis_util.insert_one_task(task)
redis_util.set_url_scanned(method, pattern_md5)
mongoutil.save(url)
# unscan_queue.put(task)
# scanned_set.add( pattern + "|" + pattern_md5)
def sameOrigin(url, domain):
try:
turl = TURL(url)
#print("turl.netloc ={} domain={}".format(turl.netloc, domain))
        assert turl.netloc.find(domain) >= 0, '{} does not belong to {}'.format(url, domain)
        assert turl.is_block_host() == False, '{} is a blocked host'.format(url)
        assert turl.is_block_path() == False, '{} is a blocked path'.format(url)
        assert turl.is_ext_static() == False, '{} has a static file extension'.format(url)
return True
except Exception as e:
return False
async def spider(wsaddr, url, taskname, cookie=None, goon=False, mongo=None):
    # 2018-07-09: start with a single-threaded version, then rewrite it as producer/consumer.
conf = RedisConf(taskname, db=1)
redis_util = RedisUtils(conf)
unscan_queue = Queue()
result_queue = Queue()
scanned_set = set()
domain = get_basedomain(url)
print(domain)
# count = 0
    # Set the task domain
redis_util.set_task_domain(domain)
if not goon:
print("start from new url.....")
a = HeadlessCrawler(wsaddr, url, cookie=cookie)
await a.spider()
# print(a.collect_url)
for url in a.collect_url:
            # Could also check the URL type here.
u = url['url']
            # First check that the URL is under the same domain.
if not sameOrigin(u, domain):
continue
method = url['method']
pattern = UrlPattern(u).get_pattern()
pattern_md5 = hashmd5(pattern)
if 'request' in url:
result = json.dumps(url)
# method = url['method']
                # Store the result; later this could be written directly to Mongo.
redis_util.insert_result(result)
redis_util.set_url_scanned(method, pattern_md5)
else:
if redis_util.is_url_scanned(method, pattern_md5):
pass
else:
task = json.dumps(url)
redis_util.insert_one_task(task)
redis_util.set_url_scanned(method, pattern_md5)
mongo.save(url)
tasks = [asyncio.ensure_future(worker(conf, wsaddr, cookie, domain, mongo=mongo)) for i in range(10)]
# print(tasks)
await asyncio.wait(tasks)
'''
threads = []
for i in range(2):
t = Thread(target=workthread, args=(conf, wsaddr, cookie, domain))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
'''
# for i in range(20):
    # # run with 20 coroutines
# await worker(redis_util, wsaddr, cookie, domain=domain)
# while True:
    #     # Exit condition: with breadth-first traversal we can stop once the
    #     # depth reaches a limit (e.g. 4 levels), or once redis has no tasks left.
# if redis_util.task_counts == 0:
# break
# # if unscan_queue.empty():
# # break
# task = redis_util.fetch_one_task()
# # task = unscan_queue.get()
# url = json.loads(task)
    #     # same-origin check
# u = url['url']
# if not sameOrigin(u, domain):
# continue
# a = HeadlessCrawler(wsaddr, u, cookie=cookie, depth=url['depth']+1)
# await a.spider()
# for url in a.collect_url:
    #         # Could also check the URL type here.
# depth = url['depth']
    #         if depth > 3:  # stop when deeper than four levels
# print("---------------depth > 3-------------")
# continue
# u = url['url']
# if not sameOrigin(u, domain):
# continue
# pattern = UrlPattern(u).get_pattern()
# pattern_md5 = hashmd5(pattern)
# method = url['method']
# if 'request' in url:
# result = json.dumps(url)
    #             # # Store the result; later this could be written directly to Mongo.
# redis_util.insert_result(result)
# redis_util.set_url_scanned(method, pattern_md5)
# else:
# if redis_util.is_url_scanned(method, pattern_md5):
# pass
# else:
# task = json.dumps(url)
# redis_util.insert_one_task(task)
# redis_util.set_url_scanned(method, pattern_md5)
# # unscan_queue.put(task)
# # scanned_set.add( pattern + "|" + pattern_md5)
print("-----------done------")
def main():
args = argsparse()
wsaddr = args.wsaddr
url = args.u
cookie_file = args.cookie
iqiyi_cookie = None
MongoConf.db = args.mongo_db
if cookie_file:
with open(cookie_file, 'r') as f:
iqiyi_cookie = json.load(f)
#print(iqiyi_cookie)
# print(type(iqiyi_cookie))
print(wsaddr, url)
# with open('fetched_url.json', 'w') as f:
# json.dump((a.fetched_url), f)
# wsaddr = 'ws://10.127.21.237:9222/devtools/browser/f3f68d37-aabb-43b7-9d75-986a8be08e2d'
# url = 'http://www.iqiyi.com'
taskname = args.taskname
spider = SpiderWork(wsaddr, url, taskname, mongodb='spiderworktest')
spider.spider()
'''
start = time.time()
loop = asyncio.get_event_loop()
x = spider(wsaddr, url, taskname, cookie=iqiyi_cookie, goon=False, mongo=mongoutil)
try:
tasks = [asyncio.ensure_future(x),]
loop.run_until_complete(asyncio.wait(tasks))
except KeyboardInterrupt as e:
print(asyncio.Task.all_tasks())
for task in asyncio.Task.all_tasks():
print(task.cancel())
loop.stop()
loop.run_forever()
finally:
loop.close()
print(time.time() - start)
'''
if __name__ == '__main__':
'''
p = Process(target=main)
p.daemon = True
p.start()
starttime = time.time()
while True:
t = time.time() - starttime
if t > 10 * 60:
print("timeout")
break
elif not p.is_alive():
break
else:
time.sleep(10)
'''
main()
|
main.py
|
import sys
import os
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
sys.path.append(os.path.split(rootPath)[0])
print(sys.path)
from src.analysis.QQZoneAnalysis import QQZoneAnalysis
from src.spider.QQZoneSpider import QQZoneSpider
from src.util.constant import WEB_SPIDER_INFO, CLEAN_DATA_KEY, LOGIN_FAILED, \
USER_MAP_KEY, GET_MOOD_FAILED, MOOD_FINISH_KEY, WAITING_USER_LIST, FINISH_USER_NUM_KEY, USER_LOGIN_STATE
import threading
# Log in automatically with selenium and fetch all "mood" (shuoshuo) posts from QZone without downloading images.
# A fairly complete entry point that can be called directly.
def capture_main_data():
print(sys.path)
sp = QQZoneSpider(use_redis=True, debug=True, download_small_image=False, download_big_image=False)
sp.login_with_qr_code()
sp.get_main_page_info()
sp.get_mood_list()
sp.user_info.save_user(sp.username)
# Interface exposed to the web frontend
def web_interface(username, nickname, stop_time, mood_num, cookie_text, no_delete, password, pool_flag):
sp = QQZoneAnalysis(use_redis=True, debug=False, username=username, analysis_friend=True, from_web=True,
nickname=nickname, stop_time=stop_time, mood_num=mood_num, no_delete=no_delete, cookie_text=cookie_text, pool_flag=pool_flag)
sp.re.hset(USER_MAP_KEY, username, password)
sp.re.set(USER_LOGIN_STATE + username, 0)
sp.logging_info(username + "init success")
try:
state = sp.login_with_qr_code()
sp.remove_qr_code()
        # If login failed, exit this thread.
if not state:
sp.logging_info(username + "logging failed")
sp.re.rpush(WEB_SPIDER_INFO + username, LOGIN_FAILED)
exit(1)
else:
            # Store the login state.
sp.logging_info(username + "logging success")
sp.re.rpush(WEB_SPIDER_INFO + username, "用户" + str(sp.username) + "登陆成功")
sp.re.set(USER_LOGIN_STATE + username, 1)
except BaseException as e:
sp.format_error(e, "logging failed")
sp.logging_info(username + "logging failed")
sp.re.rpush(WEB_SPIDER_INFO + username, LOGIN_FAILED)
exit(1)
sp.get_main_page_info()
sp.logging_info("get main page success")
try:
        # Fetch the mood (post) data.
t1 = threading.Thread(target=sp.get_mood_list)
        # Fetch the friend data.
t2 = threading.Thread(target=sp.get_friend_detail)
        t1.daemon = False
        t2.daemon = False
t1.start()
t2.start()
        # Wait for both threads to finish.
t1.join()
t2.join()
# sp.user_info.save_user(username)
except BaseException:
sp.re.rpush(WEB_SPIDER_INFO + username, GET_MOOD_FAILED)
exit(1)
sp.re.set(MOOD_FINISH_KEY + str(username), 1)
    # Clean the friend data.
friend_data_state = sp.clean_friend_data()
if friend_data_state:
        # Get the first friend's data.
sp.get_first_friend_info()
        # Find the person with the most mutual friends.
sp.get_most_common_friend()
        # Compute shared groups.
sp.get_most_group()
sp.get_useful_info_from_json()
if not sp.mood_data_df.empty:
        # Clean the mood data and find who liked and who commented the most.
sp.get_most_people()
        # Compute when the posts were sent.
sp.calculate_send_time()
sp.draw_cmt_cloud(sp.mood_data_df)
sp.draw_like_cloud(sp.mood_data_df)
        # Keywords in the posts; this is relatively time-consuming.
# sp.draw_content_cloud(sp.mood_data_df)
        # Save the mood data.
sp.export_mood_df()
sp.calculate_history_like_agree()
sp.user_info.save_user()
sp.re.set(CLEAN_DATA_KEY + username, 1)
now_user = sp.re.get(FINISH_USER_NUM_KEY)
if now_user is None:
now_user = 0
else:
now_user = int(now_user)
sp.re.set(FINISH_USER_NUM_KEY, now_user + 1)
    # Remove the current user from the waiting list; note that this command's arguments differ from redis-cli.
sp.re.lrem(WAITING_USER_LIST, username)
def get_user_basic_info():
sp = QQZoneSpider(use_redis=True, debug=False, mood_begin=0, mood_num=-1,
stop_time='2015-06-01',
download_small_image=False, download_big_image=False,
download_mood_detail=True, download_like_detail=True,
download_like_names=True, recover=False, cookie_text=None)
return sp.user_info
if __name__ == '__main__':
capture_main_data()
|
launcher.py
|
# Copyright 2022 joetjo https://github.com/joetjo/MarkdownHelper
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import threading
class GhLauncher:
@staticmethod
def launch(label, exe, cwd=os.getcwd()):
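        # Note: the default value of `cwd` is evaluated once at import time,
        # so it is the process's working directory when this module was
        # imported, not at call time.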
print("Launcher: Launching {} ({}) from folder {} ".format(label, exe, cwd))
bg = threading.Thread(target=GhLauncher.launchImpl, args=(exe, cwd))
bg.start()
@staticmethod
def launchImpl(exe, cwd):
subprocess.run(exe, cwd=cwd)
|
_test_multiprocessing.py
|
#
# Unit tests for the multiprocessing package
#
import unittest
import queue as pyqueue
import time
import io
import itertools
import sys
import os
import gc
import errno
import signal
import array
import socket
import random
import logging
import struct
import operator
import test.support
import test.support.script_helper
# Skip tests if _multiprocessing wasn't built.
_multiprocessing = test.support.import_module('_multiprocessing')
# Skip tests if sem_open implementation is broken.
test.support.import_module('multiprocessing.synchronize')
# import threading after _multiprocessing to raise a more relevant error
# message: "No module named _multiprocessing". _multiprocessing is not compiled
# without thread support.
import threading
import multiprocessing.dummy
import multiprocessing.connection
import multiprocessing.managers
import multiprocessing.heap
import multiprocessing.pool
from multiprocessing import util
try:
from multiprocessing import reduction
HAS_REDUCTION = reduction.HAVE_SEND_HANDLE
except ImportError:
HAS_REDUCTION = False
try:
from multiprocessing.sharedctypes import Value, copy
HAS_SHAREDCTYPES = True
except ImportError:
HAS_SHAREDCTYPES = False
try:
import msvcrt
except ImportError:
msvcrt = None
#
#
#
def latin(s):
return s.encode('latin')
#
# Constants
#
LOG_LEVEL = util.SUBWARNING
#LOG_LEVEL = logging.DEBUG
DELTA = 0.1
CHECK_TIMINGS = False # making true makes tests take a lot longer
# and can sometimes cause some non-serious
# failures because some calls block a bit
# longer than expected
if CHECK_TIMINGS:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4
else:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1
HAVE_GETVALUE = not getattr(_multiprocessing,
'HAVE_BROKEN_SEM_GETVALUE', False)
WIN32 = (sys.platform == "win32")
from multiprocessing.connection import wait
def wait_for_handle(handle, timeout):
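    # A negative timeout means wait with no time limit.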
if timeout is not None and timeout < 0.0:
timeout = None
return wait([handle], timeout)
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except:
MAXFD = 256
# To speed up tests when using the forkserver, we can preload these:
PRELOAD = ['__main__', 'test.test_multiprocessing_forkserver']
#
# Some tests require ctypes
#
try:
from ctypes import Structure, c_int, c_double
except ImportError:
Structure = object
c_int = c_double = None
def check_enough_semaphores():
"""Check that the system supports enough semaphores to run the test."""
# minimum number of semaphores available according to POSIX
nsems_min = 256
try:
nsems = os.sysconf("SC_SEM_NSEMS_MAX")
except (AttributeError, ValueError):
# sysconf not available or setting not available
return
if nsems == -1 or nsems >= nsems_min:
return
raise unittest.SkipTest("The OS doesn't support enough semaphores "
"to run the test (required: %d)." % nsems_min)
#
# Creates a wrapper for a function which records the time it takes to finish
#
class TimingWrapper(object):
def __init__(self, func):
self.func = func
self.elapsed = None
def __call__(self, *args, **kwds):
t = time.time()
try:
return self.func(*args, **kwds)
finally:
self.elapsed = time.time() - t
#
# Base class for test cases
#
class BaseTestCase(object):
ALLOWED_TYPES = ('processes', 'manager', 'threads')
def assertTimingAlmostEqual(self, a, b):
if CHECK_TIMINGS:
self.assertAlmostEqual(a, b, 1)
def assertReturnsIfImplemented(self, value, func, *args):
try:
res = func(*args)
except NotImplementedError:
pass
else:
return self.assertEqual(value, res)
# For the sanity of Windows users, rather than crashing or freezing in
# multiple ways.
def __reduce__(self, *args):
raise NotImplementedError("shouldn't try to pickle a test case")
__reduce_ex__ = __reduce__
#
# Return the value of a semaphore
#
def get_value(self):
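    # Try the semaphore's own get_value() first, then fall back to the
    # private attribute names used by different implementations.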
try:
return self.get_value()
except AttributeError:
try:
return self._Semaphore__value
except AttributeError:
try:
return self._value
except AttributeError:
raise NotImplementedError
#
# Testcases
#
class _TestProcess(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_current(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
current = self.current_process()
authkey = current.authkey
self.assertTrue(current.is_alive())
self.assertTrue(not current.daemon)
self.assertIsInstance(authkey, bytes)
self.assertTrue(len(authkey) > 0)
self.assertEqual(current.ident, os.getpid())
self.assertEqual(current.exitcode, None)
def test_daemon_argument(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# By default uses the current process's daemon flag.
proc0 = self.Process(target=self._test)
self.assertEqual(proc0.daemon, self.current_process().daemon)
proc1 = self.Process(target=self._test, daemon=True)
self.assertTrue(proc1.daemon)
proc2 = self.Process(target=self._test, daemon=False)
self.assertFalse(proc2.daemon)
@classmethod
def _test(cls, q, *args, **kwds):
current = cls.current_process()
q.put(args)
q.put(kwds)
q.put(current.name)
if cls.TYPE != 'threads':
q.put(bytes(current.authkey))
q.put(current.pid)
def test_process(self):
q = self.Queue(1)
e = self.Event()
args = (q, 1, 2)
kwargs = {'hello':23, 'bye':2.54}
name = 'SomeProcess'
p = self.Process(
target=self._test, args=args, kwargs=kwargs, name=name
)
p.daemon = True
current = self.current_process()
if self.TYPE != 'threads':
self.assertEqual(p.authkey, current.authkey)
self.assertEqual(p.is_alive(), False)
self.assertEqual(p.daemon, True)
self.assertNotIn(p, self.active_children())
self.assertTrue(type(self.active_children()) is list)
self.assertEqual(p.exitcode, None)
p.start()
self.assertEqual(p.exitcode, None)
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(q.get(), args[1:])
self.assertEqual(q.get(), kwargs)
self.assertEqual(q.get(), p.name)
if self.TYPE != 'threads':
self.assertEqual(q.get(), current.authkey)
self.assertEqual(q.get(), p.pid)
p.join()
self.assertEqual(p.exitcode, 0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
@classmethod
def _test_terminate(cls):
time.sleep(100)
def test_terminate(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
p = self.Process(target=self._test_terminate)
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(p.exitcode, None)
join = TimingWrapper(p.join)
self.assertEqual(join(0), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
self.assertEqual(join(-1), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
# XXX maybe terminating too soon causes the problems on Gentoo...
time.sleep(1)
p.terminate()
if hasattr(signal, 'alarm'):
# On the Gentoo buildbot waitpid() often seems to block forever.
# We use alarm() to interrupt it if it blocks for too long.
def handler(*args):
raise RuntimeError('join took too long: %s' % p)
old_handler = signal.signal(signal.SIGALRM, handler)
try:
signal.alarm(10)
self.assertEqual(join(), None)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_handler)
else:
self.assertEqual(join(), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
p.join()
# XXX sometimes get p.exitcode == 0 on Windows ...
#self.assertEqual(p.exitcode, -signal.SIGTERM)
def test_cpu_count(self):
try:
cpus = multiprocessing.cpu_count()
except NotImplementedError:
cpus = 1
self.assertTrue(type(cpus) is int)
self.assertTrue(cpus >= 1)
def test_active_children(self):
self.assertEqual(type(self.active_children()), list)
p = self.Process(target=time.sleep, args=(DELTA,))
self.assertNotIn(p, self.active_children())
p.daemon = True
p.start()
self.assertIn(p, self.active_children())
p.join()
self.assertNotIn(p, self.active_children())
@classmethod
def _test_recursion(cls, wconn, id):
wconn.send(id)
if len(id) < 2:
for i in range(2):
p = cls.Process(
target=cls._test_recursion, args=(wconn, id+[i])
)
p.start()
p.join()
def test_recursion(self):
rconn, wconn = self.Pipe(duplex=False)
self._test_recursion(wconn, [])
time.sleep(DELTA)
result = []
while rconn.poll():
result.append(rconn.recv())
expected = [
[],
[0],
[0, 0],
[0, 1],
[1],
[1, 0],
[1, 1]
]
self.assertEqual(result, expected)
@classmethod
def _test_sentinel(cls, event):
event.wait(10.0)
def test_sentinel(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
event = self.Event()
p = self.Process(target=self._test_sentinel, args=(event,))
with self.assertRaises(ValueError):
p.sentinel
p.start()
self.addCleanup(p.join)
sentinel = p.sentinel
self.assertIsInstance(sentinel, int)
self.assertFalse(wait_for_handle(sentinel, timeout=0.0))
event.set()
p.join()
self.assertTrue(wait_for_handle(sentinel, timeout=1))
#
#
#
class _UpperCaser(multiprocessing.Process):
def __init__(self):
multiprocessing.Process.__init__(self)
self.child_conn, self.parent_conn = multiprocessing.Pipe()
def run(self):
self.parent_conn.close()
for s in iter(self.child_conn.recv, None):
self.child_conn.send(s.upper())
self.child_conn.close()
def submit(self, s):
assert type(s) is str
self.parent_conn.send(s)
return self.parent_conn.recv()
def stop(self):
self.parent_conn.send(None)
self.parent_conn.close()
self.child_conn.close()
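# Illustrative sketch (hypothetical helper, mirroring what test_subclassing
# does below): start the _UpperCaser worker, send requests over the parent
# end of the pipe, then shut it down with the None sentinel.
def _demo_uppercaser():
    worker = _UpperCaser()
    worker.daemon = True
    worker.start()
    try:
        return worker.submit('hello')   # -> 'HELLO'
    finally:
        worker.stop()
        worker.join()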
class _TestSubclassingProcess(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_subclassing(self):
uppercaser = _UpperCaser()
uppercaser.daemon = True
uppercaser.start()
self.assertEqual(uppercaser.submit('hello'), 'HELLO')
self.assertEqual(uppercaser.submit('world'), 'WORLD')
uppercaser.stop()
uppercaser.join()
def test_stderr_flush(self):
# sys.stderr is flushed at process shutdown (issue #13812)
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
testfn = test.support.TESTFN
self.addCleanup(test.support.unlink, testfn)
proc = self.Process(target=self._test_stderr_flush, args=(testfn,))
proc.start()
proc.join()
with open(testfn, 'r') as f:
err = f.read()
# The whole traceback was printed
self.assertIn("ZeroDivisionError", err)
self.assertIn("test_multiprocessing.py", err)
self.assertIn("1/0 # MARKER", err)
@classmethod
def _test_stderr_flush(cls, testfn):
fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
sys.stderr = open(fd, 'w', closefd=False)
1/0 # MARKER
@classmethod
def _test_sys_exit(cls, reason, testfn):
fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
sys.stderr = open(fd, 'w', closefd=False)
sys.exit(reason)
def test_sys_exit(self):
# See Issue 13854
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
testfn = test.support.TESTFN
self.addCleanup(test.support.unlink, testfn)
for reason in (
[1, 2, 3],
'ignore this',
):
p = self.Process(target=self._test_sys_exit, args=(reason, testfn))
p.daemon = True
p.start()
p.join(5)
self.assertEqual(p.exitcode, 1)
with open(testfn, 'r') as f:
content = f.read()
self.assertEqual(content.rstrip(), str(reason))
os.unlink(testfn)
for reason in (True, False, 8):
p = self.Process(target=sys.exit, args=(reason,))
p.daemon = True
p.start()
p.join(5)
self.assertEqual(p.exitcode, reason)
#
#
#
def queue_empty(q):
if hasattr(q, 'empty'):
return q.empty()
else:
return q.qsize() == 0
def queue_full(q, maxsize):
if hasattr(q, 'full'):
return q.full()
else:
return q.qsize() == maxsize
class _TestQueue(BaseTestCase):
@classmethod
def _test_put(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
for i in range(6):
queue.get()
parent_can_continue.set()
def test_put(self):
MAXSIZE = 6
queue = self.Queue(maxsize=MAXSIZE)
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_put,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
queue.put(1)
queue.put(2, True)
queue.put(3, True, None)
queue.put(4, False)
queue.put(5, False, None)
queue.put_nowait(6)
# the values may be in buffer but not yet in pipe so sleep a bit
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
self.assertEqual(queue_full(queue, MAXSIZE), True)
put = TimingWrapper(queue.put)
put_nowait = TimingWrapper(queue.put_nowait)
self.assertRaises(pyqueue.Full, put, 7, False)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, False, None)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put_nowait, 7)
self.assertTimingAlmostEqual(put_nowait.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, TIMEOUT1)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)
self.assertRaises(pyqueue.Full, put, 7, False, TIMEOUT2)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3)
child_can_start.set()
parent_can_continue.wait()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
proc.join()
@classmethod
def _test_get(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
#queue.put(1)
queue.put(2)
queue.put(3)
queue.put(4)
queue.put(5)
parent_can_continue.set()
def test_get(self):
queue = self.Queue()
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_get,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
child_can_start.set()
parent_can_continue.wait()
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
# Hangs unexpectedly, remove for now
#self.assertEqual(queue.get(), 1)
self.assertEqual(queue.get(True, None), 2)
self.assertEqual(queue.get(True), 3)
self.assertEqual(queue.get(timeout=1), 4)
self.assertEqual(queue.get_nowait(), 5)
self.assertEqual(queue_empty(queue), True)
get = TimingWrapper(queue.get)
get_nowait = TimingWrapper(queue.get_nowait)
self.assertRaises(pyqueue.Empty, get, False)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, False, None)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get_nowait)
self.assertTimingAlmostEqual(get_nowait.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, True, TIMEOUT1)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
self.assertRaises(pyqueue.Empty, get, False, TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3)
proc.join()
@classmethod
def _test_fork(cls, queue):
for i in range(10, 20):
queue.put(i)
# note that at this point the items may only be buffered, so the
        # process cannot shut down until the feeder thread has finished
# pushing items onto the pipe.
def test_fork(self):
# Old versions of Queue would fail to create a new feeder
# thread for a forked process if the original process had its
# own feeder thread. This test checks that this no longer
# happens.
queue = self.Queue()
# put items on queue so that main process starts a feeder thread
for i in range(10):
queue.put(i)
# wait to make sure thread starts before we fork a new process
time.sleep(DELTA)
# fork process
p = self.Process(target=self._test_fork, args=(queue,))
p.daemon = True
p.start()
# check that all expected items are in the queue
for i in range(20):
self.assertEqual(queue.get(), i)
self.assertRaises(pyqueue.Empty, queue.get, False)
p.join()
def test_qsize(self):
q = self.Queue()
try:
self.assertEqual(q.qsize(), 0)
except NotImplementedError:
self.skipTest('qsize method not implemented')
q.put(1)
self.assertEqual(q.qsize(), 1)
q.put(5)
self.assertEqual(q.qsize(), 2)
q.get()
self.assertEqual(q.qsize(), 1)
q.get()
self.assertEqual(q.qsize(), 0)
@classmethod
def _test_task_done(cls, q):
for obj in iter(q.get, None):
time.sleep(DELTA)
q.task_done()
def test_task_done(self):
queue = self.JoinableQueue()
workers = [self.Process(target=self._test_task_done, args=(queue,))
for i in range(4)]
for p in workers:
p.daemon = True
p.start()
for i in range(10):
queue.put(i)
queue.join()
for p in workers:
queue.put(None)
for p in workers:
p.join()
def test_no_import_lock_contention(self):
with test.support.temp_cwd():
module_name = 'imported_by_an_imported_module'
with open(module_name + '.py', 'w') as f:
f.write("""if 1:
import multiprocessing
q = multiprocessing.Queue()
q.put('knock knock')
q.get(timeout=3)
q.close()
del q
""")
with test.support.DirsOnSysPath(os.getcwd()):
try:
__import__(module_name)
except pyqueue.Empty:
self.fail("Probable regression on import lock contention;"
" see Issue #22853")
def test_timeout(self):
q = multiprocessing.Queue()
start = time.time()
self.assertRaises(pyqueue.Empty, q.get, True, 0.200)
delta = time.time() - start
# Tolerate a delta of 30 ms because of the bad clock resolution on
# Windows (usually 15.6 ms)
self.assertGreaterEqual(delta, 0.170)
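# Illustrative sketch (hypothetical helpers, written against the plain
# multiprocessing API rather than the per-type mixins): the producer/consumer
# protocol exercised by test_task_done above -- every get() is acknowledged
# with task_done(), and join() blocks until all queued items have been
# acknowledged.
def _demo_task_done_worker(q):
    for item in iter(q.get, None):      # None is the shutdown sentinel
        q.task_done()
def _demo_joinable_queue(n_workers=2, n_items=10):
    q = multiprocessing.JoinableQueue()
    workers = [multiprocessing.Process(target=_demo_task_done_worker,
                                       args=(q,), daemon=True)
               for _ in range(n_workers)]
    for w in workers:
        w.start()
    for i in range(n_items):
        q.put(i)
    q.join()                            # returns once every item was task_done()'d
    for _ in workers:
        q.put(None)                     # tell each worker to exit
    for w in workers:
        w.join()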
#
#
#
class _TestLock(BaseTestCase):
def test_lock(self):
lock = self.Lock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(False), False)
self.assertEqual(lock.release(), None)
self.assertRaises((ValueError, threading.ThreadError), lock.release)
def test_rlock(self):
lock = self.RLock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertRaises((AssertionError, RuntimeError), lock.release)
def test_lock_context(self):
with self.Lock():
pass
class _TestSemaphore(BaseTestCase):
def _test_semaphore(self, sem):
self.assertReturnsIfImplemented(2, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.acquire(False), False)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(2, get_value, sem)
def test_semaphore(self):
sem = self.Semaphore(2)
self._test_semaphore(sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(3, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(4, get_value, sem)
def test_bounded_semaphore(self):
sem = self.BoundedSemaphore(2)
self._test_semaphore(sem)
# Currently fails on OS/X
#if HAVE_GETVALUE:
# self.assertRaises(ValueError, sem.release)
# self.assertReturnsIfImplemented(2, get_value, sem)
def test_timeout(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sem = self.Semaphore(0)
acquire = TimingWrapper(sem.acquire)
self.assertEqual(acquire(False), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, None), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, TIMEOUT1), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0)
self.assertEqual(acquire(True, TIMEOUT2), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)
self.assertEqual(acquire(timeout=TIMEOUT3), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3)
class _TestCondition(BaseTestCase):
@classmethod
def f(cls, cond, sleeping, woken, timeout=None):
cond.acquire()
sleeping.release()
cond.wait(timeout)
woken.release()
cond.release()
def check_invariant(self, cond):
# this is only supposed to succeed when there are no sleepers
if self.TYPE == 'processes':
try:
sleepers = (cond._sleeping_count.get_value() -
cond._woken_count.get_value())
self.assertEqual(sleepers, 0)
self.assertEqual(cond._wait_semaphore.get_value(), 0)
except NotImplementedError:
pass
def test_notify(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
# wait for both children to start sleeping
sleeping.acquire()
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake up one process/thread
cond.acquire()
cond.notify()
cond.release()
# check one process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(1, get_value, woken)
# wake up another
cond.acquire()
cond.notify()
cond.release()
# check other has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(2, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
p.join()
def test_notify_all(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes which will timeout
for i in range(3):
p = self.Process(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
p.daemon = True
p.start()
t = threading.Thread(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
t.daemon = True
t.start()
# wait for them all to sleep
for i in range(6):
sleeping.acquire()
# check they have all timed out
for i in range(6):
woken.acquire()
self.assertReturnsIfImplemented(0, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
# start some more threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
# wait for them to all sleep
for i in range(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake them all up
cond.acquire()
cond.notify_all()
cond.release()
# check they have all woken
for i in range(10):
try:
if get_value(woken) == 6:
break
except NotImplementedError:
break
time.sleep(DELTA)
self.assertReturnsIfImplemented(6, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
def test_timeout(self):
cond = self.Condition()
wait = TimingWrapper(cond.wait)
cond.acquire()
res = wait(TIMEOUT1)
cond.release()
self.assertEqual(res, False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
@classmethod
def _test_waitfor_f(cls, cond, state):
with cond:
state.value = 0
cond.notify()
result = cond.wait_for(lambda : state.value==4)
if not result or state.value != 4:
sys.exit(1)
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', -1)
p = self.Process(target=self._test_waitfor_f, args=(cond, state))
p.daemon = True
p.start()
with cond:
result = cond.wait_for(lambda : state.value==0)
self.assertTrue(result)
self.assertEqual(state.value, 0)
for i in range(4):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
p.join(5)
self.assertFalse(p.is_alive())
self.assertEqual(p.exitcode, 0)
@classmethod
def _test_waitfor_timeout_f(cls, cond, state, success, sem):
sem.release()
with cond:
expected = 0.1
dt = time.time()
result = cond.wait_for(lambda : state.value==4, timeout=expected)
dt = time.time() - dt
# borrow logic in assertTimeout() from test/lock_tests.py
if not result and expected * 0.6 < dt < expected * 10.0:
success.value = True
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor_timeout(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', 0)
success = self.Value('i', False)
sem = self.Semaphore(0)
p = self.Process(target=self._test_waitfor_timeout_f,
args=(cond, state, success, sem))
p.daemon = True
p.start()
self.assertTrue(sem.acquire(timeout=10))
# Only increment 3 times, so state == 4 is never reached.
for i in range(3):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
p.join(5)
self.assertTrue(success.value)
@classmethod
def _test_wait_result(cls, c, pid):
with c:
c.notify()
time.sleep(1)
if pid is not None:
os.kill(pid, signal.SIGINT)
def test_wait_result(self):
if isinstance(self, ProcessesMixin) and sys.platform != 'win32':
pid = os.getpid()
else:
pid = None
c = self.Condition()
with c:
self.assertFalse(c.wait(0))
self.assertFalse(c.wait(0.1))
p = self.Process(target=self._test_wait_result, args=(c, pid))
p.start()
self.assertTrue(c.wait(10))
if pid is not None:
self.assertRaises(KeyboardInterrupt, c.wait, 10)
p.join()
class _TestEvent(BaseTestCase):
@classmethod
def _test_event(cls, event):
time.sleep(TIMEOUT2)
event.set()
def test_event(self):
event = self.Event()
wait = TimingWrapper(event.wait)
        # Removed temporarily due to API shear: this does not
        # work with threading._Event objects (is_set == isSet).
self.assertEqual(event.is_set(), False)
        # Removed: threading.Event.wait() returns the value of the internal
        # flag instead of None -- API shear with the semaphore-backed mp.Event.
self.assertEqual(wait(0.0), False)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
event.set()
# See note above on the API differences
self.assertEqual(event.is_set(), True)
self.assertEqual(wait(), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
# self.assertEqual(event.is_set(), True)
event.clear()
#self.assertEqual(event.is_set(), False)
p = self.Process(target=self._test_event, args=(event,))
p.daemon = True
p.start()
self.assertEqual(wait(), True)
#
# Tests for Barrier - adapted from tests in test/lock_tests.py
#
# Many of the tests for threading.Barrier use a list as an atomic
# counter: a value is appended to increment the counter, and the
# length of the list gives the value. We use the class DummyList
# for the same purpose.
class _DummyList(object):
def __init__(self):
wrapper = multiprocessing.heap.BufferWrapper(struct.calcsize('i'))
lock = multiprocessing.Lock()
self.__setstate__((wrapper, lock))
self._lengthbuf[0] = 0
def __setstate__(self, state):
(self._wrapper, self._lock) = state
self._lengthbuf = self._wrapper.create_memoryview().cast('i')
def __getstate__(self):
return (self._wrapper, self._lock)
def append(self, _):
with self._lock:
self._lengthbuf[0] += 1
def __len__(self):
with self._lock:
return self._lengthbuf[0]
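# Illustrative sketch (hypothetical helper): the "list as an atomic counter"
# idiom described above -- append() increments the shared counter under the
# lock, and len() reads it back, so the count stays consistent across processes.
def _demo_dummy_list_counter():
    counter = _DummyList()
    for _ in range(3):
        counter.append(True)    # the appended value is ignored; only the count matters
    return len(counter)         # -> 3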
def _wait():
# A crude wait/yield function not relying on synchronization primitives.
time.sleep(0.01)
class Bunch(object):
"""
A bunch of threads.
"""
def __init__(self, namespace, f, args, n, wait_before_exit=False):
"""
Construct a bunch of `n` threads running the same function `f`.
If `wait_before_exit` is True, the threads won't terminate until
do_finish() is called.
"""
self.f = f
self.args = args
self.n = n
self.started = namespace.DummyList()
self.finished = namespace.DummyList()
self._can_exit = namespace.Event()
if not wait_before_exit:
self._can_exit.set()
for i in range(n):
p = namespace.Process(target=self.task)
p.daemon = True
p.start()
def task(self):
pid = os.getpid()
self.started.append(pid)
try:
self.f(*self.args)
finally:
self.finished.append(pid)
self._can_exit.wait(30)
assert self._can_exit.is_set()
def wait_for_started(self):
while len(self.started) < self.n:
_wait()
def wait_for_finished(self):
while len(self.finished) < self.n:
_wait()
def do_finish(self):
self._can_exit.set()
class AppendTrue(object):
def __init__(self, obj):
self.obj = obj
def __call__(self):
self.obj.append(True)
class _TestBarrier(BaseTestCase):
"""
Tests for Barrier objects.
"""
N = 5
defaultTimeout = 30.0 # XXX Slow Windows buildbots need generous timeout
def setUp(self):
self.barrier = self.Barrier(self.N, timeout=self.defaultTimeout)
def tearDown(self):
self.barrier.abort()
self.barrier = None
def DummyList(self):
if self.TYPE == 'threads':
return []
elif self.TYPE == 'manager':
return self.manager.list()
else:
return _DummyList()
def run_threads(self, f, args):
b = Bunch(self, f, args, self.N-1)
f(*args)
b.wait_for_finished()
@classmethod
def multipass(cls, barrier, results, n):
m = barrier.parties
assert m == cls.N
for i in range(n):
results[0].append(True)
assert len(results[1]) == i * m
barrier.wait()
results[1].append(True)
assert len(results[0]) == (i + 1) * m
barrier.wait()
try:
assert barrier.n_waiting == 0
except NotImplementedError:
pass
assert not barrier.broken
def test_barrier(self, passes=1):
"""
Test that a barrier is passed in lockstep
"""
results = [self.DummyList(), self.DummyList()]
self.run_threads(self.multipass, (self.barrier, results, passes))
def test_barrier_10(self):
"""
Test that a barrier works for 10 consecutive runs
"""
return self.test_barrier(10)
@classmethod
def _test_wait_return_f(cls, barrier, queue):
res = barrier.wait()
queue.put(res)
def test_wait_return(self):
"""
test the return value from barrier.wait
"""
queue = self.Queue()
self.run_threads(self._test_wait_return_f, (self.barrier, queue))
results = [queue.get() for i in range(self.N)]
self.assertEqual(results.count(0), 1)
@classmethod
def _test_action_f(cls, barrier, results):
barrier.wait()
if len(results) != 1:
raise RuntimeError
def test_action(self):
"""
Test the 'action' callback
"""
results = self.DummyList()
barrier = self.Barrier(self.N, action=AppendTrue(results))
self.run_threads(self._test_action_f, (barrier, results))
self.assertEqual(len(results), 1)
@classmethod
def _test_abort_f(cls, barrier, results1, results2):
try:
i = barrier.wait()
if i == cls.N//2:
raise RuntimeError
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
barrier.abort()
def test_abort(self):
"""
Test that an abort will put the barrier in a broken state
"""
results1 = self.DummyList()
results2 = self.DummyList()
self.run_threads(self._test_abort_f,
(self.barrier, results1, results2))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertTrue(self.barrier.broken)
@classmethod
def _test_reset_f(cls, barrier, results1, results2, results3):
i = barrier.wait()
if i == cls.N//2:
# Wait until the other threads are all in the barrier.
while barrier.n_waiting < cls.N-1:
time.sleep(0.001)
barrier.reset()
else:
try:
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
# Now, pass the barrier again
barrier.wait()
results3.append(True)
def test_reset(self):
"""
Test that a 'reset' on a barrier frees the waiting threads
"""
results1 = self.DummyList()
results2 = self.DummyList()
results3 = self.DummyList()
self.run_threads(self._test_reset_f,
(self.barrier, results1, results2, results3))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
@classmethod
def _test_abort_and_reset_f(cls, barrier, barrier2,
results1, results2, results3):
try:
i = barrier.wait()
if i == cls.N//2:
raise RuntimeError
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
barrier.abort()
# Synchronize and reset the barrier. Must synchronize first so
# that everyone has left it when we reset, and after so that no
# one enters it before the reset.
if barrier2.wait() == cls.N//2:
barrier.reset()
barrier2.wait()
barrier.wait()
results3.append(True)
def test_abort_and_reset(self):
"""
Test that a barrier can be reset after being broken.
"""
results1 = self.DummyList()
results2 = self.DummyList()
results3 = self.DummyList()
barrier2 = self.Barrier(self.N)
self.run_threads(self._test_abort_and_reset_f,
(self.barrier, barrier2, results1, results2, results3))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
@classmethod
def _test_timeout_f(cls, barrier, results):
i = barrier.wait()
if i == cls.N//2:
# One thread is late!
time.sleep(1.0)
try:
barrier.wait(0.5)
except threading.BrokenBarrierError:
results.append(True)
def test_timeout(self):
"""
Test wait(timeout)
"""
results = self.DummyList()
self.run_threads(self._test_timeout_f, (self.barrier, results))
self.assertEqual(len(results), self.barrier.parties)
@classmethod
def _test_default_timeout_f(cls, barrier, results):
i = barrier.wait(cls.defaultTimeout)
if i == cls.N//2:
# One thread is later than the default timeout
time.sleep(1.0)
try:
barrier.wait()
except threading.BrokenBarrierError:
results.append(True)
def test_default_timeout(self):
"""
Test the barrier's default timeout
"""
barrier = self.Barrier(self.N, timeout=0.5)
results = self.DummyList()
self.run_threads(self._test_default_timeout_f, (barrier, results))
self.assertEqual(len(results), barrier.parties)
def test_single_thread(self):
b = self.Barrier(1)
b.wait()
b.wait()
@classmethod
def _test_thousand_f(cls, barrier, passes, conn, lock):
for i in range(passes):
barrier.wait()
with lock:
conn.send(i)
def test_thousand(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
passes = 1000
lock = self.Lock()
conn, child_conn = self.Pipe(False)
for j in range(self.N):
p = self.Process(target=self._test_thousand_f,
args=(self.barrier, passes, child_conn, lock))
p.start()
for i in range(passes):
for j in range(self.N):
self.assertEqual(conn.recv(), i)
#
#
#
class _TestValue(BaseTestCase):
ALLOWED_TYPES = ('processes',)
codes_values = [
('i', 4343, 24234),
('d', 3.625, -4.25),
('h', -232, 234),
('c', latin('x'), latin('y'))
]
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _test(cls, values):
for sv, cv in zip(values, cls.codes_values):
sv.value = cv[2]
def test_value(self, raw=False):
if raw:
values = [self.RawValue(code, value)
for code, value, _ in self.codes_values]
else:
values = [self.Value(code, value)
for code, value, _ in self.codes_values]
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[1])
proc = self.Process(target=self._test, args=(values,))
proc.daemon = True
proc.start()
proc.join()
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[2])
def test_rawvalue(self):
self.test_value(raw=True)
def test_getobj_getlock(self):
val1 = self.Value('i', 5)
lock1 = val1.get_lock()
obj1 = val1.get_obj()
val2 = self.Value('i', 5, lock=None)
lock2 = val2.get_lock()
obj2 = val2.get_obj()
lock = self.Lock()
val3 = self.Value('i', 5, lock=lock)
lock3 = val3.get_lock()
obj3 = val3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Value('i', 5, lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue')
arr5 = self.RawValue('i', 5)
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
class _TestArray(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def f(cls, seq):
for i in range(1, len(seq)):
seq[i] += seq[i-1]
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array(self, raw=False):
seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]
if raw:
arr = self.RawArray('i', seq)
else:
arr = self.Array('i', seq)
self.assertEqual(len(arr), len(seq))
self.assertEqual(arr[3], seq[3])
self.assertEqual(list(arr[2:7]), list(seq[2:7]))
arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])
self.assertEqual(list(arr[:]), seq)
self.f(seq)
p = self.Process(target=self.f, args=(arr,))
p.daemon = True
p.start()
p.join()
self.assertEqual(list(arr[:]), seq)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array_from_size(self):
size = 10
# Test for zeroing (see issue #11675).
# The repetition below strengthens the test by increasing the chances
# of previously allocated non-zero memory being used for the new array
# on the 2nd and 3rd loops.
for _ in range(3):
arr = self.Array('i', size)
self.assertEqual(len(arr), size)
self.assertEqual(list(arr), [0] * size)
arr[:] = range(10)
self.assertEqual(list(arr), list(range(10)))
del arr
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_rawarray(self):
self.test_array(raw=True)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_getobj_getlock_obj(self):
arr1 = self.Array('i', list(range(10)))
lock1 = arr1.get_lock()
obj1 = arr1.get_obj()
arr2 = self.Array('i', list(range(10)), lock=None)
lock2 = arr2.get_lock()
obj2 = arr2.get_obj()
lock = self.Lock()
arr3 = self.Array('i', list(range(10)), lock=lock)
lock3 = arr3.get_lock()
obj3 = arr3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Array('i', range(10), lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError,
self.Array, 'i', range(10), lock='notalock')
arr5 = self.RawArray('i', range(10))
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
#
#
#
class _TestContainers(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_list(self):
a = self.list(list(range(10)))
self.assertEqual(a[:], list(range(10)))
b = self.list()
self.assertEqual(b[:], [])
b.extend(list(range(5)))
self.assertEqual(b[:], list(range(5)))
self.assertEqual(b[2], 2)
self.assertEqual(b[2:10], [2,3,4])
b *= 2
self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6])
self.assertEqual(a[:], list(range(10)))
d = [a, b]
e = self.list(d)
self.assertEqual(
e[:],
[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]
)
f = self.list([a])
a.append('hello')
self.assertEqual(f[:], [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello']])
def test_dict(self):
d = self.dict()
indices = list(range(65, 70))
for i in indices:
d[i] = chr(i)
self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices))
self.assertEqual(sorted(d.keys()), indices)
self.assertEqual(sorted(d.values()), [chr(i) for i in indices])
self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices])
def test_namespace(self):
n = self.Namespace()
n.name = 'Bob'
n.job = 'Builder'
n._hidden = 'hidden'
self.assertEqual((n.name, n.job), ('Bob', 'Builder'))
del n.job
self.assertEqual(str(n), "Namespace(name='Bob')")
self.assertTrue(hasattr(n, 'name'))
self.assertTrue(not hasattr(n, 'job'))
#
#
#
def sqr(x, wait=0.0):
time.sleep(wait)
return x*x
def mul(x, y):
return x*y
class SayWhenError(ValueError): pass
def exception_throwing_generator(total, when):
for i in range(total):
if i == when:
raise SayWhenError("Somebody said when")
yield i
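# Illustrative sketch (hypothetical helper, using a plain Pool instead of the
# per-type pool the mixins install): the module-level helpers above are what
# _TestPool feeds to map() and starmap(), for example:
def _demo_pool_helpers():
    with multiprocessing.Pool(2) as pool:
        squares = pool.map(sqr, range(5))                # [0, 1, 4, 9, 16]
        products = pool.starmap(mul, [(2, 3), (4, 5)])   # [6, 20]
    return squares, products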
class _TestPool(BaseTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.pool = cls.Pool(4)
@classmethod
def tearDownClass(cls):
cls.pool.terminate()
cls.pool.join()
cls.pool = None
super().tearDownClass()
def test_apply(self):
papply = self.pool.apply
self.assertEqual(papply(sqr, (5,)), sqr(5))
self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3))
def test_map(self):
pmap = self.pool.map
self.assertEqual(pmap(sqr, list(range(10))), list(map(sqr, list(range(10)))))
self.assertEqual(pmap(sqr, list(range(100)), chunksize=20),
list(map(sqr, list(range(100)))))
def test_starmap(self):
psmap = self.pool.starmap
tuples = list(zip(range(10), range(9,-1, -1)))
self.assertEqual(psmap(mul, tuples),
list(itertools.starmap(mul, tuples)))
tuples = list(zip(range(100), range(99,-1, -1)))
self.assertEqual(psmap(mul, tuples, chunksize=20),
list(itertools.starmap(mul, tuples)))
def test_starmap_async(self):
tuples = list(zip(range(100), range(99,-1, -1)))
self.assertEqual(self.pool.starmap_async(mul, tuples).get(),
list(itertools.starmap(mul, tuples)))
def test_map_async(self):
self.assertEqual(self.pool.map_async(sqr, list(range(10))).get(),
list(map(sqr, list(range(10)))))
def test_map_async_callbacks(self):
call_args = self.manager.list() if self.TYPE == 'manager' else []
self.pool.map_async(int, ['1'],
callback=call_args.append,
error_callback=call_args.append).wait()
self.assertEqual(1, len(call_args))
self.assertEqual([1], call_args[0])
self.pool.map_async(int, ['a'],
callback=call_args.append,
error_callback=call_args.append).wait()
self.assertEqual(2, len(call_args))
self.assertIsInstance(call_args[1], ValueError)
    def test_map_unpicklable(self):
# Issue #19425 -- failure to pickle should not cause a hang
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class A(object):
def __reduce__(self):
raise RuntimeError('cannot pickle')
with self.assertRaises(RuntimeError):
self.pool.map(sqr, [A()]*10)
def test_map_chunksize(self):
try:
self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1)
except multiprocessing.TimeoutError:
self.fail("pool.map_async with chunksize stalled on null list")
def test_async(self):
res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
get = TimingWrapper(res.get)
self.assertEqual(get(), 49)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
def test_async_timeout(self):
res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 1.0))
get = TimingWrapper(res.get)
self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)
def test_imap(self):
it = self.pool.imap(sqr, list(range(10)))
self.assertEqual(list(it), list(map(sqr, list(range(10)))))
it = self.pool.imap(sqr, list(range(10)))
for i in range(10):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
it = self.pool.imap(sqr, list(range(1000)), chunksize=100)
for i in range(1000):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
def test_imap_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
it = self.pool.imap(sqr, exception_throwing_generator(10, 3), 1)
for i in range(3):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
# SayWhenError seen at start of problematic chunk's results
it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 2)
for i in range(6):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 4)
for i in range(4):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
def test_imap_unordered(self):
it = self.pool.imap_unordered(sqr, list(range(1000)))
self.assertEqual(sorted(it), list(map(sqr, list(range(1000)))))
it = self.pool.imap_unordered(sqr, list(range(1000)), chunksize=53)
self.assertEqual(sorted(it), list(map(sqr, list(range(1000)))))
def test_imap_unordered_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(10, 3),
1)
expected_values = list(map(sqr, list(range(10))))
with self.assertRaises(SayWhenError):
# imap_unordered makes it difficult to anticipate the SayWhenError
for i in range(10):
value = next(it)
self.assertIn(value, expected_values)
expected_values.remove(value)
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(20, 7),
2)
expected_values = list(map(sqr, list(range(20))))
with self.assertRaises(SayWhenError):
for i in range(20):
value = next(it)
self.assertIn(value, expected_values)
expected_values.remove(value)
def test_make_pool(self):
expected_error = (RemoteError if self.TYPE == 'manager'
else ValueError)
self.assertRaises(expected_error, self.Pool, -1)
self.assertRaises(expected_error, self.Pool, 0)
if self.TYPE != 'manager':
p = self.Pool(3)
try:
self.assertEqual(3, len(p._pool))
finally:
p.close()
p.join()
def test_terminate(self):
result = self.pool.map_async(
time.sleep, [0.1 for i in range(10000)], chunksize=1
)
self.pool.terminate()
join = TimingWrapper(self.pool.join)
join()
# Sanity check the pool didn't wait for all tasks to finish
self.assertLess(join.elapsed, 2.0)
def test_empty_iterable(self):
# See Issue 12157
p = self.Pool(1)
self.assertEqual(p.map(sqr, []), [])
self.assertEqual(list(p.imap(sqr, [])), [])
self.assertEqual(list(p.imap_unordered(sqr, [])), [])
self.assertEqual(p.map_async(sqr, []).get(), [])
p.close()
p.join()
def test_context(self):
if self.TYPE == 'processes':
L = list(range(10))
expected = [sqr(i) for i in L]
with self.Pool(2) as p:
r = p.map_async(sqr, L)
self.assertEqual(r.get(), expected)
self.assertRaises(ValueError, p.map_async, sqr, L)
@classmethod
def _test_traceback(cls):
raise RuntimeError(123) # some comment
def test_traceback(self):
        # We want to ensure that the traceback from the child process is
# contained in the traceback raised in the main process.
if self.TYPE == 'processes':
with self.Pool(1) as p:
try:
p.apply(self._test_traceback)
except Exception as e:
exc = e
else:
raise AssertionError('expected RuntimeError')
self.assertIs(type(exc), RuntimeError)
self.assertEqual(exc.args, (123,))
cause = exc.__cause__
self.assertIs(type(cause), multiprocessing.pool.RemoteTraceback)
self.assertIn('raise RuntimeError(123) # some comment', cause.tb)
with test.support.captured_stderr() as f1:
try:
raise exc
except RuntimeError:
sys.excepthook(*sys.exc_info())
self.assertIn('raise RuntimeError(123) # some comment',
f1.getvalue())
@classmethod
def _test_wrapped_exception(cls):
raise RuntimeError('foo')
def test_wrapped_exception(self):
# Issue #20980: Should not wrap exception when using thread pool
with self.Pool(1) as p:
with self.assertRaises(RuntimeError):
p.apply(self._test_wrapped_exception)
def raising():
raise KeyError("key")
def unpickleable_result():
return lambda: 42
class _TestPoolWorkerErrors(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_async_error_callback(self):
p = multiprocessing.Pool(2)
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(raising, error_callback=errback)
self.assertRaises(KeyError, res.get)
self.assertTrue(scratchpad[0])
self.assertIsInstance(scratchpad[0], KeyError)
p.close()
p.join()
def test_unpickleable_result(self):
from multiprocessing.pool import MaybeEncodingError
p = multiprocessing.Pool(2)
# Make sure we don't lose pool processes because of encoding errors.
for iteration in range(20):
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(unpickleable_result, error_callback=errback)
self.assertRaises(MaybeEncodingError, res.get)
wrapped = scratchpad[0]
self.assertTrue(wrapped)
self.assertIsInstance(scratchpad[0], MaybeEncodingError)
self.assertIsNotNone(wrapped.exc)
self.assertIsNotNone(wrapped.value)
p.close()
p.join()
class _TestPoolWorkerLifetime(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_pool_worker_lifetime(self):
p = multiprocessing.Pool(3, maxtasksperchild=10)
self.assertEqual(3, len(p._pool))
origworkerpids = [w.pid for w in p._pool]
# Run many tasks so each worker gets replaced (hopefully)
results = []
for i in range(100):
results.append(p.apply_async(sqr, (i, )))
# Fetch the results and verify we got the right answers,
# also ensuring all the tasks have completed.
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
# Refill the pool
p._repopulate_pool()
# Wait until all workers are alive
# (countdown * DELTA = 5 seconds max startup process time)
countdown = 50
while countdown and not all(w.is_alive() for w in p._pool):
countdown -= 1
time.sleep(DELTA)
finalworkerpids = [w.pid for w in p._pool]
# All pids should be assigned. See issue #7805.
self.assertNotIn(None, origworkerpids)
self.assertNotIn(None, finalworkerpids)
# Finally, check that the worker pids have changed
self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids))
p.close()
p.join()
def test_pool_worker_lifetime_early_close(self):
# Issue #10332: closing a pool whose workers have limited lifetimes
# before all the tasks completed would make join() hang.
p = multiprocessing.Pool(3, maxtasksperchild=1)
results = []
for i in range(6):
results.append(p.apply_async(sqr, (i, 0.3)))
p.close()
p.join()
# check the results
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
#
# Test of creating a customized manager class
#
from multiprocessing.managers import BaseManager, BaseProxy, RemoteError
class FooBar(object):
def f(self):
return 'f()'
def g(self):
raise ValueError
def _h(self):
return '_h()'
def baz():
for i in range(10):
yield i*i
class IteratorProxy(BaseProxy):
_exposed_ = ('__next__',)
def __iter__(self):
return self
def __next__(self):
return self._callmethod('__next__')
class MyManager(BaseManager):
pass
MyManager.register('Foo', callable=FooBar)
MyManager.register('Bar', callable=FooBar, exposed=('f', '_h'))
MyManager.register('baz', callable=baz, proxytype=IteratorProxy)
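# Illustrative sketch (hypothetical helper): using the customized manager
# registered above -- start the manager's server process, build proxies via
# the registered names, and call methods through them.
def _demo_my_manager():
    with MyManager() as manager:
        foo = manager.Foo()             # AutoProxy for a FooBar in the server
        squares = list(manager.baz())   # IteratorProxy forwards __next__()
        return foo.f(), squares         # -> ('f()', [0, 1, 4, ..., 81])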
class _TestMyManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_mymanager(self):
manager = MyManager()
manager.start()
self.common(manager)
manager.shutdown()
# If the manager process exited cleanly then the exitcode
# will be zero. Otherwise (after a short timeout)
# terminate() is used, resulting in an exitcode of -SIGTERM.
self.assertEqual(manager._process.exitcode, 0)
def test_mymanager_context(self):
with MyManager() as manager:
self.common(manager)
self.assertEqual(manager._process.exitcode, 0)
def test_mymanager_context_prestarted(self):
manager = MyManager()
manager.start()
with manager:
self.common(manager)
self.assertEqual(manager._process.exitcode, 0)
def common(self, manager):
foo = manager.Foo()
bar = manager.Bar()
baz = manager.baz()
foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)]
bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)]
self.assertEqual(foo_methods, ['f', 'g'])
self.assertEqual(bar_methods, ['f', '_h'])
self.assertEqual(foo.f(), 'f()')
self.assertRaises(ValueError, foo.g)
self.assertEqual(foo._callmethod('f'), 'f()')
self.assertRaises(RemoteError, foo._callmethod, '_h')
self.assertEqual(bar.f(), 'f()')
self.assertEqual(bar._h(), '_h()')
self.assertEqual(bar._callmethod('f'), 'f()')
self.assertEqual(bar._callmethod('_h'), '_h()')
self.assertEqual(list(baz), [i*i for i in range(10)])
#
# Test of connecting to a remote server and using xmlrpclib for serialization
#
_queue = pyqueue.Queue()
def get_queue():
return _queue
class QueueManager(BaseManager):
'''manager class used by server process'''
QueueManager.register('get_queue', callable=get_queue)
class QueueManager2(BaseManager):
'''manager class which specifies the same interface as QueueManager'''
QueueManager2.register('get_queue')
SERIALIZER = 'xmlrpclib'
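# Illustrative sketch (hypothetical helper; 'localhost' and the key are
# placeholders): the server/client split exercised by _TestRemoteManager --
# one QueueManager serves the module-level queue, while QueueManager2, which
# only declares the same registered name, connects to it remotely.
def _demo_remote_queue(authkey=b'example-key'):
    server = QueueManager(address=('localhost', 0), authkey=authkey,
                          serializer=SERIALIZER)
    server.start()
    try:
        client = QueueManager2(address=server.address, authkey=authkey,
                               serializer=SERIALIZER)
        client.connect()
        client.get_queue().put('ping')
        return server.get_queue().get()   # -> 'ping'
    finally:
        server.shutdown()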
class _TestRemoteManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
values = ['hello world', None, True, 2.25,
'hall\xe5 v\xe4rlden',
'\u043f\u0440\u0438\u0432\u0456\u0442 \u0441\u0432\u0456\u0442',
b'hall\xe5 v\xe4rlden',
]
result = values[:]
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager2(
address=address, authkey=authkey, serializer=SERIALIZER
)
manager.connect()
queue = manager.get_queue()
# Note that xmlrpclib will deserialize object as a list not a tuple
queue.put(tuple(cls.values))
def test_remote(self):
authkey = os.urandom(32)
manager = QueueManager(
address=(test.support.HOST, 0), authkey=authkey, serializer=SERIALIZER
)
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.daemon = True
p.start()
manager2 = QueueManager2(
address=manager.address, authkey=authkey, serializer=SERIALIZER
)
manager2.connect()
queue = manager2.get_queue()
self.assertEqual(queue.get(), self.result)
# Because we are using xmlrpclib for serialization instead of
# pickle this will cause a serialization error.
self.assertRaises(Exception, queue.put, time.sleep)
# Make queue finalizer run before the server is stopped
del queue
manager.shutdown()
class _TestManagerRestart(BaseTestCase):
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager(
address=address, authkey=authkey, serializer=SERIALIZER)
manager.connect()
queue = manager.get_queue()
queue.put('hello world')
def test_rapid_restart(self):
authkey = os.urandom(32)
manager = QueueManager(
address=(test.support.HOST, 0), authkey=authkey, serializer=SERIALIZER)
srvr = manager.get_server()
addr = srvr.address
# Close the connection.Listener socket which gets opened as a part
# of manager.get_server(). It's not needed for the test.
srvr.listener.close()
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.daemon = True
p.start()
queue = manager.get_queue()
self.assertEqual(queue.get(), 'hello world')
del queue
manager.shutdown()
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
try:
manager.start()
except OSError as e:
if e.errno != errno.EADDRINUSE:
raise
# Retry after some time, in case the old socket was lingering
# (sporadic failure on buildbots)
time.sleep(1.0)
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
manager.shutdown()
#
#
#
SENTINEL = latin('')
class _TestConnection(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _echo(cls, conn):
for msg in iter(conn.recv_bytes, SENTINEL):
conn.send_bytes(msg)
conn.close()
def test_connection(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
seq = [1, 2.25, None]
msg = latin('hello world')
longmsg = msg * 10
arr = array.array('i', list(range(4)))
if self.TYPE == 'processes':
self.assertEqual(type(conn.fileno()), int)
self.assertEqual(conn.send(seq), None)
self.assertEqual(conn.recv(), seq)
self.assertEqual(conn.send_bytes(msg), None)
self.assertEqual(conn.recv_bytes(), msg)
if self.TYPE == 'processes':
buffer = array.array('i', [0]*10)
expected = list(arr) + [0] * (10 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = array.array('i', [0]*10)
expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = bytearray(latin(' ' * 40))
self.assertEqual(conn.send_bytes(longmsg), None)
try:
res = conn.recv_bytes_into(buffer)
except multiprocessing.BufferTooShort as e:
self.assertEqual(e.args, (longmsg,))
else:
self.fail('expected BufferTooShort, got %s' % res)
poll = TimingWrapper(conn.poll)
self.assertEqual(poll(), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(-1), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(TIMEOUT1), False)
self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)
conn.send(None)
time.sleep(.1)
self.assertEqual(poll(TIMEOUT1), True)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(conn.recv(), None)
        really_big_msg = latin('X') * (1024 * 1024 * 16)   # 16 MiB
conn.send_bytes(really_big_msg)
self.assertEqual(conn.recv_bytes(), really_big_msg)
conn.send_bytes(SENTINEL) # tell child to quit
child_conn.close()
if self.TYPE == 'processes':
self.assertEqual(conn.readable, True)
self.assertEqual(conn.writable, True)
self.assertRaises(EOFError, conn.recv)
self.assertRaises(EOFError, conn.recv_bytes)
p.join()
def test_duplex_false(self):
reader, writer = self.Pipe(duplex=False)
self.assertEqual(writer.send(1), None)
self.assertEqual(reader.recv(), 1)
if self.TYPE == 'processes':
self.assertEqual(reader.readable, True)
self.assertEqual(reader.writable, False)
self.assertEqual(writer.readable, False)
self.assertEqual(writer.writable, True)
self.assertRaises(OSError, reader.send, 2)
self.assertRaises(OSError, writer.recv)
self.assertRaises(OSError, writer.poll)
def test_spawn_close(self):
# We test that a pipe connection can be closed by parent
# process immediately after child is spawned. On Windows this
# would have sometimes failed on old versions because
# child_conn would be closed before the child got a chance to
# duplicate it.
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close() # this might complete before child initializes
msg = latin('hello')
conn.send_bytes(msg)
self.assertEqual(conn.recv_bytes(), msg)
conn.send_bytes(SENTINEL)
conn.close()
p.join()
def test_sendbytes(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
msg = latin('abcdefghijklmnopqrstuvwxyz')
a, b = self.Pipe()
a.send_bytes(msg)
self.assertEqual(b.recv_bytes(), msg)
a.send_bytes(msg, 5)
self.assertEqual(b.recv_bytes(), msg[5:])
a.send_bytes(msg, 7, 8)
self.assertEqual(b.recv_bytes(), msg[7:7+8])
a.send_bytes(msg, 26)
self.assertEqual(b.recv_bytes(), latin(''))
a.send_bytes(msg, 26, 0)
self.assertEqual(b.recv_bytes(), latin(''))
self.assertRaises(ValueError, a.send_bytes, msg, 27)
self.assertRaises(ValueError, a.send_bytes, msg, 22, 5)
self.assertRaises(ValueError, a.send_bytes, msg, 26, 1)
self.assertRaises(ValueError, a.send_bytes, msg, -1)
self.assertRaises(ValueError, a.send_bytes, msg, 4, -1)
@classmethod
def _is_fd_assigned(cls, fd):
try:
os.fstat(fd)
except OSError as e:
if e.errno == errno.EBADF:
return False
raise
else:
return True
@classmethod
def _writefd(cls, conn, data, create_dummy_fds=False):
if create_dummy_fds:
for i in range(0, 256):
if not cls._is_fd_assigned(i):
os.dup2(conn.fileno(), i)
fd = reduction.recv_handle(conn)
if msvcrt:
fd = msvcrt.open_osfhandle(fd, os.O_WRONLY)
os.write(fd, data)
os.close(fd)
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
def test_fd_transfer(self):
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"foo"))
p.daemon = True
p.start()
self.addCleanup(test.support.unlink, test.support.TESTFN)
with open(test.support.TESTFN, "wb") as f:
fd = f.fileno()
if msvcrt:
fd = msvcrt.get_osfhandle(fd)
reduction.send_handle(conn, fd, p.pid)
p.join()
with open(test.support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"foo")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
@unittest.skipIf(MAXFD <= 256,
"largest assignable fd number is too small")
@unittest.skipUnless(hasattr(os, "dup2"),
"test needs os.dup2()")
def test_large_fd_transfer(self):
# With fd > 256 (issue #11657)
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"bar", True))
p.daemon = True
p.start()
self.addCleanup(test.support.unlink, test.support.TESTFN)
with open(test.support.TESTFN, "wb") as f:
fd = f.fileno()
for newfd in range(256, MAXFD):
if not self._is_fd_assigned(newfd):
break
else:
self.fail("could not find an unassigned large file descriptor")
os.dup2(fd, newfd)
try:
reduction.send_handle(conn, newfd, p.pid)
finally:
os.close(newfd)
p.join()
with open(test.support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"bar")
@classmethod
    def _send_data_without_fd(cls, conn):
os.write(conn.fileno(), b"\0")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32", "doesn't make sense on Windows")
def test_missing_fd_transfer(self):
# Check that exception is raised when received data is not
# accompanied by a file descriptor in ancillary data.
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._send_data_without_fd, args=(child_conn,))
p.daemon = True
p.start()
self.assertRaises(RuntimeError, reduction.recv_handle, conn)
p.join()
def test_context(self):
a, b = self.Pipe()
with a, b:
a.send(1729)
self.assertEqual(b.recv(), 1729)
if self.TYPE == 'processes':
self.assertFalse(a.closed)
self.assertFalse(b.closed)
if self.TYPE == 'processes':
self.assertTrue(a.closed)
self.assertTrue(b.closed)
self.assertRaises(OSError, a.recv)
self.assertRaises(OSError, b.recv)
class _TestListener(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_multiple_bind(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
self.addCleanup(l.close)
self.assertRaises(OSError, self.connection.Listener,
l.address, family)
def test_context(self):
with self.connection.Listener() as l:
with self.connection.Client(l.address) as c:
with l.accept() as d:
c.send(1729)
self.assertEqual(d.recv(), 1729)
if self.TYPE == 'processes':
self.assertRaises(OSError, l.accept)
class _TestListenerClient(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _test(cls, address):
conn = cls.connection.Client(address)
conn.send('hello')
conn.close()
def test_listener_client(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
p.join()
l.close()
def test_issue14725(self):
l = self.connection.Listener()
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
time.sleep(1)
        # On Windows the client process should have connected, written data
        # and closed the pipe handle by now.  This causes ConnectNamedPipe()
        # to fail with ERROR_NO_DATA.  See Issue 14725.
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
conn.close()
p.join()
l.close()
def test_issue16955(self):
for fam in self.connection.families:
l = self.connection.Listener(family=fam)
c = self.connection.Client(l.address)
a = l.accept()
a.send_bytes(b"hello")
self.assertTrue(c.poll(1))
a.close()
c.close()
l.close()
class _TestPoll(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_empty_string(self):
a, b = self.Pipe()
self.assertEqual(a.poll(), False)
b.send_bytes(b'')
self.assertEqual(a.poll(), True)
self.assertEqual(a.poll(), True)
@classmethod
def _child_strings(cls, conn, strings):
for s in strings:
time.sleep(0.1)
conn.send_bytes(s)
conn.close()
def test_strings(self):
strings = (b'hello', b'', b'a', b'b', b'', b'bye', b'', b'lop')
a, b = self.Pipe()
p = self.Process(target=self._child_strings, args=(b, strings))
p.start()
for s in strings:
for i in range(200):
if a.poll(0.01):
break
x = a.recv_bytes()
self.assertEqual(s, x)
p.join()
@classmethod
def _child_boundaries(cls, r):
        # Polling may "pull" a message into the child process, but we
# don't want it to pull only part of a message, as that would
# corrupt the pipe for any other processes which might later
# read from it.
r.poll(5)
def test_boundaries(self):
r, w = self.Pipe(False)
p = self.Process(target=self._child_boundaries, args=(r,))
p.start()
time.sleep(2)
L = [b"first", b"second"]
for obj in L:
w.send_bytes(obj)
w.close()
p.join()
self.assertIn(r.recv_bytes(), L)
@classmethod
def _child_dont_merge(cls, b):
b.send_bytes(b'a')
b.send_bytes(b'b')
b.send_bytes(b'cd')
def test_dont_merge(self):
a, b = self.Pipe()
self.assertEqual(a.poll(0.0), False)
self.assertEqual(a.poll(0.1), False)
p = self.Process(target=self._child_dont_merge, args=(b,))
p.start()
self.assertEqual(a.recv_bytes(), b'a')
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.recv_bytes(), b'b')
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(0.0), True)
self.assertEqual(a.recv_bytes(), b'cd')
p.join()
#
# Test of sending connection and socket objects between processes
#
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
class _TestPicklingConnections(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def tearDownClass(cls):
from multiprocessing import resource_sharer
resource_sharer.stop(timeout=5)
@classmethod
def _listener(cls, conn, families):
for fam in families:
l = cls.connection.Listener(family=fam)
conn.send(l.address)
new_conn = l.accept()
conn.send(new_conn)
new_conn.close()
l.close()
l = socket.socket()
l.bind((test.support.HOST, 0))
l.listen()
conn.send(l.getsockname())
new_conn, addr = l.accept()
conn.send(new_conn)
new_conn.close()
l.close()
conn.recv()
@classmethod
def _remote(cls, conn):
for (address, msg) in iter(conn.recv, None):
client = cls.connection.Client(address)
client.send(msg.upper())
client.close()
address, msg = conn.recv()
client = socket.socket()
client.connect(address)
client.sendall(msg.upper())
client.close()
conn.close()
def test_pickling(self):
families = self.connection.families
lconn, lconn0 = self.Pipe()
lp = self.Process(target=self._listener, args=(lconn0, families))
lp.daemon = True
lp.start()
lconn0.close()
rconn, rconn0 = self.Pipe()
rp = self.Process(target=self._remote, args=(rconn0,))
rp.daemon = True
rp.start()
rconn0.close()
for fam in families:
msg = ('This connection uses family %s' % fam).encode('ascii')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
self.assertEqual(new_conn.recv(), msg.upper())
rconn.send(None)
msg = latin('This connection uses a normal socket')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
buf = []
while True:
s = new_conn.recv(100)
if not s:
break
buf.append(s)
buf = b''.join(buf)
self.assertEqual(buf, msg.upper())
new_conn.close()
lconn.send(None)
rconn.close()
lconn.close()
lp.join()
rp.join()
@classmethod
def child_access(cls, conn):
w = conn.recv()
w.send('all is well')
w.close()
r = conn.recv()
msg = r.recv()
conn.send(msg*2)
conn.close()
def test_access(self):
# On Windows, if we do not specify a destination pid when
# using DupHandle then we need to be careful to use the
# correct access flags for DuplicateHandle(), or else
# DupHandle.detach() will raise PermissionError. For example,
# for a read only pipe handle we should use
# access=FILE_GENERIC_READ. (Unfortunately
# DUPLICATE_SAME_ACCESS does not work.)
conn, child_conn = self.Pipe()
p = self.Process(target=self.child_access, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
r, w = self.Pipe(duplex=False)
conn.send(w)
w.close()
self.assertEqual(r.recv(), 'all is well')
r.close()
r, w = self.Pipe(duplex=False)
conn.send(r)
r.close()
w.send('foobar')
w.close()
self.assertEqual(conn.recv(), 'foobar'*2)
#
#
#
class _TestHeap(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_heap(self):
iterations = 5000
maxblocks = 50
blocks = []
# create and destroy lots of blocks of different sizes
for i in range(iterations):
size = int(random.lognormvariate(0, 1) * 1000)
b = multiprocessing.heap.BufferWrapper(size)
blocks.append(b)
if len(blocks) > maxblocks:
i = random.randrange(maxblocks)
del blocks[i]
# get the heap object
heap = multiprocessing.heap.BufferWrapper._heap
# verify the state of the heap
all = []
occupied = 0
heap._lock.acquire()
self.addCleanup(heap._lock.release)
for L in list(heap._len_to_seq.values()):
for arena, start, stop in L:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'free'))
for arena, start, stop in heap._allocated_blocks:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'occupied'))
occupied += (stop-start)
all.sort()
for i in range(len(all)-1):
(arena, start, stop) = all[i][:3]
(narena, nstart, nstop) = all[i+1][:3]
self.assertTrue((arena != narena and nstart == 0) or
(stop == nstart))
def test_free_from_gc(self):
# Check that freeing of blocks by the garbage collector doesn't deadlock
# (issue #12352).
# Make sure the GC is enabled, and set lower collection thresholds to
# make collections more frequent (and increase the probability of
# deadlock).
if not gc.isenabled():
gc.enable()
self.addCleanup(gc.disable)
thresholds = gc.get_threshold()
self.addCleanup(gc.set_threshold, *thresholds)
gc.set_threshold(10)
# perform numerous block allocations, with cyclic references to make
# sure objects are collected asynchronously by the gc
for i in range(5000):
a = multiprocessing.heap.BufferWrapper(1)
b = multiprocessing.heap.BufferWrapper(1)
# circular references
a.buddy = b
b.buddy = a
#
#
#
class _Foo(Structure):
_fields_ = [
('x', c_int),
('y', c_double)
]
class _TestSharedCTypes(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _double(cls, x, y, foo, arr, string):
x.value *= 2
y.value *= 2
foo.x *= 2
foo.y *= 2
string.value *= 2
for i in range(len(arr)):
arr[i] *= 2
def test_sharedctypes(self, lock=False):
x = Value('i', 7, lock=lock)
y = Value(c_double, 1.0/3.0, lock=lock)
foo = Value(_Foo, 3, 2, lock=lock)
arr = self.Array('d', list(range(10)), lock=lock)
string = self.Array('c', 20, lock=lock)
string.value = latin('hello')
p = self.Process(target=self._double, args=(x, y, foo, arr, string))
p.daemon = True
p.start()
p.join()
self.assertEqual(x.value, 14)
self.assertAlmostEqual(y.value, 2.0/3.0)
self.assertEqual(foo.x, 6)
self.assertAlmostEqual(foo.y, 4.0)
for i in range(10):
self.assertAlmostEqual(arr[i], i*2)
self.assertEqual(string.value, latin('hellohello'))
def test_synchronize(self):
self.test_sharedctypes(lock=True)
def test_copy(self):
foo = _Foo(2, 5.0)
bar = copy(foo)
foo.x = 0
foo.y = 0
self.assertEqual(bar.x, 2)
self.assertAlmostEqual(bar.y, 5.0)
#
#
#
class _TestFinalize(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def _test_finalize(cls, conn):
class Foo(object):
pass
a = Foo()
util.Finalize(a, conn.send, args=('a',))
del a # triggers callback for a
b = Foo()
close_b = util.Finalize(b, conn.send, args=('b',))
close_b() # triggers callback for b
close_b() # does nothing because callback has already been called
del b # does nothing because callback has already been called
c = Foo()
util.Finalize(c, conn.send, args=('c',))
d10 = Foo()
util.Finalize(d10, conn.send, args=('d10',), exitpriority=1)
d01 = Foo()
util.Finalize(d01, conn.send, args=('d01',), exitpriority=0)
d02 = Foo()
util.Finalize(d02, conn.send, args=('d02',), exitpriority=0)
d03 = Foo()
util.Finalize(d03, conn.send, args=('d03',), exitpriority=0)
util.Finalize(None, conn.send, args=('e',), exitpriority=-10)
util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100)
# call multiprocessing's cleanup function then exit process without
# garbage collecting locals
util._exit_function()
conn.close()
os._exit(0)
def test_finalize(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._test_finalize, args=(child_conn,))
p.daemon = True
p.start()
p.join()
result = [obj for obj in iter(conn.recv, 'STOP')]
self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e'])
#
# Test that from ... import * works for each module
#
class _TestImportStar(unittest.TestCase):
def get_module_names(self):
import glob
folder = os.path.dirname(multiprocessing.__file__)
pattern = os.path.join(folder, '*.py')
files = glob.glob(pattern)
modules = [os.path.splitext(os.path.split(f)[1])[0] for f in files]
modules = ['multiprocessing.' + m for m in modules]
modules.remove('multiprocessing.__init__')
modules.append('multiprocessing')
return modules
def test_import(self):
modules = self.get_module_names()
if sys.platform == 'win32':
modules.remove('multiprocessing.popen_fork')
modules.remove('multiprocessing.popen_forkserver')
modules.remove('multiprocessing.popen_spawn_posix')
else:
modules.remove('multiprocessing.popen_spawn_win32')
if not HAS_REDUCTION:
modules.remove('multiprocessing.popen_forkserver')
if c_int is None:
# This module requires _ctypes
modules.remove('multiprocessing.sharedctypes')
for name in modules:
__import__(name)
mod = sys.modules[name]
self.assertTrue(hasattr(mod, '__all__'), name)
for attr in mod.__all__:
self.assertTrue(
hasattr(mod, attr),
'%r does not have attribute %r' % (mod, attr)
)
#
# Quick test that logging works -- does not test logging output
#
class _TestLogging(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_enable_logging(self):
logger = multiprocessing.get_logger()
logger.setLevel(util.SUBWARNING)
self.assertTrue(logger is not None)
logger.debug('this will not be printed')
logger.info('nor will this')
logger.setLevel(LOG_LEVEL)
@classmethod
def _test_level(cls, conn):
logger = multiprocessing.get_logger()
conn.send(logger.getEffectiveLevel())
def test_level(self):
LEVEL1 = 32
LEVEL2 = 37
logger = multiprocessing.get_logger()
root_logger = logging.getLogger()
root_level = root_logger.level
reader, writer = multiprocessing.Pipe(duplex=False)
logger.setLevel(LEVEL1)
p = self.Process(target=self._test_level, args=(writer,))
p.daemon = True
p.start()
self.assertEqual(LEVEL1, reader.recv())
logger.setLevel(logging.NOTSET)
root_logger.setLevel(LEVEL2)
p = self.Process(target=self._test_level, args=(writer,))
p.daemon = True
p.start()
self.assertEqual(LEVEL2, reader.recv())
root_logger.setLevel(root_level)
logger.setLevel(level=LOG_LEVEL)
# class _TestLoggingProcessName(BaseTestCase):
#
# def handle(self, record):
# assert record.processName == multiprocessing.current_process().name
# self.__handled = True
#
# def test_logging(self):
# handler = logging.Handler()
# handler.handle = self.handle
# self.__handled = False
# # Bypass getLogger() and side-effects
# logger = logging.getLoggerClass()(
# 'multiprocessing.test.TestLoggingProcessName')
# logger.addHandler(handler)
# logger.propagate = False
#
# logger.warn('foo')
# assert self.__handled
#
# Check that Process.join() retries if os.waitpid() fails with EINTR
#
class _TestPollEintr(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def _killer(cls, pid):
time.sleep(0.1)
os.kill(pid, signal.SIGUSR1)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_poll_eintr(self):
got_signal = [False]
def record(*args):
got_signal[0] = True
pid = os.getpid()
oldhandler = signal.signal(signal.SIGUSR1, record)
try:
killer = self.Process(target=self._killer, args=(pid,))
killer.start()
try:
p = self.Process(target=time.sleep, args=(2,))
p.start()
p.join()
finally:
killer.join()
self.assertTrue(got_signal[0])
self.assertEqual(p.exitcode, 0)
finally:
signal.signal(signal.SIGUSR1, oldhandler)
#
# Test of connection handle validation, see issue 3321
#
class TestInvalidHandle(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_handles(self):
conn = multiprocessing.connection.Connection(44977608)
# check that poll() doesn't crash
try:
conn.poll()
except (ValueError, OSError):
pass
finally:
# Hack private attribute _handle to avoid printing an error
# in conn.__del__
conn._handle = None
self.assertRaises((ValueError, OSError),
multiprocessing.connection.Connection, -1)
class OtherTest(unittest.TestCase):
# TODO: add more tests for deliver/answer challenge.
def test_deliver_challenge_auth_failure(self):
class _FakeConnection(object):
def recv_bytes(self, size):
return b'something bogus'
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.deliver_challenge,
_FakeConnection(), b'abc')
def test_answer_challenge_auth_failure(self):
class _FakeConnection(object):
def __init__(self):
self.count = 0
def recv_bytes(self, size):
self.count += 1
if self.count == 1:
return multiprocessing.connection.CHALLENGE
elif self.count == 2:
return b'something bogus'
return b''
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.answer_challenge,
_FakeConnection(), b'abc')
#
# Test Manager.start()/Pool.__init__() initializer feature - see issue 5585
#
def initializer(ns):
ns.test += 1
class TestInitializers(unittest.TestCase):
def setUp(self):
self.mgr = multiprocessing.Manager()
self.ns = self.mgr.Namespace()
self.ns.test = 0
def tearDown(self):
self.mgr.shutdown()
self.mgr.join()
def test_manager_initializer(self):
m = multiprocessing.managers.SyncManager()
self.assertRaises(TypeError, m.start, 1)
m.start(initializer, (self.ns,))
self.assertEqual(self.ns.test, 1)
m.shutdown()
m.join()
def test_pool_initializer(self):
self.assertRaises(TypeError, multiprocessing.Pool, initializer=1)
p = multiprocessing.Pool(1, initializer, (self.ns,))
p.close()
p.join()
self.assertEqual(self.ns.test, 1)
#
# Issue 5155, 5313, 5331: Test process in processes
# Verifies os.close(sys.stdin.fileno()) vs. sys.stdin.close() behavior
#
def _this_sub_process(q):
try:
item = q.get(block=False)
except pyqueue.Empty:
pass
def _test_process(q):
queue = multiprocessing.Queue()
subProc = multiprocessing.Process(target=_this_sub_process, args=(queue,))
subProc.daemon = True
subProc.start()
subProc.join()
def _afunc(x):
return x*x
def pool_in_process():
pool = multiprocessing.Pool(processes=4)
x = pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7])
pool.close()
pool.join()
class _file_like(object):
def __init__(self, delegate):
self._delegate = delegate
self._pid = None
@property
def cache(self):
pid = os.getpid()
        # There are no race conditions since fork() keeps only the calling thread
if pid != self._pid:
self._pid = pid
self._cache = []
return self._cache
def write(self, data):
self.cache.append(data)
def flush(self):
self._delegate.write(''.join(self.cache))
self._cache = []
class TestStdinBadfiledescriptor(unittest.TestCase):
def test_queue_in_process(self):
queue = multiprocessing.Queue()
proc = multiprocessing.Process(target=_test_process, args=(queue,))
proc.start()
proc.join()
def test_pool_in_process(self):
p = multiprocessing.Process(target=pool_in_process)
p.start()
p.join()
def test_flushing(self):
sio = io.StringIO()
flike = _file_like(sio)
flike.write('foo')
proc = multiprocessing.Process(target=lambda: flike.flush())
flike.flush()
assert sio.getvalue() == 'foo'
class TestWait(unittest.TestCase):
@classmethod
def _child_test_wait(cls, w, slow):
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
w.send((i, os.getpid()))
w.close()
def test_wait(self, slow=False):
from multiprocessing.connection import wait
readers = []
procs = []
messages = []
for i in range(4):
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=self._child_test_wait, args=(w, slow))
p.daemon = True
p.start()
w.close()
readers.append(r)
procs.append(p)
self.addCleanup(p.join)
while readers:
for r in wait(readers):
try:
msg = r.recv()
except EOFError:
readers.remove(r)
r.close()
else:
messages.append(msg)
messages.sort()
expected = sorted((i, p.pid) for i in range(10) for p in procs)
self.assertEqual(messages, expected)
@classmethod
def _child_test_wait_socket(cls, address, slow):
s = socket.socket()
s.connect(address)
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
s.sendall(('%s\n' % i).encode('ascii'))
s.close()
def test_wait_socket(self, slow=False):
from multiprocessing.connection import wait
l = socket.socket()
l.bind((test.support.HOST, 0))
l.listen()
addr = l.getsockname()
readers = []
procs = []
dic = {}
for i in range(4):
p = multiprocessing.Process(target=self._child_test_wait_socket,
args=(addr, slow))
p.daemon = True
p.start()
procs.append(p)
self.addCleanup(p.join)
for i in range(4):
r, _ = l.accept()
readers.append(r)
dic[r] = []
l.close()
while readers:
for r in wait(readers):
msg = r.recv(32)
if not msg:
readers.remove(r)
r.close()
else:
dic[r].append(msg)
expected = ''.join('%s\n' % i for i in range(10)).encode('ascii')
for v in dic.values():
self.assertEqual(b''.join(v), expected)
def test_wait_slow(self):
self.test_wait(True)
def test_wait_socket_slow(self):
self.test_wait_socket(True)
def test_wait_timeout(self):
from multiprocessing.connection import wait
expected = 5
a, b = multiprocessing.Pipe()
start = time.time()
res = wait([a, b], expected)
delta = time.time() - start
self.assertEqual(res, [])
self.assertLess(delta, expected * 2)
self.assertGreater(delta, expected * 0.5)
b.send(None)
start = time.time()
res = wait([a, b], 20)
delta = time.time() - start
self.assertEqual(res, [a])
self.assertLess(delta, 0.4)
@classmethod
def signal_and_sleep(cls, sem, period):
sem.release()
time.sleep(period)
def test_wait_integer(self):
from multiprocessing.connection import wait
expected = 3
sorted_ = lambda l: sorted(l, key=lambda x: id(x))
sem = multiprocessing.Semaphore(0)
a, b = multiprocessing.Pipe()
p = multiprocessing.Process(target=self.signal_and_sleep,
args=(sem, expected))
p.start()
self.assertIsInstance(p.sentinel, int)
self.assertTrue(sem.acquire(timeout=20))
start = time.time()
res = wait([a, p.sentinel, b], expected + 20)
delta = time.time() - start
self.assertEqual(res, [p.sentinel])
self.assertLess(delta, expected + 2)
self.assertGreater(delta, expected - 2)
a.send(None)
start = time.time()
res = wait([a, p.sentinel, b], 20)
delta = time.time() - start
self.assertEqual(sorted_(res), sorted_([p.sentinel, b]))
self.assertLess(delta, 0.4)
b.send(None)
start = time.time()
res = wait([a, p.sentinel, b], 20)
delta = time.time() - start
self.assertEqual(sorted_(res), sorted_([a, p.sentinel, b]))
self.assertLess(delta, 0.4)
p.terminate()
p.join()
def test_neg_timeout(self):
from multiprocessing.connection import wait
a, b = multiprocessing.Pipe()
t = time.time()
res = wait([a], timeout=-1)
t = time.time() - t
self.assertEqual(res, [])
self.assertLess(t, 1)
a.close()
b.close()
#
# Issue 14151: Test invalid address families for the current platform
#
class TestInvalidFamily(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_family(self):
with self.assertRaises(ValueError):
multiprocessing.connection.Listener(r'\\.\test')
@unittest.skipUnless(WIN32, "skipped on non-Windows platforms")
def test_invalid_family_win32(self):
with self.assertRaises(ValueError):
multiprocessing.connection.Listener('/var/test.pipe')
#
# Issue 12098: check sys.flags of child matches that for parent
#
class TestFlags(unittest.TestCase):
@classmethod
def run_in_grandchild(cls, conn):
conn.send(tuple(sys.flags))
@classmethod
def run_in_child(cls):
import json
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=cls.run_in_grandchild, args=(w,))
p.start()
grandchild_flags = r.recv()
p.join()
r.close()
w.close()
flags = (tuple(sys.flags), grandchild_flags)
print(json.dumps(flags))
def test_flags(self):
import json, subprocess
# start child process using unusual flags
prog = ('from test._test_multiprocessing import TestFlags; ' +
'TestFlags.run_in_child()')
data = subprocess.check_output(
[sys.executable, '-E', '-S', '-O', '-c', prog])
child_flags, grandchild_flags = json.loads(data.decode('ascii'))
self.assertEqual(child_flags, grandchild_flags)
#
# Test interaction with socket timeouts - see Issue #6056
#
class TestTimeouts(unittest.TestCase):
@classmethod
def _test_timeout(cls, child, address):
time.sleep(1)
child.send(123)
child.close()
conn = multiprocessing.connection.Client(address)
conn.send(456)
conn.close()
def test_timeout(self):
old_timeout = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(0.1)
parent, child = multiprocessing.Pipe(duplex=True)
l = multiprocessing.connection.Listener(family='AF_INET')
p = multiprocessing.Process(target=self._test_timeout,
args=(child, l.address))
p.start()
child.close()
self.assertEqual(parent.recv(), 123)
parent.close()
conn = l.accept()
self.assertEqual(conn.recv(), 456)
conn.close()
l.close()
p.join(10)
finally:
socket.setdefaulttimeout(old_timeout)
#
# Test what happens with no "if __name__ == '__main__'"
#
class TestNoForkBomb(unittest.TestCase):
def test_noforkbomb(self):
sm = multiprocessing.get_start_method()
name = os.path.join(os.path.dirname(__file__), 'mp_fork_bomb.py')
if sm != 'fork':
rc, out, err = test.support.script_helper.assert_python_failure(name, sm)
self.assertEqual(out, b'')
self.assertIn(b'RuntimeError', err)
else:
rc, out, err = test.support.script_helper.assert_python_ok(name, sm)
self.assertEqual(out.rstrip(), b'123')
self.assertEqual(err, b'')
#
# Issue #17555: ForkAwareThreadLock
#
class TestForkAwareThreadLock(unittest.TestCase):
    # We recursively start processes.  Issue #17555 meant that the
    # after-fork registry would get duplicate entries for the same
# lock. The size of the registry at generation n was ~2**n.
@classmethod
def child(cls, n, conn):
if n > 1:
p = multiprocessing.Process(target=cls.child, args=(n-1, conn))
p.start()
conn.close()
p.join(timeout=5)
else:
conn.send(len(util._afterfork_registry))
conn.close()
def test_lock(self):
r, w = multiprocessing.Pipe(False)
l = util.ForkAwareThreadLock()
old_size = len(util._afterfork_registry)
p = multiprocessing.Process(target=self.child, args=(5, w))
p.start()
w.close()
new_size = r.recv()
p.join(timeout=5)
self.assertLessEqual(new_size, old_size)
#
# Check that non-forked child processes do not inherit unneeded fds/handles
#
class TestCloseFds(unittest.TestCase):
def get_high_socket_fd(self):
if WIN32:
# The child process will not have any socket handles, so
# calling socket.fromfd() should produce WSAENOTSOCK even
# if there is a handle of the same number.
return socket.socket().detach()
else:
# We want to produce a socket with an fd high enough that a
# freshly created child process will not have any fds as high.
fd = socket.socket().detach()
to_close = []
while fd < 50:
to_close.append(fd)
fd = os.dup(fd)
for x in to_close:
os.close(x)
return fd
def close(self, fd):
if WIN32:
socket.socket(fileno=fd).close()
else:
os.close(fd)
@classmethod
def _test_closefds(cls, conn, fd):
try:
s = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
except Exception as e:
conn.send(e)
else:
s.close()
conn.send(None)
def test_closefd(self):
if not HAS_REDUCTION:
raise unittest.SkipTest('requires fd pickling')
reader, writer = multiprocessing.Pipe()
fd = self.get_high_socket_fd()
try:
p = multiprocessing.Process(target=self._test_closefds,
args=(writer, fd))
p.start()
writer.close()
e = reader.recv()
p.join(timeout=5)
finally:
self.close(fd)
writer.close()
reader.close()
if multiprocessing.get_start_method() == 'fork':
self.assertIs(e, None)
else:
WSAENOTSOCK = 10038
self.assertIsInstance(e, OSError)
self.assertTrue(e.errno == errno.EBADF or
e.winerror == WSAENOTSOCK, e)
#
# Issue #17097: EINTR should be ignored by recv(), send(), accept() etc
#
class TestIgnoreEINTR(unittest.TestCase):
@classmethod
def _test_ignore(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
conn.send('ready')
x = conn.recv()
conn.send(x)
conn.send_bytes(b'x'*(1024*1024)) # sending 1 MB should block
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
self.assertEqual(conn.recv(), 'ready')
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
conn.send(1234)
self.assertEqual(conn.recv(), 1234)
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
self.assertEqual(conn.recv_bytes(), b'x'*(1024*1024))
time.sleep(0.1)
p.join()
finally:
conn.close()
@classmethod
def _test_ignore_listener(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
with multiprocessing.connection.Listener() as l:
conn.send(l.address)
a = l.accept()
a.send('welcome')
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore_listener(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore_listener,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
address = conn.recv()
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
client = multiprocessing.connection.Client(address)
self.assertEqual(client.recv(), 'welcome')
p.join()
finally:
conn.close()
class TestStartMethod(unittest.TestCase):
@classmethod
def _check_context(cls, conn):
conn.send(multiprocessing.get_start_method())
def check_context(self, ctx):
r, w = ctx.Pipe(duplex=False)
p = ctx.Process(target=self._check_context, args=(w,))
p.start()
w.close()
child_method = r.recv()
r.close()
p.join()
self.assertEqual(child_method, ctx.get_start_method())
def test_context(self):
for method in ('fork', 'spawn', 'forkserver'):
try:
ctx = multiprocessing.get_context(method)
except ValueError:
continue
self.assertEqual(ctx.get_start_method(), method)
self.assertIs(ctx.get_context(), ctx)
self.assertRaises(ValueError, ctx.set_start_method, 'spawn')
self.assertRaises(ValueError, ctx.set_start_method, None)
self.check_context(ctx)
def test_set_get(self):
multiprocessing.set_forkserver_preload(PRELOAD)
count = 0
old_method = multiprocessing.get_start_method()
try:
for method in ('fork', 'spawn', 'forkserver'):
try:
multiprocessing.set_start_method(method, force=True)
except ValueError:
continue
self.assertEqual(multiprocessing.get_start_method(), method)
ctx = multiprocessing.get_context()
self.assertEqual(ctx.get_start_method(), method)
self.assertTrue(type(ctx).__name__.lower().startswith(method))
self.assertTrue(
ctx.Process.__name__.lower().startswith(method))
self.check_context(multiprocessing)
count += 1
finally:
multiprocessing.set_start_method(old_method, force=True)
self.assertGreaterEqual(count, 1)
def test_get_all(self):
methods = multiprocessing.get_all_start_methods()
if sys.platform == 'win32':
self.assertEqual(methods, ['spawn'])
else:
self.assertTrue(methods == ['fork', 'spawn'] or
methods == ['fork', 'spawn', 'forkserver'])
#
# Check that killing a process does not leak named semaphores
#
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
class TestSemaphoreTracker(unittest.TestCase):
def test_semaphore_tracker(self):
import subprocess
cmd = '''if 1:
import multiprocessing as mp, time, os
mp.set_start_method("spawn")
lock1 = mp.Lock()
lock2 = mp.Lock()
os.write(%d, lock1._semlock.name.encode("ascii") + b"\\n")
os.write(%d, lock2._semlock.name.encode("ascii") + b"\\n")
time.sleep(10)
'''
r, w = os.pipe()
p = subprocess.Popen([sys.executable,
'-c', cmd % (w, w)],
pass_fds=[w],
stderr=subprocess.PIPE)
os.close(w)
with open(r, 'rb', closefd=True) as f:
name1 = f.readline().rstrip().decode('ascii')
name2 = f.readline().rstrip().decode('ascii')
_multiprocessing.sem_unlink(name1)
p.terminate()
p.wait()
time.sleep(2.0)
with self.assertRaises(OSError) as ctx:
_multiprocessing.sem_unlink(name2)
# docs say it should be ENOENT, but OSX seems to give EINVAL
self.assertIn(ctx.exception.errno, (errno.ENOENT, errno.EINVAL))
err = p.stderr.read().decode('utf-8')
p.stderr.close()
expected = 'semaphore_tracker: There appear to be 2 leaked semaphores'
self.assertRegex(err, expected)
        self.assertRegex(err, r'semaphore_tracker: %r: \[Errno' % name1)
#
# Mixins
#
class ProcessesMixin(object):
TYPE = 'processes'
Process = multiprocessing.Process
connection = multiprocessing.connection
current_process = staticmethod(multiprocessing.current_process)
active_children = staticmethod(multiprocessing.active_children)
Pool = staticmethod(multiprocessing.Pool)
Pipe = staticmethod(multiprocessing.Pipe)
Queue = staticmethod(multiprocessing.Queue)
JoinableQueue = staticmethod(multiprocessing.JoinableQueue)
Lock = staticmethod(multiprocessing.Lock)
RLock = staticmethod(multiprocessing.RLock)
Semaphore = staticmethod(multiprocessing.Semaphore)
BoundedSemaphore = staticmethod(multiprocessing.BoundedSemaphore)
Condition = staticmethod(multiprocessing.Condition)
Event = staticmethod(multiprocessing.Event)
Barrier = staticmethod(multiprocessing.Barrier)
Value = staticmethod(multiprocessing.Value)
Array = staticmethod(multiprocessing.Array)
RawValue = staticmethod(multiprocessing.RawValue)
RawArray = staticmethod(multiprocessing.RawArray)
class ManagerMixin(object):
TYPE = 'manager'
Process = multiprocessing.Process
Queue = property(operator.attrgetter('manager.Queue'))
JoinableQueue = property(operator.attrgetter('manager.JoinableQueue'))
Lock = property(operator.attrgetter('manager.Lock'))
RLock = property(operator.attrgetter('manager.RLock'))
Semaphore = property(operator.attrgetter('manager.Semaphore'))
BoundedSemaphore = property(operator.attrgetter('manager.BoundedSemaphore'))
Condition = property(operator.attrgetter('manager.Condition'))
Event = property(operator.attrgetter('manager.Event'))
Barrier = property(operator.attrgetter('manager.Barrier'))
Value = property(operator.attrgetter('manager.Value'))
Array = property(operator.attrgetter('manager.Array'))
list = property(operator.attrgetter('manager.list'))
dict = property(operator.attrgetter('manager.dict'))
Namespace = property(operator.attrgetter('manager.Namespace'))
@classmethod
def Pool(cls, *args, **kwds):
return cls.manager.Pool(*args, **kwds)
@classmethod
def setUpClass(cls):
cls.manager = multiprocessing.Manager()
@classmethod
def tearDownClass(cls):
# only the manager process should be returned by active_children()
# but this can take a bit on slow machines, so wait a few seconds
# if there are other children too (see #17395)
t = 0.01
while len(multiprocessing.active_children()) > 1 and t < 5:
time.sleep(t)
t *= 2
gc.collect() # do garbage collection
if cls.manager._number_of_objects() != 0:
# This is not really an error since some tests do not
# ensure that all processes which hold a reference to a
# managed object have been joined.
print('Shared objects which still exist at manager shutdown:')
print(cls.manager._debug_info())
cls.manager.shutdown()
cls.manager.join()
cls.manager = None
class ThreadsMixin(object):
TYPE = 'threads'
Process = multiprocessing.dummy.Process
connection = multiprocessing.dummy.connection
current_process = staticmethod(multiprocessing.dummy.current_process)
active_children = staticmethod(multiprocessing.dummy.active_children)
Pool = staticmethod(multiprocessing.dummy.Pool)
Pipe = staticmethod(multiprocessing.dummy.Pipe)
Queue = staticmethod(multiprocessing.dummy.Queue)
JoinableQueue = staticmethod(multiprocessing.dummy.JoinableQueue)
Lock = staticmethod(multiprocessing.dummy.Lock)
RLock = staticmethod(multiprocessing.dummy.RLock)
Semaphore = staticmethod(multiprocessing.dummy.Semaphore)
BoundedSemaphore = staticmethod(multiprocessing.dummy.BoundedSemaphore)
Condition = staticmethod(multiprocessing.dummy.Condition)
Event = staticmethod(multiprocessing.dummy.Event)
Barrier = staticmethod(multiprocessing.dummy.Barrier)
Value = staticmethod(multiprocessing.dummy.Value)
Array = staticmethod(multiprocessing.dummy.Array)
#
# Functions used to create test cases from the base ones in this module
#
def install_tests_in_module_dict(remote_globs, start_method):
__module__ = remote_globs['__name__']
local_globs = globals()
ALL_TYPES = {'processes', 'threads', 'manager'}
for name, base in local_globs.items():
if not isinstance(base, type):
continue
if issubclass(base, BaseTestCase):
if base is BaseTestCase:
continue
assert set(base.ALLOWED_TYPES) <= ALL_TYPES, base.ALLOWED_TYPES
for type_ in base.ALLOWED_TYPES:
newname = 'With' + type_.capitalize() + name[1:]
Mixin = local_globs[type_.capitalize() + 'Mixin']
class Temp(base, Mixin, unittest.TestCase):
pass
Temp.__name__ = Temp.__qualname__ = newname
Temp.__module__ = __module__
remote_globs[newname] = Temp
elif issubclass(base, unittest.TestCase):
class Temp(base, object):
pass
Temp.__name__ = Temp.__qualname__ = name
Temp.__module__ = __module__
remote_globs[name] = Temp
dangling = [None, None]
old_start_method = [None]
def setUpModule():
multiprocessing.set_forkserver_preload(PRELOAD)
multiprocessing.process._cleanup()
dangling[0] = multiprocessing.process._dangling.copy()
dangling[1] = threading._dangling.copy()
old_start_method[0] = multiprocessing.get_start_method(allow_none=True)
try:
multiprocessing.set_start_method(start_method, force=True)
except ValueError:
raise unittest.SkipTest(start_method +
' start method not supported')
if sys.platform.startswith("linux"):
try:
lock = multiprocessing.RLock()
except OSError:
raise unittest.SkipTest("OSError raises on RLock creation, "
"see issue 3111!")
check_enough_semaphores()
util.get_temp_dir() # creates temp directory
multiprocessing.get_logger().setLevel(LOG_LEVEL)
def tearDownModule():
multiprocessing.set_start_method(old_start_method[0], force=True)
# pause a bit so we don't get warning about dangling threads/processes
time.sleep(0.5)
multiprocessing.process._cleanup()
gc.collect()
tmp = set(multiprocessing.process._dangling) - set(dangling[0])
if tmp:
print('Dangling processes:', tmp, file=sys.stderr)
del tmp
tmp = set(threading._dangling) - set(dangling[1])
if tmp:
print('Dangling threads:', tmp, file=sys.stderr)
remote_globs['setUpModule'] = setUpModule
remote_globs['tearDownModule'] = tearDownModule
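# A minimal sketch (illustrative only, not part of this module) of how a start-method
# specific test module might consume the helper above; the file name below is an
# assumption used purely for illustration:
#
#     # test_multiprocessing_spawn.py
#     import unittest
#     from test import _test_multiprocessing
#
#     _test_multiprocessing.install_tests_in_module_dict(globals(), 'spawn')
#
#     if __name__ == '__main__':
#         unittest.main()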
|
app.py
|
# coding=utf-8
from __future__ import print_function
import inspect
import logging
import numbers
import os
import six
import sys
import threading
import time
import warnings
import numpy as np
from os.path import isfile
from phi.data.fluidformat import Scene, write_sim_frame
from phi import struct
from phi.physics.field import Field, StaggeredGrid, CenteredGrid
from phi.physics.world import world, StateProxy
from phi.viz.plot import PlotlyFigureBuilder
from .value import EditableValue, EditableFloat, EditableInt, EditableBool, EditableString
from .control import Control, Action
def synchronized_method(method):
outer_lock = threading.Lock()
lock_name = '__' + method.__name__ + '_lock' + '__'
def sync_method(self, *args, **kws):
with outer_lock:
if not hasattr(self, lock_name):
setattr(self, lock_name, threading.Lock())
lock = getattr(self, lock_name)
with lock:
return method(self, *args, **kws)
return sync_method
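# Usage sketch (illustrative only; nothing else in this module references it): decorating an
# instance method serializes concurrent calls per object, with each instance lazily creating
# its own lock the first time the method runs.
class _SynchronizedCounterExample(object):

    def __init__(self):
        self.value = 0

    @synchronized_method
    def increment(self):
        self.value += 1  # safe to call from several threads at once
        return self.value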
class TimeDependentField(object):
def __init__(self, name, generator):
self.name = name
self.generator = generator
self.array = None
self.invalidation_version = -1
@synchronized_method
def get(self, invalidation_version):
if invalidation_version != self.invalidation_version:
self.array = self.generator()
self.invalidation_version = invalidation_version
return self.array
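# Note: App.get_field() passes its _invalidation_counter as `invalidation_version`, so a
# field's generator runs at most once per invalidate()/progress() call, no matter how many
# viewers request the field in between.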
class App(object):
def __init__(self,
name=None,
subtitle='',
fields=None,
stride=None,
record_images=False, record_data=False,
base_dir='~/phi/data/',
recorded_fields=None,
summary=None,
custom_properties=None,
target_scene=None,
objects_to_save=None,
framerate=None):
self.start_time = time.time()
self.name = name if name is not None else self.__class__.__name__
self.subtitle = subtitle
self.summary = summary if summary else name
if fields:
self.fields = {name: TimeDependentField(name, generator) for (name, generator) in fields.items()}
else:
self.fields = {}
self.message = None
self.steps = 0
self._invalidation_counter = 0
self._controls = []
self._actions = []
self._traits = []
self.prepared = False
self.current_action = None
self._pause = False
self.detect_fields = 'default' # False, True, 'default'
self.world = world
# Setup directory & Logging
self.objects_to_save = [self.__class__] if objects_to_save is None else list(objects_to_save)
self.base_dir = os.path.expanduser(base_dir)
if not target_scene:
self.new_scene()
self.uses_existing_scene = False
else:
self.scene = target_scene
self.uses_existing_scene = True
if not isfile(self.scene.subpath('info.log')):
log_file = self.log_file = self.scene.subpath('info.log')
        else:
            index = 2
            while True:
                log_file = self.scene.subpath('info_%d.log' % index)
                if not isfile(log_file):
                    break
                else:
                    index += 1
            self.log_file = log_file
# Setup logging
        logFormatter = logging.Formatter('%(message)s (%(levelname)s), %(asctime)s\n')
rootLogger = logging.getLogger()
rootLogger.setLevel(logging.WARNING)
customLogger = logging.Logger('app', logging.DEBUG)
fileHandler = logging.FileHandler(log_file)
fileHandler.setFormatter(logFormatter)
customLogger.addHandler(fileHandler)
consoleHandler = logging.StreamHandler(sys.stdout)
consoleHandler.setFormatter(logFormatter)
consoleHandler.setLevel(logging.INFO)
customLogger.addHandler(consoleHandler)
self.logger = customLogger
# Recording
self.record_images = record_images
self.record_data = record_data
self.recorded_fields = recorded_fields if recorded_fields is not None else []
self.rec_all_slices = False
self.sequence_stride = stride if stride is not None else 1
self.framerate = framerate if framerate is not None else stride
self._custom_properties = custom_properties if custom_properties else {}
self.figures = PlotlyFigureBuilder()
self.info('App created. Scene directory is %s' % self.scene.path)
def new_scene(self, count=None):
if count is None:
count = 1 if self.world.batch_size is None else self.world.batch_size
self.scene = Scene.create(self.base_dir, self.scene_summary(), count=count, mkdir=True)
@property
def directory(self):
return self.scene.path
@property
def image_dir(self):
return self.scene.subpath('images')
def get_image_dir(self):
return self.scene.subpath('images', create=True)
def progress(self):
self.step()
self.steps += 1
self.invalidate()
def invalidate(self):
self._invalidation_counter += 1
def step(self):
world.step()
@property
def fieldnames(self):
return sorted(self.fields.keys())
def get_field(self, fieldname):
if fieldname not in self.fields:
raise KeyError('Field %s not declared. Available fields are %s' % (fieldname, self.fields.keys()))
return self.fields[fieldname].get(self._invalidation_counter)
def add_field(self, name, value):
assert not self.prepared, 'Cannot add fields to a prepared model'
if isinstance(value, StateProxy):
def current_state():
return value.state
generator = current_state
elif callable(value):
generator = value
else:
assert isinstance(value, (np.ndarray, Field, float, int)), 'Unsupported type for field "%s": %s' % (name, type(value))
def get_constant():
return value
generator = get_constant
self.fields[name] = TimeDependentField(name, generator)
@property
def actions(self):
return self._actions
def add_action(self, name, methodcall):
self._actions.append(Action(name, methodcall, name))
def run_action(self, action):
message_before = self.message
action.method()
self.invalidate()
message_after = self.message
if message_before == message_after:
if self.message is None or self.message == '':
self.message = display_name(action.name)
else:
self.message += ' | ' + display_name(action.name)
@property
def traits(self):
return self._traits
def add_trait(self, trait):
assert not self.prepared, 'Cannot add traits to a prepared model'
self._traits.append(trait)
@property
def controls(self):
return self._controls
def prepare(self):
if self.prepared:
return
logging.info('Gathering model data...')
# Controls
for name in self.__dict__:
val = getattr(self, name)
editable_value = None
if isinstance(val, EditableValue):
editable_value = val
setattr(self, name, val.initial_value) # Replace EditableValue with initial value
elif name.startswith('value_'):
value_name = display_name(name[6:])
dtype = type(val)
if dtype == bool:
editable_value = EditableBool(value_name, val)
elif isinstance(val, numbers.Integral): # Int
editable_value = EditableInt(value_name, val)
elif isinstance(val, numbers.Number): # Float
editable_value = EditableFloat(value_name, val)
elif isinstance(val, six.string_types):
editable_value = EditableString(value_name, val)
if editable_value:
self._controls.append(Control(self, name, editable_value))
# Actions
for method_name in dir(self):
if method_name.startswith('action_') and callable(getattr(self, method_name)):
self._actions.append(Action(display_name(method_name[7:]), getattr(self, method_name), method_name))
# Default fields
if len(self.fields) == 0:
self._add_default_fields()
# Scene
self._update_scene_properties()
source_files_to_save = set()
        for obj in self.objects_to_save:
            source_files_to_save.add(inspect.getabsfile(obj))
for source_file in source_files_to_save:
self.scene.copy_src(source_file)
# End
self.prepared = True
return self
def _add_default_fields(self):
def add_default_field(trace):
field = trace.value
if isinstance(field, (CenteredGrid, StaggeredGrid)):
def field_generator():
world_state = self.world.state
return trace.find_in(world_state)
self.add_field(field.name[0].upper() + field.name[1:], field_generator)
return None
with struct.unsafe():
struct.map(add_default_field, world.state, leaf_condition=lambda x: isinstance(x, (CenteredGrid, StaggeredGrid)), trace=True)
def add_custom_property(self, key, value):
self._custom_properties[key] = value
if self.prepared:
self._update_scene_properties()
def add_custom_properties(self, dict):
self._custom_properties.update(dict)
if self.prepared:
self._update_scene_properties()
def _update_scene_properties(self):
if self.uses_existing_scene:
return
app_name = os.path.basename(inspect.getfile(self.__class__))
app_path = inspect.getabsfile(self.__class__)
properties = {
'instigator': 'App',
'traits': self.traits,
'app': str(app_name),
'app_path': str(app_path),
'name': self.name,
'description': self.subtitle,
'all_fields': self.fieldnames,
'actions': [action.name for action in self.actions],
'controls': [{control.name: control.value} for control in self.controls],
'summary': self.scene_summary(),
'time_of_writing': self.steps,
'world': struct.properties_dict(self.world.state)
}
properties.update(self.custom_properties())
self.scene.properties = properties
def settings_str(self):
return ''.join([
' ' + str(control) for control in self.controls
])
def custom_properties(self):
return self._custom_properties
def info(self, message):
message = str(message)
self.message = message
self.logger.info(message)
def debug(self, message):
logging.info(message)
def scene_summary(self):
return self.summary
def show(self, *args, **kwargs):
warnings.warn("Use show(model) instead.", DeprecationWarning, stacklevel=2)
from phi.viz.display import show
show(self, *args, **kwargs)
@property
def status(self):
pausing = '/Pausing' if self._pause and self.current_action else ''
action = self.current_action if self.current_action else 'Idle'
message = (' - %s' % self.message) if self.message else ''
return '{}{} ({}){}'.format(action, pausing, self.steps, message)
def run_step(self, framerate=None, allow_recording=True):
self.current_action = 'Running'
starttime = time.time()
try:
self.progress()
if allow_recording and self.steps % self.sequence_stride == 0:
self.record_frame()
if framerate is not None:
duration = time.time() - starttime
rest = 1.0/framerate - duration
if rest > 0:
self.current_action = 'Waiting'
time.sleep(rest)
except Exception as e:
self.info('Error during %s.step() \n %s: %s' % (type(self).__name__, type(e).__name__, e))
self.logger.exception(e)
finally:
self.current_action = None
def play(self, max_steps=None, callback=None, framerate=None, allow_recording=True, callback_if_aborted=False):
if framerate is None:
framerate = self.framerate
def target():
self._pause = False
step_count = 0
while not self._pause:
self.run_step(framerate=framerate, allow_recording=allow_recording)
step_count += 1
if max_steps and step_count >= max_steps:
break
if callback is not None:
if not self._pause or callback_if_aborted:
callback()
thread = threading.Thread(target=target)
thread.start()
return self
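    # Usage sketch: app.play(max_steps=100) starts run_step() on a background thread and
    # returns immediately; call app.pause() (or let max_steps expire) to stop the loop.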
def pause(self):
self._pause = True
@property
def running(self):
return self.current_action is not None
def record_frame(self):
self.current_action = 'Recording'
files = []
if self.record_images:
            if not os.path.isdir(self.image_dir):
                os.makedirs(self.image_dir)
arrays = [self.get_field(field) for field in self.recorded_fields]
for name, array in zip(self.recorded_fields, arrays):
files += self.figures.save_figures(self.image_dir, name, self.steps, array)
if self.record_data:
arrays = [self.get_field(field) for field in self.recorded_fields]
arrays = [a.staggered_tensor() if isinstance(a, StaggeredGrid) else a.data for a in arrays]
names = [n.lower() for n in self.recorded_fields]
files += write_sim_frame(self.directory, arrays, names, self.steps)
if files:
self.message = 'Frame written to %s' % files
self.current_action = None
def benchmark(self, sequence_count):
self._pause = False
step_count = 0
starttime = time.time()
for i in range(sequence_count):
self.run_step(framerate=np.inf, allow_recording=False)
step_count += 1
if self._pause:
break
time_elapsed = time.time() - starttime
return step_count, time_elapsed
def config_recording(self, images, data, fields):
self.record_images = images
self.record_data = data
self.recorded_fields = fields
def display_name(python_name):
n = list(python_name)
n[0] = n[0].upper()
for i in range(1, len(n)):
if n[i] == '_':
n[i] = ' '
if len(n) > i+1:
n[i+1] = n[i+1].upper()
return ''.join(n)
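# Example: display_name('learning_rate') -> 'Learning Rate'; prepare() uses this to turn
# attribute names such as 'value_learning_rate' or 'action_reset' into control/action labels.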
|
baby-a3c.py
|
# Baby Advantage Actor-Critic | Sam Greydanus | October 2017 | MIT License
from __future__ import print_function
import torch, os, gym, time, glob, argparse, sys
import numpy as np
from scipy.signal import lfilter
# from scipy.misc import imresize # preserves single-pixel info _unlike_ img = img[::2,::2]
import cv2
import torch.nn as nn
import torch.nn.functional as F
import torch.multiprocessing as mp
os.environ['OMP_NUM_THREADS'] = '1'
def get_args():
parser = argparse.ArgumentParser(description=None)
parser.add_argument('--env', default='Breakout-v4', type=str, help='gym environment')
parser.add_argument('--processes', default=20, type=int, help='number of processes to train with')
parser.add_argument('--render', default=False, type=bool, help='renders the atari environment')
parser.add_argument('--test', default=False, type=bool, help='sets lr=0, chooses most likely actions')
parser.add_argument('--rnn_steps', default=20, type=int, help='steps to train LSTM over')
parser.add_argument('--lr', default=1e-4, type=float, help='learning rate')
parser.add_argument('--seed', default=1, type=int, help='seed random # generators (for reproducibility)')
parser.add_argument('--gamma', default=0.99, type=float, help='rewards discount factor')
parser.add_argument('--tau', default=1.0, type=float, help='generalized advantage estimation discount')
parser.add_argument('--horizon', default=0.99, type=float, help='horizon for running averages')
parser.add_argument('--hidden', default=256, type=int, help='hidden size of GRU')
return parser.parse_args()
discount = lambda x, gamma: lfilter([1],[1,-gamma],x[::-1])[::-1] # discounted rewards one liner
# prepro = lambda img: imresize(img[35:195].mean(2), (80,80)).astype(np.float32).reshape(1,80,80)/255.
prepro = lambda img: cv2.resize(img[35:195].mean(2), (80,80)).astype(np.float32).reshape(1,80,80)/255.
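# Hedged worked example of the `discount` one-liner above (helper is illustrative and never
# called): lfilter([1], [1, -gamma], ...) applied to the reversed sequence evaluates the
# backward recursion G_t = x_t + gamma * G_{t+1}.
def _discount_example(gamma=0.5):
    # with x = [1, 1, 1] and gamma = 0.5 the recursion yields [1.75, 1.5, 1.0]
    return np.allclose(discount(np.array([1.0, 1.0, 1.0]), gamma), [1.75, 1.5, 1.0])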
def printlog(args, s, end='\n', mode='a'):
print(s, end=end)
f=open(args.save_dir+'log.txt',mode)
f.write(s+'\n')
f.close()
class NNPolicy(nn.Module): # an actor-critic neural network
def __init__(self, channels, memsize, num_actions):
super(NNPolicy, self).__init__()
self.conv1 = nn.Conv2d(channels, 32, 3, stride=2, padding=1)
self.conv2 = nn.Conv2d(32, 32, 3, stride=2, padding=1)
self.conv3 = nn.Conv2d(32, 32, 3, stride=2, padding=1)
self.conv4 = nn.Conv2d(32, 32, 3, stride=2, padding=1)
self.gru = nn.GRUCell(32 * 5 * 5, memsize)
self.critic_linear, self.actor_linear = nn.Linear(memsize, 1), nn.Linear(memsize, num_actions)
def forward(self, inputs, train=True, hard=False):
inputs, hx = inputs
x = F.elu(self.conv1(inputs))
x = F.elu(self.conv2(x))
x = F.elu(self.conv3(x))
x = F.elu(self.conv4(x))
hx = self.gru(x.view(-1, 32 * 5 * 5), (hx))
return self.critic_linear(hx), self.actor_linear(hx), hx
def try_load(self, save_dir):
        # if a previously saved model checkpoint exists, load it and return the step count it was saved at
paths = glob.glob(save_dir + '*.tar')
step = 0
if len(paths) > 0:
ckpts = [int(s.split('.')[-2]) for s in paths]
ix = np.argmax(ckpts) ; step = ckpts[ix]
self.load_state_dict(torch.load(paths[ix]))
print("\tno saved models") if step is 0 else print("\tloaded model: {}".format(paths[ix]))
return step
class SharedAdam(torch.optim.Adam): # extend a pytorch optimizer so it shares grads across processes
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0):
super(SharedAdam, self).__init__(params, lr, betas, eps, weight_decay)
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
state['shared_steps'], state['step'] = torch.zeros(1).share_memory_(), 0
state['exp_avg'] = p.data.new().resize_as_(p.data).zero_().share_memory_()
state['exp_avg_sq'] = p.data.new().resize_as_(p.data).zero_().share_memory_()
def step(self, closure=None):
for group in self.param_groups:
for p in group['params']:
if p.grad is None: continue
self.state[p]['shared_steps'] += 1
self.state[p]['step'] = self.state[p]['shared_steps'][0] - 1 # a "step += 1" comes later
        super(SharedAdam, self).step(closure)
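def _shared_optimizer_example():
    # Hedged usage sketch (never called; mirrors the __main__ block below): the parent builds
    # one shared model and one SharedAdam, and every worker applies its gradients through
    # them, so the Adam moments live in shared memory rather than per process.
    # The sizes used here are illustrative only.
    model = NNPolicy(channels=1, memsize=256, num_actions=4).share_memory()
    optimizer = SharedAdam(model.parameters(), lr=1e-4)
    return model, optimizer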
def cost_func(args, values, logps, actions, rewards):
# values([21, 1]), logps([20, 4]), actions([20]), rewards(20,)
np_values = values.view(-1).data.numpy() # np_values(21,)
# generalized advantage estimation using \delta_t residuals (a policy gradient method)
delta_t = np.asarray(rewards) + args.gamma * np_values[1:] - np_values[:-1] # delta_t(20,)
    # logpys = logps.gather(1, torch.tensor(actions).view(-1,1))  # older variant kept for reference
    logpys = logps.gather(1, actions.clone().detach().view(-1, 1))  # pick the log-prob of each taken action along dim=1 of logps
gen_adv_est = discount(delta_t, args.gamma * args.tau)
policy_loss = -(logpys.view(-1) * torch.FloatTensor(gen_adv_est.copy())).sum()
# l2 loss over value estimator
rewards[-1] += args.gamma * np_values[-1]
discounted_r = discount(np.asarray(rewards), args.gamma)
discounted_r = torch.tensor(discounted_r.copy(), dtype=torch.float32)
value_loss = .5 * (discounted_r - values[:-1,0]).pow(2).sum()
entropy_loss = (-logps * torch.exp(logps)).sum() # entropy definition, for entropy regularization
return policy_loss + 0.5 * value_loss - 0.01 * entropy_loss
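# Note on the math implemented above (a sketch, not additional functionality): the policy
# term uses the generalized advantage estimate
#     A_t = sum_{l >= 0} (gamma * tau)**l * delta_{t+l},  with  delta_t = r_t + gamma * V(s_{t+1}) - V(s_t)
# and `discount(delta_t, args.gamma * args.tau)` evaluates exactly that backward recursion.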
def train(shared_model, shared_optimizer, rank, args, dict_info):
env = gym.make(args.env) # make a local (unshared) environment
env.seed(args.seed + rank)
torch.manual_seed(args.seed + rank) # seed everything
model = NNPolicy(channels=1, memsize=args.hidden, num_actions=args.num_actions) # a local/unshared model
img = env.reset()
    state = torch.tensor(prepro(img))  # img -> (1, 80, 80), values scaled to the range 0~1.0
start_time = last_disp_time = time.time()
episode_length, episode_rewards, episode_losses, done = 0, 0, 0, True # bookkeeping
while dict_info['frames'][0] <= 8e7 or args.test: # openai baselines uses 40M frames...we'll use 80M
        # keep training while the global frame count is at most 80M (or indefinitely when args.test is set)
model.load_state_dict(shared_model.state_dict()) # sync with shared model
hx = torch.zeros(1, 256) if done else hx.detach() # rnn activation vector
        values, logps, actions, rewards = [], [], [], []  # save values for computing gradients
for step in range(args.rnn_steps):
episode_length += 1
value, logit, hx = model((state.view(1,1,80,80), hx))
logp = F.log_softmax(logit, dim=-1)
            action = torch.exp(logp).multinomial(num_samples=1).data[0]  # sample an action from the policy distribution
state, reward, done, _ = env.step(action.numpy()[0])
if args.render:
env.render()
            state = torch.tensor(prepro(state))  # img -> (1, 80, 80), values scaled to the range 0~1.0
episode_rewards += reward
            reward = np.clip(reward, -1, 1)  # clip the reward to [-1, 1]
            done = done or episode_length >= 1e4  # don't play one episode for too long
dict_info['frames'].add_(1)
num_frames = int(dict_info['frames'].item())
if num_frames % 2e6 == 0: # save every 2M frames
printlog(args, '\n\t{:.0f}M frames: saved model\n'.format(num_frames/1e6))
torch.save(shared_model.state_dict(), args.save_dir+'model.{:.0f}.tar'.format(num_frames/1e6))
if done: # update shared data
dict_info['episodes'] += 1
interp = 1 if dict_info['episodes'][0] == 1 else 1 - args.horizon
                dict_info['run_episode_rewards'].mul_(1 - interp).add_(interp * episode_rewards)  # running average: keep (1 - interp) of the old value (~99%) and add interp (~1%) of the new episode
dict_info['run_episode_losses'].mul_(1 - interp).add_(interp * episode_losses)
                if rank == 0 and time.time() - last_disp_time > 60:  # print progress from worker 0 about once a minute
                    elapsed = time.strftime("%Hh %Mm %Ss", time.gmtime(time.time() - start_time))  # wall-clock time since start
printlog(args, 'time {}, episodes {:.0f}, frames {:.1f}M, mean episode_rewards {:.2f}, run loss {:.2f}'
.format(elapsed, dict_info['episodes'].item(), num_frames / 1e6,
dict_info['run_episode_rewards'].item(), dict_info['run_episode_losses'].item()))
last_disp_time = time.time()
            if done:  # episode finished: reset bookkeeping and the environment
episode_length, episode_rewards, episode_losses = 0, 0, 0
state = torch.tensor(prepro(env.reset()))
values.append(value) ; logps.append(logp) ; actions.append(action) ; rewards.append(reward)
next_value = torch.zeros(1,1) if done else model((state.unsqueeze(0), hx))[0]
        values.append(next_value.detach())  # append only the bootstrap value estimate for the next state
loss = cost_func(args, torch.cat(values), torch.cat(logps), torch.cat(actions), np.asarray(rewards))
episode_losses += loss.item()
shared_optimizer.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 40)
for param, shared_param in zip(model.parameters(), shared_model.parameters()):
if shared_param.grad is None:
shared_param._grad = param.grad # sync gradients with shared model
shared_optimizer.step()
if __name__ == "__main__":
if sys.version_info[0] > 2:
mp.set_start_method('spawn') # this must not be in global scope
elif sys.platform == 'linux' or sys.platform == 'linux2':
raise Exception("Must be using Python 3 with linux!") # or else you get a deadlock in conv2d
args = get_args()
args.save_dir = '{}/'.format(args.env.lower()) # keep the directory structure simple
if args.render:
args.processes = 1
        # args.test = True  # left commented out so that rendering still trains rather than switching to test mode
if args.test:
        args.lr = 0  # don't train in test mode
args.num_actions = gym.make(args.env).action_space.n # get the action space of this game
os.makedirs(args.save_dir) if not os.path.exists(args.save_dir) else None # make dir to save models etc.
torch.manual_seed(args.seed)
shared_model = NNPolicy(channels=1, memsize=args.hidden, num_actions=args.num_actions).share_memory()
shared_optimizer = SharedAdam(shared_model.parameters(), lr=args.lr)
dict_info = {k: torch.DoubleTensor([0]).share_memory_() for k in ['run_episode_rewards', 'run_episode_losses', 'episodes', 'frames']}
dict_info['frames'] += shared_model.try_load(args.save_dir) * 1e6
if int(dict_info['frames'].item()) == 0:
printlog(args, '', end='', mode='w') # clear log file
processes = []
for rank in range(args.processes):
p = mp.Process(target=train, args=(shared_model, shared_optimizer, rank, args, dict_info))
p.start()
processes.append(p)
for p in processes: p.join()
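# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): the running-reward
# bookkeeping above is an exponential moving average. With args.horizon near
# 0.99, as the 99%/1% comment suggests, each finished episode shifts the
# running value by only 1% of the difference. A plain-Python version:
def running_average(old, new, horizon=0.99, first_episode=False):
    """Exponential moving average as used for dict_info['run_episode_rewards']."""
    interp = 1.0 if first_episode else 1.0 - horizon
    return old * (1.0 - interp) + interp * new
# Example: running_average(10.0, 20.0) == 10.1, i.e. the average moves by 1%
# of the gap between the old value and the new episode reward.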
|
slotmachine_GUI.py
|
import kivy
import time
import threading
from random import randint
from kivy.app import App
from kivy.base import Builder
from kivy.properties import NumericProperty
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.label import Label
from kivy.uix.gridlayout import GridLayout
from kivy.uix.textinput import TextInput
from kivy.uix.button import Button
from kivy.uix.image import Image
from kivy.graphics import Color, Rectangle
from kivy.uix.widget import Widget
from kivy.properties import ObjectProperty
from kivy.properties import BooleanProperty
from kivy.uix.floatlayout import FloatLayout
from kivy.clock import Clock, mainthread
from kivy.config import Config
from win32api import GetSystemMetrics
from slotmachine import Slotmachine_engine
import locale
import sys
locale.setlocale(locale.LC_ALL, 'de_DE.utf-8')
if sys.platform == 'win32':
Config.set('graphics', 'width', GetSystemMetrics(0)-100)
Config.set('graphics', 'height', GetSystemMetrics(1)-100)
else:
Config.set('graphics', 'width', '1200')
Config.set('graphics', 'height', '800')
Game = Slotmachine_engine(1,50)
class MyTextInput(TextInput):
max_characters = NumericProperty(0)
def insert_text(self, substring, from_undo=False):
if len(self.text) > 3: # hard-coded cap of 4 characters; max_characters is declared above but not used here
substring = ''
TextInput.insert_text(self, substring, from_undo)
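# Hedged sketch (not from the original file): if the intent was for the cap to
# come from the max_characters property declared above rather than the
# hard-coded 3, the override could look like the hypothetical class below.
# max_characters would then have to be set from the kv rule or the constructor.
class BoundedTextInput(TextInput):
    max_characters = NumericProperty(4)  # hypothetical default, not taken from the original
    def insert_text(self, substring, from_undo=False):
        # drop the new text once the configured limit is reached
        if 0 < self.max_characters <= len(self.text):
            substring = ''
        return super().insert_text(substring, from_undo=from_undo)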
class MyGrid(Widget):
def start_game(self):
self.color_winning_lines(0)
self.get_number_of_symbols()
self.get_payout()
self.shuffle_numbers()
Game.start_round()
self.credits.text = str(Game.get_credits())
self.get_rows()
def get_rows(self):
dic_name_to_picsource = {'10': 'icons/cherry.png',
'Jack': 'icons/strawberry.png',
'Queen': 'icons/citrone.png',
'King': 'icons/banana.png',
'Ace': 'icons/grape.png',
'SEVEN': 'icons/SEVEN.png'}
self.wheel_1_0.source = dic_name_to_picsource[Game.get_rows()[0][0]]
self.wheel_2_0.source = dic_name_to_picsource[Game.get_rows()[0][1]]
self.wheel_3_0.source = dic_name_to_picsource[Game.get_rows()[0][2]]
self.wheel_4_0.source = dic_name_to_picsource[Game.get_rows()[0][3]]
self.wheel_5_0.source = dic_name_to_picsource[Game.get_rows()[0][4]]
self.wheel_1_1.source = dic_name_to_picsource[Game.get_rows()[1][0]]
self.wheel_2_1.source = dic_name_to_picsource[Game.get_rows()[1][1]]
self.wheel_3_1.source = dic_name_to_picsource[Game.get_rows()[1][2]]
self.wheel_4_1.source = dic_name_to_picsource[Game.get_rows()[1][3]]
self.wheel_5_1.source = dic_name_to_picsource[Game.get_rows()[1][4]]
self.wheel_1_2.source = dic_name_to_picsource[Game.get_rows()[2][0]]
self.wheel_2_2.source = dic_name_to_picsource[Game.get_rows()[2][1]]
self.wheel_3_2.source = dic_name_to_picsource[Game.get_rows()[2][2]]
self.wheel_4_2.source = dic_name_to_picsource[Game.get_rows()[2][3]]
self.wheel_5_2.source = dic_name_to_picsource[Game.get_rows()[2][4]]
def shuffle_numbers(self):
threading.Thread(target=self.shuffle_numbers_thread).start()
def shuffle_numbers_thread(self):
dic_name_to_picsource = {'10': 'icons/cherry.png',
'Jack': 'icons/strawberry.png',
'Queen': 'icons/citrone.png',
'King': 'icons/banana.png',
'Ace': 'icons/grape.png',
'SEVEN': 'icons/SEVEN.png'}
shuffle_pictures = ['10']*5 + ['Jack'] * 5 + ['Queen'] * 5 + ['King'] * 5 + ['Ace'] * 5 + ['SEVEN'] * 5
for i in range(10):
if i < 5:
self.wheel_1_0.source = dic_name_to_picsource[str(shuffle_pictures[randint(0,len(shuffle_pictures)-2)])]
self.wheel_1_1.source = dic_name_to_picsource[str(shuffle_pictures[randint(0,len(shuffle_pictures)-2)])]
self.wheel_1_2.source = dic_name_to_picsource[str(shuffle_pictures[randint(0,len(shuffle_pictures)-2)])]
else:
self.wheel_1_0.source = dic_name_to_picsource[Game.get_rows()[0][0]]
self.wheel_1_1.source = dic_name_to_picsource[Game.get_rows()[1][0]]
self.wheel_1_2.source = dic_name_to_picsource[Game.get_rows()[2][0]]
if i < 6:
self.wheel_2_0.source = dic_name_to_picsource[str(shuffle_pictures[randint(0,len(shuffle_pictures)-2)])]
self.wheel_2_1.source = dic_name_to_picsource[str(shuffle_pictures[randint(0,len(shuffle_pictures)-2)])]
self.wheel_2_2.source = dic_name_to_picsource[str(shuffle_pictures[randint(0,len(shuffle_pictures)-2)])]
else:
self.wheel_2_0.source = dic_name_to_picsource[Game.get_rows()[0][1]]
self.wheel_2_1.source = dic_name_to_picsource[Game.get_rows()[1][1]]
self.wheel_2_2.source = dic_name_to_picsource[Game.get_rows()[2][1]]
if i < 7:
self.wheel_3_0.source = dic_name_to_picsource[str(shuffle_pictures[randint(0,len(shuffle_pictures)-2)])]
self.wheel_3_1.source = dic_name_to_picsource[str(shuffle_pictures[randint(0,len(shuffle_pictures)-2)])]
self.wheel_3_2.source = dic_name_to_picsource[str(shuffle_pictures[randint(0,len(shuffle_pictures)-2)])]
else:
self.wheel_3_0.source = dic_name_to_picsource[Game.get_rows()[0][2]]
self.wheel_3_1.source = dic_name_to_picsource[Game.get_rows()[1][2]]
self.wheel_3_2.source = dic_name_to_picsource[Game.get_rows()[2][2]]
if i < 8:
self.wheel_4_0.source = dic_name_to_picsource[str(shuffle_pictures[randint(0,len(shuffle_pictures)-2)])]
self.wheel_4_1.source = dic_name_to_picsource[str(shuffle_pictures[randint(0,len(shuffle_pictures)-2)])]
self.wheel_4_2.source = dic_name_to_picsource[str(shuffle_pictures[randint(0,len(shuffle_pictures)-2)])]
else:
self.wheel_4_0.source = dic_name_to_picsource[Game.get_rows()[0][3]]
self.wheel_4_1.source = dic_name_to_picsource[Game.get_rows()[1][3]]
self.wheel_4_2.source = dic_name_to_picsource[Game.get_rows()[2][3]]
if i < 9:
self.wheel_5_0.source = dic_name_to_picsource[str(shuffle_pictures[randint(0,len(shuffle_pictures)-2)])]
self.wheel_5_1.source = dic_name_to_picsource[str(shuffle_pictures[randint(0,len(shuffle_pictures)-2)])]
self.wheel_5_2.source = dic_name_to_picsource[str(shuffle_pictures[randint(0,len(shuffle_pictures)-2)])]
else:
self.wheel_5_0.source = dic_name_to_picsource[Game.get_rows()[0][4]]
self.wheel_5_1.source = dic_name_to_picsource[Game.get_rows()[1][4]]
self.wheel_5_2.source = dic_name_to_picsource[Game.get_rows()[2][4]]
time.sleep(0.035)
self.color_winning_lines(1)
if Game.get_win() > 0:
self.win.text = str(Game.get_win())
def color_winning_lines(self, i):
self.ow = i
Clock.schedule_once(self.color_winning_lines_thread, 0)
def check_input_size(self):
for i in [self.gui_w1_nS1,
self.gui_w2_nS1,
self.gui_w3_nS1,
self.gui_w4_nS1,
self.gui_w5_nS1,
self.gui_w1_nS2,
self.gui_w2_nS2,
self.gui_w3_nS2,
self.gui_w4_nS2,
self.gui_w5_nS2,
self.gui_w1_nS3,
self.gui_w2_nS3,
self.gui_w3_nS3,
self.gui_w4_nS3,
self.gui_w5_nS3,
self.gui_w1_nS4,
self.gui_w2_nS4,
self.gui_w3_nS4,
self.gui_w4_nS4,
self.gui_w5_nS4,
self.gui_w1_nS5,
self.gui_w2_nS5,
self.gui_w3_nS5,
self.gui_w4_nS5,
self.gui_w5_nS5,
self.gui_w1_nS6,
self.gui_w2_nS6,
self.gui_w3_nS6,
self.gui_w4_nS6,
self.gui_w5_nS6]:
if int(i.text.replace('.','')) > 99:
i.text = '99'
def get_number_of_symbols(self):
self.gui_w1_nS1.text = str(locale.format_string('%d', Game.w1_nS1, 1))
self.gui_w2_nS1.text = str(locale.format_string('%d', Game.w2_nS1, 1))
self.gui_w3_nS1.text = str(locale.format_string('%d', Game.w3_nS1, 1))
self.gui_w4_nS1.text = str(locale.format_string('%d', Game.w4_nS1, 1))
self.gui_w5_nS1.text = str(locale.format_string('%d', Game.w5_nS1, 1))
self.gui_w1_nS2.text = str(locale.format_string('%d', Game.w1_nS2, 1))
self.gui_w2_nS2.text = str(locale.format_string('%d', Game.w2_nS2, 1))
self.gui_w3_nS2.text = str(locale.format_string('%d', Game.w3_nS2, 1))
self.gui_w4_nS2.text = str(locale.format_string('%d', Game.w4_nS2, 1))
self.gui_w5_nS2.text = str(locale.format_string('%d', Game.w5_nS2, 1))
self.gui_w1_nS3.text = str(locale.format_string('%d', Game.w1_nS3, 1))
self.gui_w2_nS3.text = str(locale.format_string('%d', Game.w2_nS3, 1))
self.gui_w3_nS3.text = str(locale.format_string('%d', Game.w3_nS3, 1))
self.gui_w4_nS3.text = str(locale.format_string('%d', Game.w4_nS3, 1))
self.gui_w5_nS3.text = str(locale.format_string('%d', Game.w5_nS3, 1))
self.gui_w1_nS4.text = str(locale.format_string('%d', Game.w1_nS4, 1))
self.gui_w2_nS4.text = str(locale.format_string('%d', Game.w2_nS4, 1))
self.gui_w3_nS4.text = str(locale.format_string('%d', Game.w3_nS4, 1))
self.gui_w4_nS4.text = str(locale.format_string('%d', Game.w4_nS4, 1))
self.gui_w5_nS4.text = str(locale.format_string('%d', Game.w5_nS4, 1))
self.gui_w1_nS5.text = str(locale.format_string('%d', Game.w1_nS5, 1))
self.gui_w2_nS5.text = str(locale.format_string('%d', Game.w2_nS5, 1))
self.gui_w3_nS5.text = str(locale.format_string('%d', Game.w3_nS5, 1))
self.gui_w4_nS5.text = str(locale.format_string('%d', Game.w4_nS5, 1))
self.gui_w5_nS5.text = str(locale.format_string('%d', Game.w5_nS5, 1))
self.gui_w1_nS6.text = str(locale.format_string('%d', Game.w1_nS6, 1))
self.gui_w2_nS6.text = str(locale.format_string('%d', Game.w2_nS6, 1))
self.gui_w3_nS6.text = str(locale.format_string('%d', Game.w3_nS6, 1))
self.gui_w4_nS6.text = str(locale.format_string('%d', Game.w4_nS6, 1))
self.gui_w5_nS6.text = str(locale.format_string('%d', Game.w5_nS6, 1))
def change_number_of_symbols(self):
if int(self.gui_w1_nS1.text) + int(self.gui_w1_nS2.text) + int(self.gui_w1_nS3.text) + int(self.gui_w1_nS4.text) + int(self.gui_w1_nS5.text) + int(self.gui_w1_nS6.text) < 6:
Game.w1_nS1 = 1
Game.w1_nS2 = 1
Game.w1_nS3 = 1
Game.w1_nS4 = 1
Game.w1_nS5 = 1
Game.w1_nS6 = 1
else:
Game.w1_nS1 = int(self.gui_w1_nS1.text)
Game.w1_nS2 = int(self.gui_w1_nS2.text)
Game.w1_nS3 = int(self.gui_w1_nS3.text)
Game.w1_nS4 = int(self.gui_w1_nS4.text)
Game.w1_nS5 = int(self.gui_w1_nS5.text)
Game.w1_nS6 = int(self.gui_w1_nS6.text)
if int(self.gui_w2_nS1.text) + int(self.gui_w2_nS2.text) + int(self.gui_w2_nS3.text) + int(self.gui_w2_nS4.text) + int(self.gui_w2_nS5.text) + int(self.gui_w2_nS6.text) < 6:
Game.w2_nS1 = 1
Game.w2_nS2 = 1
Game.w2_nS3 = 1
Game.w2_nS4 = 1
Game.w2_nS5 = 1
Game.w2_nS6 = 1
else:
Game.w2_nS1 = int(self.gui_w2_nS1.text)
Game.w2_nS2 = int(self.gui_w2_nS2.text)
Game.w2_nS3 = int(self.gui_w2_nS3.text)
Game.w2_nS4 = int(self.gui_w2_nS4.text)
Game.w2_nS5 = int(self.gui_w2_nS5.text)
Game.w2_nS6 = int(self.gui_w2_nS6.text)
if int(self.gui_w3_nS1.text) + int(self.gui_w3_nS2.text) + int(self.gui_w3_nS3.text) + int(self.gui_w3_nS4.text) + int(self.gui_w3_nS5.text) + int(self.gui_w3_nS6.text) < 6:
Game.w3_nS1 = 1
Game.w3_nS2 = 1
Game.w3_nS3 = 1
Game.w3_nS4 = 1
Game.w3_nS5 = 1
Game.w3_nS6 = 1
else:
Game.w3_nS1 = int(self.gui_w3_nS1.text)
Game.w3_nS2 = int(self.gui_w3_nS2.text)
Game.w3_nS3 = int(self.gui_w3_nS3.text)
Game.w3_nS4 = int(self.gui_w3_nS4.text)
Game.w3_nS5 = int(self.gui_w3_nS5.text)
Game.w3_nS6 = int(self.gui_w3_nS6.text)
if int(self.gui_w4_nS1.text) + int(self.gui_w4_nS2.text) + int(self.gui_w4_nS3.text) + int(self.gui_w4_nS4.text) + int(self.gui_w4_nS5.text) + int(self.gui_w4_nS6.text) < 6:
Game.w4_nS1 = 1
Game.w4_nS2 = 1
Game.w4_nS3 = 1
Game.w4_nS4 = 1
Game.w4_nS5 = 1
Game.w4_nS6 = 1
else:
Game.w4_nS1 = int(self.gui_w4_nS1.text)
Game.w4_nS2 = int(self.gui_w4_nS2.text)
Game.w4_nS3 = int(self.gui_w4_nS3.text)
Game.w4_nS4 = int(self.gui_w4_nS4.text)
Game.w4_nS5 = int(self.gui_w4_nS5.text)
Game.w4_nS6 = int(self.gui_w4_nS6.text)
if int(self.gui_w5_nS1.text) + int(self.gui_w5_nS2.text) + int(self.gui_w5_nS3.text) + int(self.gui_w5_nS4.text) + int(self.gui_w5_nS5.text) + int(self.gui_w5_nS6.text) < 6:
Game.w5_nS1 = 1
Game.w5_nS2 = 1
Game.w5_nS3 = 1
Game.w5_nS4 = 1
Game.w5_nS5 = 1
Game.w5_nS6 = 1
else:
Game.w5_nS1 = int(self.gui_w5_nS1.text)
Game.w5_nS2 = int(self.gui_w5_nS2.text)
Game.w5_nS3 = int(self.gui_w5_nS3.text)
Game.w5_nS4 = int(self.gui_w5_nS4.text)
Game.w5_nS5 = int(self.gui_w5_nS5.text)
Game.w5_nS6 = int(self.gui_w5_nS6.text)
def get_payout(self):
self.gui_s1_5times.text = str(locale.format_string('%d', Game.s1_5times, 1))
self.gui_s2_5times.text = str(locale.format_string('%d', Game.s2_5times, 1))
self.gui_s3_5times.text = str(locale.format_string('%d', Game.s3_5times, 1))
self.gui_s4_5times.text = str(locale.format_string('%d', Game.s4_5times, 1))
self.gui_s5_5times.text = str(locale.format_string('%d', Game.s5_5times, 1))
self.gui_s1_4times.text = str(locale.format_string('%d', Game.s1_4times, 1))
self.gui_s2_4times.text = str(locale.format_string('%d', Game.s2_4times, 1))
self.gui_s3_4times.text = str(locale.format_string('%d', Game.s3_4times, 1))
self.gui_s4_4times.text = str(locale.format_string('%d', Game.s4_4times, 1))
self.gui_s5_4times.text = str(locale.format_string('%d', Game.s5_4times, 1))
self.gui_s1_3times.text = str(locale.format_string('%d', Game.s1_3times, 1))
self.gui_s2_3times.text = str(locale.format_string('%d', Game.s2_3times, 1))
self.gui_s3_3times.text = str(locale.format_string('%d', Game.s3_3times, 1))
self.gui_s4_3times.text = str(locale.format_string('%d', Game.s4_3times, 1))
self.gui_s5_3times.text = str(locale.format_string('%d', Game.s5_3times, 1))
def change_payout(self):
for i in [self.gui_s1_5times,
self.gui_s2_5times,
self.gui_s3_5times,
self.gui_s4_5times,
self.gui_s5_5times,
self.gui_s1_4times,
self.gui_s2_4times,
self.gui_s3_4times,
self.gui_s4_4times,
self.gui_s5_4times,
self.gui_s1_3times,
self.gui_s2_3times,
self.gui_s3_3times,
self.gui_s4_3times,
self.gui_s5_3times]:
if int(i.text) > 999999:
i.text = '999999'
Game.s1_5times = int(self.gui_s1_5times.text)
Game.s2_5times = int(self.gui_s2_5times.text)
Game.s3_5times = int(self.gui_s3_5times.text)
Game.s4_5times = int(self.gui_s4_5times.text)
Game.s5_5times = int(self.gui_s5_5times.text)
Game.s1_4times = int(self.gui_s1_4times.text)
Game.s2_4times = int(self.gui_s2_4times.text)
Game.s3_4times = int(self.gui_s3_4times.text)
Game.s4_4times = int(self.gui_s4_4times.text)
Game.s5_4times = int(self.gui_s5_4times.text)
Game.s1_3times = int(self.gui_s1_3times.text)
Game.s2_3times = int(self.gui_s2_3times.text)
Game.s3_3times = int(self.gui_s3_3times.text)
Game.s4_3times = int(self.gui_s4_3times.text)
Game.s5_3times = int(self.gui_s5_3times.text)
def update_number_of_possible_ways(self):
self.gui_npw_s1_5times.text = str(locale.format_string('%d', (Game.w1_nS1 * Game.w2_nS1 * Game.w3_nS1 * Game.w4_nS1 * Game.w5_nS1 * 5), 1))
self.gui_npw_s2_5times.text = str(locale.format_string('%d', (Game.w1_nS2 * Game.w2_nS2 * Game.w3_nS2 * Game.w4_nS2 * Game.w5_nS2 * 5), 1))
self.gui_npw_s3_5times.text = str(locale.format_string('%d', (Game.w1_nS3 * Game.w2_nS3 * Game.w3_nS3 * Game.w4_nS3 * Game.w5_nS3 * 5), 1))
self.gui_npw_s4_5times.text = str(locale.format_string('%d', (Game.w1_nS4 * Game.w2_nS4 * Game.w3_nS4 * Game.w4_nS4 * Game.w5_nS4 * 5), 1))
self.gui_npw_s5_5times.text = str(locale.format_string('%d', (Game.w1_nS5 * Game.w2_nS5 * Game.w3_nS5 * Game.w4_nS5 * Game.w5_nS5 * 5), 1))
self.gui_npw_s6_5times.text = str(locale.format_string('%d', (Game.w1_nS6 * Game.w2_nS6 * Game.w3_nS6 * Game.w4_nS6 * Game.w5_nS6 * 5), 1))
self.gui_npw_s1_4times.text = str(locale.format_string('%d', Game.w1_nS1 * Game.w2_nS1 * Game.w3_nS1 * Game.w4_nS1 * (sum([Game.w5_nS1, Game.w5_nS2, Game.w5_nS3, Game.w5_nS4, Game.w5_nS5, Game.w5_nS6]) - Game.w5_nS1) * 5, 1))
self.gui_npw_s2_4times.text = str(locale.format_string('%d', Game.w1_nS2 * Game.w2_nS2 * Game.w3_nS2 * Game.w4_nS2 * (sum([Game.w5_nS1, Game.w5_nS2, Game.w5_nS3, Game.w5_nS4, Game.w5_nS5, Game.w5_nS6]) - Game.w5_nS2) * 5, 1))
self.gui_npw_s3_4times.text = str(locale.format_string('%d', Game.w1_nS3 * Game.w2_nS3 * Game.w3_nS3 * Game.w4_nS3 * (sum([Game.w5_nS1, Game.w5_nS2, Game.w5_nS3, Game.w5_nS4, Game.w5_nS5, Game.w5_nS6]) - Game.w5_nS3) * 5, 1))
self.gui_npw_s4_4times.text = str(locale.format_string('%d', Game.w1_nS4 * Game.w2_nS4 * Game.w3_nS4 * Game.w4_nS4 * (sum([Game.w5_nS1, Game.w5_nS2, Game.w5_nS3, Game.w5_nS4, Game.w5_nS5, Game.w5_nS6]) - Game.w5_nS4) * 5, 1))
self.gui_npw_s5_4times.text = str(locale.format_string('%d', Game.w1_nS5 * Game.w2_nS5 * Game.w3_nS5 * Game.w4_nS5 * (sum([Game.w5_nS1, Game.w5_nS2, Game.w5_nS3, Game.w5_nS4, Game.w5_nS5, Game.w5_nS6]) - Game.w5_nS5) * 5, 1))
self.gui_npw_s6_4times.text = str(locale.format_string('%d', Game.w1_nS6 * Game.w2_nS6 * Game.w3_nS6 * Game.w4_nS6 * (sum([Game.w5_nS1, Game.w5_nS2, Game.w5_nS3, Game.w5_nS4, Game.w5_nS5, Game.w5_nS6]) - Game.w5_nS6) * 5, 1))
self.gui_npw_s1_3times.text = str(locale.format_string('%d',Game.w1_nS1 * Game.w2_nS1 * Game.w3_nS1 * (sum([Game.w4_nS1, Game.w4_nS2, Game.w4_nS3, Game.w4_nS4, Game.w4_nS5, Game.w4_nS6]) - Game.w4_nS1) * sum([Game.w5_nS1, Game.w5_nS2, Game.w5_nS3, Game.w5_nS4, Game.w5_nS5, Game.w5_nS6]) * 5, 1))
self.gui_npw_s2_3times.text = str(locale.format_string('%d',Game.w1_nS2 * Game.w2_nS2 * Game.w3_nS2 * (sum([Game.w4_nS1, Game.w4_nS2, Game.w4_nS3, Game.w4_nS4, Game.w4_nS5, Game.w4_nS6]) - Game.w4_nS2) * sum([Game.w5_nS1, Game.w5_nS2, Game.w5_nS3, Game.w5_nS4, Game.w5_nS5, Game.w5_nS6]) * 5, 1))
self.gui_npw_s3_3times.text = str(locale.format_string('%d',Game.w1_nS3 * Game.w2_nS3 * Game.w3_nS3 * (sum([Game.w4_nS1, Game.w4_nS2, Game.w4_nS3, Game.w4_nS4, Game.w4_nS5, Game.w4_nS6]) - Game.w4_nS3) * sum([Game.w5_nS1, Game.w5_nS2, Game.w5_nS3, Game.w5_nS4, Game.w5_nS5, Game.w5_nS6]) * 5, 1))
self.gui_npw_s4_3times.text = str(locale.format_string('%d',Game.w1_nS4 * Game.w2_nS4 * Game.w3_nS4 * (sum([Game.w4_nS1, Game.w4_nS2, Game.w4_nS3, Game.w4_nS4, Game.w4_nS5, Game.w4_nS6]) - Game.w4_nS4) * sum([Game.w5_nS1, Game.w5_nS2, Game.w5_nS3, Game.w5_nS4, Game.w5_nS5, Game.w5_nS6]) * 5, 1))
self.gui_npw_s5_3times.text = str(locale.format_string('%d',Game.w1_nS5 * Game.w2_nS5 * Game.w3_nS5 * (sum([Game.w4_nS1, Game.w4_nS2, Game.w4_nS3, Game.w4_nS4, Game.w4_nS5, Game.w4_nS6]) - Game.w4_nS5) * sum([Game.w5_nS1, Game.w5_nS2, Game.w5_nS3, Game.w5_nS4, Game.w5_nS5, Game.w5_nS6]) * 5, 1))
self.gui_npw_s6_3times.text = str(locale.format_string('%d',Game.w1_nS6 * Game.w2_nS6 * Game.w3_nS6 * (sum([Game.w4_nS1, Game.w4_nS2, Game.w4_nS3, Game.w4_nS4, Game.w4_nS5, Game.w4_nS6]) - Game.w4_nS6) * sum([Game.w5_nS1, Game.w5_nS2, Game.w5_nS3, Game.w5_nS4, Game.w5_nS5, Game.w5_nS6]) * 5, 1))
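# Worked example for the counts above (illustrative, assuming each reel holds
# 5 copies of every symbol, i.e. 30 symbols per reel; the trailing * 5 in each
# product appears to account for the five paylines):
#   5-of-a-kind per symbol: 5*5*5*5*5 * 5            =  15,625 ways
#   4-of-a-kind per symbol: 5*5*5*5 * (30-5) * 5     =  78,125 ways
#   3-of-a-kind per symbol: 5*5*5 * (30-5) * 30 * 5  = 468,750 ways
# out of 30**5 = 24,300,000 possible reel positions per spin.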
def update_return(self):
self.gui_return_s1_5times.text = str(locale.format_string('%d', int(self.gui_npw_s1_5times.text.replace('.','')) * Game.s1_5times, 1))
self.gui_return_s2_5times.text = str(locale.format_string('%d', int(self.gui_npw_s2_5times.text.replace('.','')) * Game.s2_5times, 1))
self.gui_return_s3_5times.text = str(locale.format_string('%d', int(self.gui_npw_s3_5times.text.replace('.','')) * Game.s3_5times, 1))
self.gui_return_s4_5times.text = str(locale.format_string('%d', int(self.gui_npw_s4_5times.text.replace('.','')) * Game.s4_5times, 1))
self.gui_return_s5_5times.text = str(locale.format_string('%d', int(self.gui_npw_s5_5times.text.replace('.','')) * Game.s5_5times, 1))
self.gui_return_s6_5times.text = str(locale.format_string('%d', int(self.gui_npw_s6_5times.text.replace('.','')) * Game.s6_5times, 1))
self.gui_return_s1_4times.text = str(locale.format_string('%d', int(self.gui_npw_s1_4times.text.replace('.','')) * Game.s1_4times, 1))
self.gui_return_s2_4times.text = str(locale.format_string('%d', int(self.gui_npw_s2_4times.text.replace('.','')) * Game.s2_4times, 1))
self.gui_return_s3_4times.text = str(locale.format_string('%d', int(self.gui_npw_s3_4times.text.replace('.','')) * Game.s3_4times, 1))
self.gui_return_s4_4times.text = str(locale.format_string('%d', int(self.gui_npw_s4_4times.text.replace('.','')) * Game.s4_4times, 1))
self.gui_return_s5_4times.text = str(locale.format_string('%d', int(self.gui_npw_s5_4times.text.replace('.','')) * Game.s5_4times, 1))
self.gui_return_s6_4times.text = str(locale.format_string('%d', int(self.gui_npw_s6_4times.text.replace('.','')) * Game.s6_4times, 1))
self.gui_return_s1_3times.text = str(locale.format_string('%d', int(self.gui_npw_s1_3times.text.replace('.','')) * Game.s1_3times, 1))
self.gui_return_s2_3times.text = str(locale.format_string('%d', int(self.gui_npw_s2_3times.text.replace('.','')) * Game.s2_3times, 1))
self.gui_return_s3_3times.text = str(locale.format_string('%d', int(self.gui_npw_s3_3times.text.replace('.','')) * Game.s3_3times, 1))
self.gui_return_s4_3times.text = str(locale.format_string('%d', int(self.gui_npw_s4_3times.text.replace('.','')) * Game.s4_3times, 1))
self.gui_return_s5_3times.text = str(locale.format_string('%d', int(self.gui_npw_s5_3times.text.replace('.','')) * Game.s5_3times, 1))
self.gui_return_s6_3times.text = str(locale.format_string('%d', int(self.gui_npw_s6_3times.text.replace('.','')) * Game.s6_3times, 1))
def color_winning_lines_thread(self, dt):
ow = self.ow # dt is the Clock.schedule_once delta-time; the on/off flag comes from color_winning_lines()
winning_lines = Game.get_winnig_lines()
if ow == 1 and winning_lines != []:
for i in winning_lines:
if i[0] == 0:
self.wheel_1_0.source = self.wheel_1_0.source.split('.')[0] + '_win.png' if 'win' not in self.wheel_1_0.source else self.wheel_1_0.source
self.wheel_2_0.source = self.wheel_2_0.source.split('.')[0] + '_win.png' if 'win' not in self.wheel_2_0.source else self.wheel_2_0.source
self.wheel_3_0.source = self.wheel_3_0.source.split('.')[0] + '_win.png' if 'win' not in self.wheel_3_0.source else self.wheel_3_0.source
if i[0] == 0 and i[1] == 4:
self.wheel_4_0.source = self.wheel_4_0.source.split('.')[0] + '_win.png' if 'win' not in self.wheel_4_0.source else self.wheel_4_0.source
if i[0] == 0 and i[1] == 5:
self.wheel_4_0.source = self.wheel_4_0.source.split('.')[0] + '_win.png' if 'win' not in self.wheel_4_0.source else self.wheel_4_0.source
self.wheel_5_0.source = self.wheel_5_0.source.split('.')[0] + '_win.png' if 'win' not in self.wheel_5_0.source else self.wheel_5_0.source
if i[0] == 1:
self.wheel_1_1.source = self.wheel_1_1.source.split('.')[0] + '_win.png' if 'win' not in self.wheel_1_1.source else self.wheel_1_1.source
self.wheel_2_1.source = self.wheel_2_1.source.split('.')[0] + '_win.png' if 'win' not in self.wheel_2_1.source else self.wheel_2_1.source
self.wheel_3_1.source = self.wheel_3_1.source.split('.')[0] + '_win.png' if 'win' not in self.wheel_3_1.source else self.wheel_3_1.source
if i[0] == 1 and i[1] == 4:
self.wheel_4_1.source = self.wheel_4_1.source.split('.')[0] + '_win.png' if 'win' not in self.wheel_4_1.source else self.wheel_4_1.source
if i[0] == 1 and i[1] == 5:
self.wheel_4_1.source = self.wheel_4_1.source.split('.')[0] + '_win.png' if 'win' not in self.wheel_4_1.source else self.wheel_4_1.source
self.wheel_5_1.source = self.wheel_5_1.source.split('.')[0] + '_win.png' if 'win' not in self.wheel_5_1.source else self.wheel_5_1.source
if i[0] == 2:
self.wheel_1_2.source = self.wheel_1_2.source.split('.')[0] + '_win.png' if 'win' not in self.wheel_1_2.source else self.wheel_1_2.source
self.wheel_2_2.source = self.wheel_2_2.source.split('.')[0] + '_win.png' if 'win' not in self.wheel_2_2.source else self.wheel_2_2.source
self.wheel_3_2.source = self.wheel_3_2.source.split('.')[0] + '_win.png' if 'win' not in self.wheel_3_2.source else self.wheel_3_2.source
if i[0] == 2 and i[1] == 4:
self.wheel_4_2.source = self.wheel_4_2.source.split('.')[0] + '_win.png' if 'win' not in self.wheel_4_2.source else self.wheel_4_2.source
if i[0] == 2 and i[1] == 5:
self.wheel_4_2.source = self.wheel_4_2.source.split('.')[0] + '_win.png' if 'win' not in self.wheel_4_2.source else self.wheel_4_2.source
self.wheel_5_2.source = self.wheel_5_2.source.split('.')[0] + '_win.png' if 'win' not in self.wheel_5_2.source else self.wheel_5_2.source
if i[0] == 3:
self.wheel_1_0.source = self.wheel_1_0.source.split('.')[0] + '_win.png' if 'win' not in self.wheel_1_0.source else self.wheel_1_0.source
self.wheel_2_1.source = self.wheel_2_1.source.split('.')[0] + '_win.png' if 'win' not in self.wheel_2_1.source else self.wheel_2_1.source
self.wheel_3_2.source = self.wheel_3_2.source.split('.')[0] + '_win.png' if 'win' not in self.wheel_3_2.source else self.wheel_3_2.source
if i[0] == 3 and i[1] == 4:
self.wheel_4_1.source = self.wheel_4_1.source.split('.')[0] + '_win.png' if 'win' not in self.wheel_4_1.source else self.wheel_4_1.source
if i[0] == 3 and i[1] == 5:
self.wheel_4_1.source = self.wheel_4_1.source.split('.')[0] + '_win.png' if 'win' not in self.wheel_4_1.source else self.wheel_4_1.source
self.wheel_5_0.source = self.wheel_5_0.source.split('.')[0] + '_win.png' if 'win' not in self.wheel_5_0.source else self.wheel_5_0.source
if i[0] == 4:
self.wheel_1_2.source = self.wheel_1_2.source.split('.')[0] + '_win.png' if 'win' not in self.wheel_1_2.source else self.wheel_1_2.source
self.wheel_2_1.source = self.wheel_2_1.source.split('.')[0] + '_win.png' if 'win' not in self.wheel_2_1.source else self.wheel_2_1.source
self.wheel_3_0.source = self.wheel_3_0.source.split('.')[0] + '_win.png' if 'win' not in self.wheel_3_0.source else self.wheel_3_0.source
if i[0] == 4 and i[1] == 4:
self.wheel_4_1.source = self.wheel_4_1.source.split('.')[0] + '_win.png' if 'win' not in self.wheel_4_1.source else self.wheel_4_1.source
if i[0] == 4 and i[1] == 5:
self.wheel_4_1.source = self.wheel_4_1.source.split('.')[0] + '_win.png' if 'win' not in self.wheel_4_1.source else self.wheel_4_1.source
self.wheel_5_2.source = self.wheel_5_2.source.split('.')[0] + '_win.png' if 'win' not in self.wheel_5_2.source else self.wheel_5_2.source
elif ow == 0:
self.wheel_1_0.source = self.wheel_1_0.source.split('_')[0] + '.png' if '_' in self.wheel_1_0.source else self.wheel_1_0.source
self.wheel_2_0.source = self.wheel_2_0.source.split('_')[0] + '.png' if '_' in self.wheel_2_0.source else self.wheel_2_0.source
self.wheel_3_0.source = self.wheel_3_0.source.split('_')[0] + '.png' if '_' in self.wheel_3_0.source else self.wheel_3_0.source
self.wheel_4_0.source = self.wheel_4_0.source.split('_')[0] + '.png' if '_' in self.wheel_4_0.source else self.wheel_4_0.source
self.wheel_5_0.source = self.wheel_5_0.source.split('_')[0] + '.png' if '_' in self.wheel_5_0.source else self.wheel_5_0.source
self.wheel_1_1.source = self.wheel_1_1.source.split('_')[0] + '.png' if '_' in self.wheel_1_1.source else self.wheel_1_1.source
self.wheel_2_1.source = self.wheel_2_1.source.split('_')[0] + '.png' if '_' in self.wheel_2_1.source else self.wheel_2_1.source
self.wheel_3_1.source = self.wheel_3_1.source.split('_')[0] + '.png' if '_' in self.wheel_3_1.source else self.wheel_3_1.source
self.wheel_4_1.source = self.wheel_4_1.source.split('_')[0] + '.png' if '_' in self.wheel_4_1.source else self.wheel_4_1.source
self.wheel_5_1.source = self.wheel_5_1.source.split('_')[0] + '.png' if '_' in self.wheel_5_1.source else self.wheel_5_1.source
self.wheel_1_2.source = self.wheel_1_2.source.split('_')[0] + '.png' if '_' in self.wheel_1_2.source else self.wheel_1_2.source
self.wheel_2_2.source = self.wheel_2_2.source.split('_')[0] + '.png' if '_' in self.wheel_2_2.source else self.wheel_2_2.source
self.wheel_3_2.source = self.wheel_3_2.source.split('_')[0] + '.png' if '_' in self.wheel_3_2.source else self.wheel_3_2.source
self.wheel_4_2.source = self.wheel_4_2.source.split('_')[0] + '.png' if '_' in self.wheel_4_2.source else self.wheel_4_2.source
self.wheel_5_2.source = self.wheel_5_2.source.split('_')[0] + '.png' if '_' in self.wheel_5_2.source else self.wheel_5_2.source
def update_statistics(self):
self.check_input_size()
self.change_number_of_symbols()
self.get_number_of_symbols()
self.change_payout()
self.update_number_of_possible_ways()
self.update_return()
sum_w1 = Game.w1_nS1 + Game.w1_nS2 + Game.w1_nS3 + Game.w1_nS4 + Game.w1_nS5 + Game.w1_nS6
sum_w2 = Game.w2_nS1 + Game.w2_nS2 + Game.w2_nS3 + Game.w2_nS4 + Game.w2_nS5 + Game.w2_nS6
sum_w3 = Game.w3_nS1 + Game.w3_nS2 + Game.w3_nS3 + Game.w3_nS4 + Game.w3_nS5 + Game.w3_nS6
sum_w4 = Game.w4_nS1 + Game.w4_nS2 + Game.w4_nS3 + Game.w4_nS4 + Game.w4_nS5 + Game.w4_nS6
sum_w5 = Game.w5_nS1 + Game.w5_nS2 + Game.w5_nS3 + Game.w5_nS4 + Game.w5_nS5 + Game.w5_nS6
self.gui_sum_n_possible_ways_5_times.text = str(locale.format_string('%d', int(self.gui_npw_s1_5times.text.replace('.','')) + int(self.gui_npw_s2_5times.text.replace('.','')) + int(self.gui_npw_s3_5times.text.replace('.','')) + int(self.gui_npw_s4_5times.text.replace('.','')) + int(self.gui_npw_s5_5times.text.replace('.','')) + int(self.gui_npw_s6_5times.text.replace('.','')), 1))
self.gui_sum_n_possible_ways_4_times.text = str(locale.format_string('%d', int(self.gui_npw_s1_4times.text.replace('.','')) + int(self.gui_npw_s2_4times.text.replace('.','')) + int(self.gui_npw_s3_4times.text.replace('.','')) + int(self.gui_npw_s4_4times.text.replace('.','')) + int(self.gui_npw_s5_4times.text.replace('.','')) + int(self.gui_npw_s6_4times.text.replace('.','')), 1))
self.gui_sum_n_possible_ways_3_times.text = str(locale.format_string('%d', int(self.gui_npw_s1_3times.text.replace('.','')) + int(self.gui_npw_s2_3times.text.replace('.','')) + int(self.gui_npw_s3_3times.text.replace('.','')) + int(self.gui_npw_s4_3times.text.replace('.','')) + int(self.gui_npw_s5_3times.text.replace('.','')) + int(self.gui_npw_s6_3times.text.replace('.','')), 1))
self.gui_sum_return_5_times.text = str(locale.format_string('%d', int(self.gui_return_s1_5times.text.replace('.','')) + int(self.gui_return_s2_5times.text.replace('.','')) + int(self.gui_return_s3_5times.text.replace('.','')) + int(self.gui_return_s4_5times.text.replace('.','')) + int(self.gui_return_s5_5times.text.replace('.','')) + int(self.gui_return_s6_5times.text.replace('.','')), 1))
self.gui_sum_return_4_times.text = str(locale.format_string('%d', int(self.gui_return_s1_4times.text.replace('.','')) + int(self.gui_return_s2_4times.text.replace('.','')) + int(self.gui_return_s3_4times.text.replace('.','')) + int(self.gui_return_s4_4times.text.replace('.','')) + int(self.gui_return_s5_4times.text.replace('.','')) + int(self.gui_return_s6_4times.text.replace('.','')), 1))
self.gui_sum_return_3_times.text = str(locale.format_string('%d', int(self.gui_return_s1_3times.text.replace('.','')) + int(self.gui_return_s2_3times.text.replace('.','')) + int(self.gui_return_s3_3times.text.replace('.','')) + int(self.gui_return_s4_3times.text.replace('.','')) + int(self.gui_return_s5_3times.text.replace('.','')) + int(self.gui_return_s6_3times.text.replace('.','')), 1))
self.gui_w1_nS6_sum.text = str(sum([Game.w1_nS1, Game.w1_nS2, Game.w1_nS3, Game.w1_nS4, Game.w1_nS5, Game.w1_nS6]))
self.gui_w2_nS6_sum.text = str(sum([Game.w2_nS1, Game.w2_nS2, Game.w2_nS3, Game.w2_nS4, Game.w2_nS5, Game.w2_nS6]))
self.gui_w3_nS6_sum.text = str(sum([Game.w3_nS1, Game.w3_nS2, Game.w3_nS3, Game.w3_nS4, Game.w3_nS5, Game.w3_nS6]))
self.gui_w4_nS6_sum.text = str(sum([Game.w4_nS1, Game.w4_nS2, Game.w4_nS3, Game.w4_nS4, Game.w4_nS5, Game.w4_nS6]))
self.gui_w5_nS6_sum.text = str(sum([Game.w5_nS1, Game.w5_nS2, Game.w5_nS3, Game.w5_nS4, Game.w5_nS5, Game.w5_nS6]))
self.gui_nr_total_coms.text = f'{sum_w1} * {sum_w2} * {sum_w3} * {sum_w4} * {sum_w5}'
self.gui_nr_total_coms_sum.text = str(locale.format_string('%d', sum_w1*sum_w2*sum_w3*sum_w4*sum_w5, 1))
self.gui_likelihood_getting_line.text = f'({self.gui_sum_n_possible_ways_5_times.text} + {self.gui_sum_n_possible_ways_4_times.text} + {self.gui_sum_n_possible_ways_3_times.text})\n------------------------------------------------\n {self.gui_nr_total_coms_sum.text}'
self.gui_gui_likelihood_getting_line_percent.text = str(round((int(self.gui_sum_n_possible_ways_5_times.text.replace('.','')) + int(self.gui_sum_n_possible_ways_4_times.text.replace('.','')) + int(self.gui_sum_n_possible_ways_3_times.text.replace('.',''))) / int(self.gui_nr_total_coms_sum.text.replace('.',''))*100,2)) + '%'
self.gui_expected_return.text = f'({self.gui_sum_return_5_times.text} + {self.gui_sum_return_4_times.text} + {self.gui_sum_return_3_times.text})\n------------------------------------------------------\n {self.gui_nr_total_coms_sum.text}'
self.gui_expected_return_percent.text = str(round((int(self.gui_sum_return_5_times.text.replace('.','')) + int(self.gui_sum_return_4_times.text.replace('.','')) + int(self.gui_sum_return_3_times.text.replace('.','')))/int(self.gui_nr_total_coms_sum.text.replace('.',''))*100,2)) + '%'
def refill_credits(self):
if int(self.refilled_credits.text) > 999999:
self.refilled_credits.text = '999999'
credits = int(self.refilled_credits.text)
Game.credits = credits
self.credits.text = str(Game.get_credits())
def test_run(self):
threading.Thread(target=self.test_run_thread).start()
def test_run_thread(self):
run_count = int(self.gui_test_run.text)
Game.credits = run_count
for i in range(run_count):
Game.start_round()
self.credits.text = str(Game.get_credits())
self.get_rows()
self.color_winning_lines(0)
self.color_winning_lines(1)
class SlotmachineGUI(App):
def build(self):
return MyGrid()
if __name__ == '__main__':
SlotmachineGUI().run()
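# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original file): the test_run_* methods above
# estimate the machine's payout by replaying many rounds. The same idea works
# headless with just the engine. The constructor arguments simply mirror the
# GUI's Slotmachine_engine(1, 50) call, and the one-credit-per-round assumption
# is inferred from test_run_thread(), which grants exactly one credit per round.
def estimate_return(rounds=10000):
    """Return the fraction of staked credits paid back over `rounds` spins."""
    engine = Slotmachine_engine(1, 50)
    engine.credits = rounds          # one credit per round, as in test_run_thread()
    for _ in range(rounds):
        engine.start_round()
    return engine.get_credits() / rounds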
|
miniterm.py
|
#!/usr/bin/python3
#
# Very simple serial terminal
#
# This file is part of pySerial. https://github.com/pyserial/pyserial
# (C)2002-2015 Chris Liechti <cliechti@gmx.net>
#
# SPDX-License-Identifier: BSD-3-Clause
import codecs
import os
import sys
import threading
import serial
from serial.tools.list_ports import comports
from serial.tools import hexlify_codec
# pylint: disable=wrong-import-order,wrong-import-position
codecs.register(lambda c: hexlify_codec.getregentry()
if c == 'hexlify' else None)
try:
raw_input
except NameError:
# pylint: disable=redefined-builtin,invalid-name
raw_input = input # in Python 3, input() already behaves like raw_input
unichr = chr
def key_description(character):
"""generate a readable description for a key"""
ascii_code = ord(character)
if ascii_code < 32:
return 'Ctrl+{:c}'.format(ord('@') + ascii_code)
else:
return repr(character)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class ConsoleBase(object):
"""OS abstraction for console (input/output codec, no echo)"""
def __init__(self):
if sys.version_info >= (3, 0):
self.byte_output = sys.stdout.buffer
else:
self.byte_output = sys.stdout
self.output = sys.stdout
def setup(self):
"""Set console to read single characters, no echo"""
def cleanup(self):
"""Restore default console settings"""
def getkey(self):
"""Read a single key from the console"""
return None
def write_bytes(self, byte_string):
"""Write bytes (already encoded)"""
self.byte_output.write(byte_string)
self.byte_output.flush()
def write(self, text):
"""Write string"""
self.output.write(text)
self.output.flush()
def cancel(self):
"""Cancel getkey operation"""
# - - - - - - - - - - - - - - - - - - - - - - - -
# context manager:
# switch terminal temporary to normal mode (e.g. to get user input)
def __enter__(self):
self.cleanup()
return self
def __exit__(self, *args, **kwargs):
self.setup()
if os.name == 'nt': # noqa
import msvcrt
import ctypes
class Out(object):
"""file-like wrapper that uses os.write"""
def __init__(self, fd):
self.fd = fd
def flush(self):
pass
def write(self, s):
os.write(self.fd, s)
class Console(ConsoleBase):
def __init__(self):
super(Console, self).__init__()
self._saved_ocp = ctypes.windll.kernel32.GetConsoleOutputCP()
self._saved_icp = ctypes.windll.kernel32.GetConsoleCP()
ctypes.windll.kernel32.SetConsoleOutputCP(65001)
ctypes.windll.kernel32.SetConsoleCP(65001)
self.output = codecs.getwriter(
'UTF-8')(Out(sys.stdout.fileno()), 'replace')
# the change of the code page is not propagated to Python, manually fix it
sys.stderr = codecs.getwriter(
'UTF-8')(Out(sys.stderr.fileno()), 'replace')
sys.stdout = self.output
self.output.encoding = 'UTF-8' # needed for input
def __del__(self):
ctypes.windll.kernel32.SetConsoleOutputCP(self._saved_ocp)
ctypes.windll.kernel32.SetConsoleCP(self._saved_icp)
def getkey(self):
while True:
z = msvcrt.getwch()
if z == unichr(13):
return unichr(10)
elif z in (unichr(0), unichr(0x0e)): # function keys, ignore
msvcrt.getwch()
else:
return z
def cancel(self):
# CancelIo, CancelSynchronousIo do not seem to work when using
# getwch, so instead, send a key to the window with the console
hwnd = ctypes.windll.kernel32.GetConsoleWindow()
ctypes.windll.user32.PostMessageA(hwnd, 0x100, 0x0d, 0)
elif os.name == 'posix':
import atexit
import termios
import fcntl
class Console(ConsoleBase):
def __init__(self):
super(Console, self).__init__()
self.fd = sys.stdin.fileno()
self.old = termios.tcgetattr(self.fd)
atexit.register(self.cleanup)
if sys.version_info < (3, 0):
self.enc_stdin = codecs.getreader(
sys.stdin.encoding)(sys.stdin)
else:
self.enc_stdin = sys.stdin
def setup(self):
new = termios.tcgetattr(self.fd)
new[3] = new[3] & ~termios.ICANON & ~termios.ECHO & ~termios.ISIG
new[6][termios.VMIN] = 1
new[6][termios.VTIME] = 0
termios.tcsetattr(self.fd, termios.TCSANOW, new)
def getkey(self):
c = self.enc_stdin.read(1)
if c == unichr(0x7f):
# map the BS key (which yields DEL) to backspace
c = unichr(8)
return c
def cancel(self):
fcntl.ioctl(self.fd, termios.TIOCSTI, b'\0')
def cleanup(self):
termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.old)
else:
raise NotImplementedError(
'Sorry no implementation for your platform ({}) available.'.format(sys.platform))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class Transform(object):
"""do-nothing: forward all data unchanged"""
def rx(self, text):
"""text received from serial port"""
return text
def tx(self, text):
"""text to be sent to serial port"""
return text
def echo(self, text):
"""text to be sent but displayed on console"""
return text
class CRLF(Transform):
"""ENTER sends CR+LF"""
def tx(self, text):
return text.replace('\n', '\r\n')
class CR(Transform):
"""ENTER sends CR"""
def rx(self, text):
return text.replace('\r', '\n')
def tx(self, text):
return text.replace('\n', '\r')
class LF(Transform):
"""ENTER sends LF"""
class NoTerminal(Transform):
"""remove typical terminal control codes from input"""
REPLACEMENT_MAP = dict((x, 0x2400 + x)
for x in range(32) if unichr(x) not in '\r\n\b\t')
REPLACEMENT_MAP.update(
{
0x7F: 0x2421, # DEL
0x9B: 0x2425, # CSI
})
def rx(self, text):
return text.translate(self.REPLACEMENT_MAP)
echo = rx
class NoControls(NoTerminal):
"""Remove all control codes, incl. CR+LF"""
REPLACEMENT_MAP = dict((x, 0x2400 + x) for x in range(32))
REPLACEMENT_MAP.update(
{
0x20: 0x2423, # visual space
0x7F: 0x2421, # DEL
0x9B: 0x2425, # CSI
})
class Printable(Transform):
"""Show decimal code for all non-ASCII characters and replace most control codes"""
def rx(self, text):
r = []
for c in text:
if ' ' <= c < '\x7f' or c in '\r\n\b\t':
r.append(c)
elif c < ' ':
r.append(unichr(0x2400 + ord(c)))
else:
r.extend(unichr(0x2080 + ord(d) - 48)
for d in '{:d}'.format(ord(c)))
r.append(' ')
return ''.join(r)
echo = rx
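# Worked example of Printable above (illustrative):
#   Printable().rx(u'\x01A\xe9') == u'\u2401' + 'A' + u'\u2082\u2083\u2083' + ' '
# The control byte becomes a control-picture glyph, 'A' passes through, and the
# non-ASCII character is shown as its decimal code 233 in subscript digits
# followed by a space.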
class Colorize(Transform):
"""Apply different colors for received and echo"""
def __init__(self):
# XXX make it configurable, use colorama?
self.input_color = '\x1b[37m'
self.echo_color = '\x1b[31m'
def rx(self, text):
return self.input_color + text
def echo(self, text):
return self.echo_color + text
class DebugIO(Transform):
"""Print what is sent and received"""
def rx(self, text):
sys.stderr.write(' [RX:{}] '.format(repr(text)))
sys.stderr.flush()
return text
def tx(self, text):
sys.stderr.write(' [TX:{}] '.format(repr(text)))
sys.stderr.flush()
return text
# other ideas:
# - add date/time for each newline
# - insert newline after: a) timeout b) packet end character
EOL_TRANSFORMATIONS = {
'crlf': CRLF,
'cr': CR,
'lf': LF,
}
TRANSFORMATIONS = {
'direct': Transform, # no transformation
'default': NoTerminal,
'nocontrol': NoControls,
'printable': Printable,
'colorize': Colorize,
'debug': DebugIO,
}
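# Illustrative helper (not part of upstream miniterm): Miniterm's
# update_transformations() below instantiates [EOL transform] + [filters] for
# the tx direction and applies the same chain in reverse order for rx. With
# eol='crlf' and the 'debug' filter, a typed '\n' leaves as '\r\n'.
def _transformation_chain_demo():
    """Show how the transformations above are chained (console -> serial)."""
    chain = [CRLF(), DebugIO()]      # what update_transformations() would build
    text = '\n'
    for t in chain:                  # tx direction, applied in order
        text = t.tx(text)            # CRLF maps '\n' -> '\r\n'; DebugIO logs to stderr
    return text                      # '\r\n'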
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def ask_for_port():
"""\
Show a list of ports and ask the user for a choice. To make selection
easier on systems with long device names, also allow the input of an
index.
"""
sys.stderr.write('\n--- Available ports:\n')
ports = []
for n, (port, desc, hwid) in enumerate(sorted(comports()), 1):
sys.stderr.write('--- {:2}: {:20} {!r}\n'.format(n, port, desc))
ports.append(port)
while True:
port = raw_input('--- Enter port index or full name: ')
try:
index = int(port) - 1
if not 0 <= index < len(ports):
sys.stderr.write('--- Invalid index!\n')
continue
except ValueError:
pass
else:
port = ports[index]
return port
class Miniterm(object):
"""\
Terminal application. Copy data from serial port to console and vice versa.
Handle special keys from the console to show menu etc.
"""
def __init__(self, serial_instance, echo=False, eol='crlf', filters=()):
self.console = Console()
self.serial = serial_instance
self.echo = echo
self.raw = False
self.input_encoding = 'UTF-8'
self.output_encoding = 'UTF-8'
self.eol = eol
self.filters = filters
self.update_transformations()
self.exit_character = 0x1d # GS/CTRL+]
self.menu_character = 0x14 # Menu: CTRL+T
self.alive = None
self._reader_alive = None
self.receiver_thread = None
self.rx_decoder = None
self.tx_decoder = None
def _start_reader(self):
"""Start reader thread"""
self._reader_alive = True
# start serial->console thread
self.receiver_thread = threading.Thread(target=self.reader, name='rx')
self.receiver_thread.daemon = True
self.receiver_thread.start()
def _stop_reader(self):
"""Stop reader thread only, wait for clean exit of thread"""
self._reader_alive = False
if hasattr(self.serial, 'cancel_read'):
self.serial.cancel_read()
self.receiver_thread.join()
def start(self):
"""start worker threads"""
self.alive = True
self._start_reader()
# enter console->serial loop
self.transmitter_thread = threading.Thread(
target=self.writer, name='tx')
self.transmitter_thread.daemon = True
self.transmitter_thread.start()
self.console.setup()
def stop(self):
"""set flag to stop worker threads"""
self.alive = False
def join(self, transmit_only=False):
"""wait for worker threads to terminate"""
self.transmitter_thread.join()
if not transmit_only:
if hasattr(self.serial, 'cancel_read'):
self.serial.cancel_read()
self.receiver_thread.join()
def close(self):
self.serial.close()
def update_transformations(self):
"""take list of transformation classes and instantiate them for rx and tx"""
transformations = [EOL_TRANSFORMATIONS[self.eol]] + [TRANSFORMATIONS[f]
for f in self.filters]
self.tx_transformations = [t() for t in transformations]
self.rx_transformations = list(reversed(self.tx_transformations))
def set_rx_encoding(self, encoding, errors='replace'):
"""set encoding for received data"""
self.input_encoding = encoding
self.rx_decoder = codecs.getincrementaldecoder(encoding)(errors)
def set_tx_encoding(self, encoding, errors='replace'):
"""set encoding for transmitted data"""
self.output_encoding = encoding
self.tx_encoder = codecs.getincrementalencoder(encoding)(errors)
def dump_port_settings(self):
"""Write current settings to sys.stderr"""
sys.stderr.write("\n--- Settings: {p.name} {p.baudrate},{p.bytesize},{p.parity},{p.stopbits}\n".format(
p=self.serial))
sys.stderr.write('--- RTS: {:8} DTR: {:8} BREAK: {:8}\n'.format(
('active' if self.serial.rts else 'inactive'),
('active' if self.serial.dtr else 'inactive'),
('active' if self.serial.break_condition else 'inactive')))
try:
sys.stderr.write('--- CTS: {:8} DSR: {:8} RI: {:8} CD: {:8}\n'.format(
('active' if self.serial.cts else 'inactive'),
('active' if self.serial.dsr else 'inactive'),
('active' if self.serial.ri else 'inactive'),
('active' if self.serial.cd else 'inactive')))
except serial.SerialException:
# on RFC 2217 ports, it can happen if no modem state notification was
# yet received. ignore this error.
pass
sys.stderr.write(
'--- software flow control: {}\n'.format('active' if self.serial.xonxoff else 'inactive'))
sys.stderr.write(
'--- hardware flow control: {}\n'.format('active' if self.serial.rtscts else 'inactive'))
sys.stderr.write(
'--- serial input encoding: {}\n'.format(self.input_encoding))
sys.stderr.write(
'--- serial output encoding: {}\n'.format(self.output_encoding))
sys.stderr.write('--- EOL: {}\n'.format(self.eol.upper()))
sys.stderr.write('--- filters: {}\n'.format(' '.join(self.filters)))
def reader(self):
"""loop and copy serial->console"""
try:
while self.alive and self._reader_alive:
# read all that is there or wait for one byte
data = self.serial.read(self.serial.in_waiting or 1)
if data:
if self.raw:
self.console.write_bytes(data)
else:
text = self.rx_decoder.decode(data)
for transformation in self.rx_transformations:
text = transformation.rx(text)
self.console.write(text)
except serial.SerialException:
self.alive = False
self.console.cancel()
raise # XXX handle instead of re-raise?
def writer(self):
"""\
Loop and copy console->serial until self.exit_character character is
found. When self.menu_character is found, interpret the next key
locally.
"""
menu_active = False
try:
while self.alive:
try:
c = self.console.getkey()
except KeyboardInterrupt:
c = '\x03'
if not self.alive:
break
if menu_active:
self.handle_menu_key(c)
menu_active = False
elif c == self.menu_character:
menu_active = True # next char will be for menu
elif c == self.exit_character:
self.stop() # exit app
break
else:
# ~ if self.raw:
text = c
for transformation in self.tx_transformations:
text = transformation.tx(text)
self.serial.write(self.tx_encoder.encode(text))
if self.echo:
echo_text = c
for transformation in self.tx_transformations:
echo_text = transformation.echo(echo_text)
self.console.write(echo_text)
except:
self.alive = False
raise
def handle_menu_key(self, c):
"""Implement a simple menu / settings"""
if c == self.menu_character or c == self.exit_character:
# Menu/exit character again -> send itself
self.serial.write(self.tx_encoder.encode(c))
if self.echo:
self.console.write(c)
elif c == '\x15': # CTRL+U -> upload file
self.upload_file()
elif c in '\x08hH?': # CTRL+H, h, H, ? -> Show help
sys.stderr.write(self.get_help_text())
elif c == '\x12': # CTRL+R -> Toggle RTS
self.serial.rts = not self.serial.rts
sys.stderr.write(
'--- RTS {} ---\n'.format('active' if self.serial.rts else 'inactive'))
elif c == '\x04': # CTRL+D -> Toggle DTR
self.serial.dtr = not self.serial.dtr
sys.stderr.write(
'--- DTR {} ---\n'.format('active' if self.serial.dtr else 'inactive'))
elif c == '\x02': # CTRL+B -> toggle BREAK condition
self.serial.break_condition = not self.serial.break_condition
sys.stderr.write(
'--- BREAK {} ---\n'.format('active' if self.serial.break_condition else 'inactive'))
elif c == '\x05': # CTRL+E -> toggle local echo
self.echo = not self.echo
sys.stderr.write(
'--- local echo {} ---\n'.format('active' if self.echo else 'inactive'))
elif c == '\x06': # CTRL+F -> edit filters
self.change_filter()
elif c == '\x0c': # CTRL+L -> EOL mode
modes = list(EOL_TRANSFORMATIONS) # keys
eol = modes.index(self.eol) + 1
if eol >= len(modes):
eol = 0
self.eol = modes[eol]
sys.stderr.write('--- EOL: {} ---\n'.format(self.eol.upper()))
self.update_transformations()
elif c == '\x01': # CTRL+A -> set encoding
self.change_encoding()
elif c == '\x09': # CTRL+I -> info
self.dump_port_settings()
# ~ elif c == '\x01': # CTRL+A -> cycle escape mode
# ~ elif c == '\x0c': # CTRL+L -> cycle linefeed mode
elif c in 'pP': # P -> change port
self.change_port()
elif c in 'sS': # S -> suspend / open port temporarily
self.suspend_port()
elif c in 'bB': # B -> change baudrate
self.change_baudrate()
elif c == '8': # 8 -> change to 8 bits
self.serial.bytesize = serial.EIGHTBITS
self.dump_port_settings()
elif c == '7': # 7 -> change to 7 bits
self.serial.bytesize = serial.SEVENBITS
self.dump_port_settings()
elif c in 'eE': # E -> change to even parity
self.serial.parity = serial.PARITY_EVEN
self.dump_port_settings()
elif c in 'oO': # O -> change to odd parity
self.serial.parity = serial.PARITY_ODD
self.dump_port_settings()
elif c in 'mM': # M -> change to mark parity
self.serial.parity = serial.PARITY_MARK
self.dump_port_settings()
elif c in 'sS': # S -> change to space parity
self.serial.parity = serial.PARITY_SPACE
self.dump_port_settings()
elif c in 'nN': # N -> change to no parity
self.serial.parity = serial.PARITY_NONE
self.dump_port_settings()
elif c == '1': # 1 -> change to 1 stop bits
self.serial.stopbits = serial.STOPBITS_ONE
self.dump_port_settings()
elif c == '2': # 2 -> change to 2 stop bits
self.serial.stopbits = serial.STOPBITS_TWO
self.dump_port_settings()
elif c == '3': # 3 -> change to 1.5 stop bits
self.serial.stopbits = serial.STOPBITS_ONE_POINT_FIVE
self.dump_port_settings()
elif c in 'xX': # X -> change software flow control
self.serial.xonxoff = (c == 'X')
self.dump_port_settings()
elif c in 'rR': # R -> change hardware flow control
self.serial.rtscts = (c == 'R')
self.dump_port_settings()
else:
sys.stderr.write(
'--- unknown menu character {} --\n'.format(key_description(c)))
def upload_file(self):
"""Ask user for filename and send its contents"""
sys.stderr.write('\n--- File to upload: ')
sys.stderr.flush()
with self.console:
filename = sys.stdin.readline().rstrip('\r\n')
if filename:
try:
with open(filename, 'rb') as f:
sys.stderr.write(
'--- Sending file {} ---\n'.format(filename))
while True:
block = f.read(1024)
if not block:
break
self.serial.write(block)
# Wait for output buffer to drain.
self.serial.flush()
sys.stderr.write('.') # Progress indicator.
sys.stderr.write(
'\n--- File {} sent ---\n'.format(filename))
except IOError as e:
sys.stderr.write(
'--- ERROR opening file {}: {} ---\n'.format(filename, e))
def change_filter(self):
"""change the i/o transformations"""
sys.stderr.write('\n--- Available Filters:\n')
sys.stderr.write('\n'.join(
'--- {:<10} = {.__doc__}'.format(k, v)
for k, v in sorted(TRANSFORMATIONS.items())))
sys.stderr.write(
'\n--- Enter new filter name(s) [{}]: '.format(' '.join(self.filters)))
with self.console:
new_filters = sys.stdin.readline().lower().split()
if new_filters:
for f in new_filters:
if f not in TRANSFORMATIONS:
sys.stderr.write(
'--- unknown filter: {}\n'.format(repr(f)))
break
else:
self.filters = new_filters
self.update_transformations()
sys.stderr.write('--- filters: {}\n'.format(' '.join(self.filters)))
def change_encoding(self):
"""change encoding on the serial port"""
sys.stderr.write(
'\n--- Enter new encoding name [{}]: '.format(self.input_encoding))
with self.console:
new_encoding = sys.stdin.readline().strip()
if new_encoding:
try:
codecs.lookup(new_encoding)
except LookupError:
sys.stderr.write(
'--- invalid encoding name: {}\n'.format(new_encoding))
else:
self.set_rx_encoding(new_encoding)
self.set_tx_encoding(new_encoding)
sys.stderr.write(
'--- serial input encoding: {}\n'.format(self.input_encoding))
sys.stderr.write(
'--- serial output encoding: {}\n'.format(self.output_encoding))
def change_baudrate(self):
"""change the baudrate"""
sys.stderr.write('\n--- Baudrate: ')
sys.stderr.flush()
with self.console:
backup = self.serial.baudrate
try:
self.serial.baudrate = int(sys.stdin.readline().strip())
except ValueError as e:
sys.stderr.write(
'--- ERROR setting baudrate: {} ---\n'.format(e))
self.serial.baudrate = backup
else:
self.dump_port_settings()
def change_port(self):
"""Have a conversation with the user to change the serial port"""
with self.console:
try:
port = ask_for_port()
except KeyboardInterrupt:
port = None
if port and port != self.serial.port:
# reader thread needs to be shut down
self._stop_reader()
# save settings
settings = self.serial.getSettingsDict()
try:
new_serial = serial.serial_for_url(port, do_not_open=True)
# restore settings and open
new_serial.applySettingsDict(settings)
new_serial.rts = self.serial.rts
new_serial.dtr = self.serial.dtr
new_serial.open()
new_serial.break_condition = self.serial.break_condition
except Exception as e:
sys.stderr.write(
'--- ERROR opening new port: {} ---\n'.format(e))
new_serial.close()
else:
self.serial.close()
self.serial = new_serial
sys.stderr.write(
'--- Port changed to: {} ---\n'.format(self.serial.port))
# and restart the reader thread
self._start_reader()
def suspend_port(self):
"""\
open port temporarily, allow reconnect, exit and port change to get
out of the loop
"""
# reader thread needs to be shut down
self._stop_reader()
self.serial.close()
sys.stderr.write(
'\n--- Port closed: {} ---\n'.format(self.serial.port))
do_change_port = False
while not self.serial.is_open:
sys.stderr.write('--- Quit: {exit} | p: port change | any other key to reconnect ---\n'.format(
exit=key_description(self.exit_character)))
k = self.console.getkey()
if k == self.exit_character:
self.stop() # exit app
break
elif k in 'pP':
do_change_port = True
break
try:
self.serial.open()
except Exception as e:
sys.stderr.write('--- ERROR opening port: {} ---\n'.format(e))
if do_change_port:
self.change_port()
else:
# and restart the reader thread
self._start_reader()
sys.stderr.write(
'--- Port opened: {} ---\n'.format(self.serial.port))
def get_help_text(self):
"""return the help text"""
# help text, starts with blank line!
return """
--- pySerial ({version}) - miniterm - help
---
--- {exit:8} Exit program
--- {menu:8} Menu escape key, followed by:
--- Menu keys:
--- {menu:7} Send the menu character itself to remote
--- {exit:7} Send the exit character itself to remote
--- {info:7} Show info
--- {upload:7} Upload file (prompt will be shown)
--- {repr:7} encoding
--- {filter:7} edit filters
--- Toggles:
--- {rts:7} RTS {dtr:7} DTR {brk:7} BREAK
--- {echo:7} echo {eol:7} EOL
---
--- Port settings ({menu} followed by the following):
--- p change port
--- 7 8 set data bits
--- N E O S M change parity (None, Even, Odd, Space, Mark)
--- 1 2 3 set stop bits (1, 2, 1.5)
--- b change baud rate
--- x X disable/enable software flow control
--- r R disable/enable hardware flow control
""".format(version=getattr(serial, 'VERSION', 'unknown version'),
exit=key_description(self.exit_character),
menu=key_description(self.menu_character),
rts=key_description('\x12'),
dtr=key_description('\x04'),
brk=key_description('\x02'),
echo=key_description('\x05'),
info=key_description('\x09'),
upload=key_description('\x15'),
repr=key_description('\x01'),
filter=key_description('\x06'),
eol=key_description('\x0c'))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# default args can be used to override when calling main() from another script
# e.g. to create a miniterm-my-device.py
def main(default_port=None, default_baudrate=9600, default_rts=None, default_dtr=None):
"""Command line tool, entry point"""
import argparse
parser = argparse.ArgumentParser(
description="Miniterm - A simple terminal program for the serial port.")
parser.add_argument(
"port",
nargs='?',
help="serial port name ('-' to show port list)",
default=default_port)
parser.add_argument(
"baudrate",
nargs='?',
type=int,
help="set baud rate, default: %(default)s",
default=default_baudrate)
group = parser.add_argument_group("port settings")
group.add_argument(
"--parity",
choices=['N', 'E', 'O', 'S', 'M'],
type=lambda c: c.upper(),
help="set parity, one of {N E O S M}, default: N",
default='N')
group.add_argument(
"--rtscts",
action="store_true",
help="enable RTS/CTS flow control (default off)",
default=False)
group.add_argument(
"--xonxoff",
action="store_true",
help="enable software flow control (default off)",
default=False)
group.add_argument(
"--rts",
type=int,
help="set initial RTS line state (possible values: 0, 1)",
default=default_rts)
group.add_argument(
"--dtr",
type=int,
help="set initial DTR line state (possible values: 0, 1)",
default=default_dtr)
group.add_argument(
"--ask",
action="store_true",
help="ask again for port when open fails",
default=False)
group = parser.add_argument_group("data handling")
group.add_argument(
"-e", "--echo",
action="store_true",
help="enable local echo (default off)",
default=False)
group.add_argument(
"--encoding",
dest="serial_port_encoding",
metavar="CODEC",
help="set the encoding for the serial port (e.g. hexlify, Latin1, UTF-8), default: %(default)s",
default='UTF-8')
group.add_argument(
"-f", "--filter",
action="append",
metavar="NAME",
help="add text transformation",
default=[])
group.add_argument(
"--eol",
choices=['CR', 'LF', 'CRLF'],
type=lambda c: c.upper(),
help="end of line mode",
default='CRLF')
group.add_argument(
"--raw",
action="store_true",
help="Do not apply any encodings/transformations",
default=False)
group = parser.add_argument_group("hotkeys")
group.add_argument(
"--exit-char",
type=int,
metavar='NUM',
help="Unicode of special character that is used to exit the application, default: %(default)s",
default=0x1d) # GS/CTRL+]
group.add_argument(
"--menu-char",
type=int,
metavar='NUM',
help="Unicode code of special character that is used to control miniterm (menu), default: %(default)s",
default=0x14) # Menu: CTRL+T
group = parser.add_argument_group("diagnostics")
group.add_argument(
"-q", "--quiet",
action="store_true",
help="suppress non-error messages",
default=False)
group.add_argument(
"--develop",
action="store_true",
help="show Python traceback on error",
default=False)
args = parser.parse_args()
if args.menu_char == args.exit_char:
parser.error('--exit-char can not be the same as --menu-char')
if args.filter:
if 'help' in args.filter:
sys.stderr.write('Available filters:\n')
sys.stderr.write('\n'.join(
'{:<10} = {.__doc__}'.format(k, v)
for k, v in sorted(TRANSFORMATIONS.items())))
sys.stderr.write('\n')
sys.exit(1)
filters = args.filter
else:
filters = ['default']
while True:
# no port given on command line -> ask user now
if args.port is None or args.port == '-':
try:
args.port = ask_for_port()
except KeyboardInterrupt:
sys.stderr.write('\n')
parser.error('user aborted and port is not given')
else:
if not args.port:
parser.error('port is not given')
try:
serial_instance = serial.serial_for_url(
args.port,
args.baudrate,
parity=args.parity,
rtscts=args.rtscts,
xonxoff=args.xonxoff,
do_not_open=True)
if not hasattr(serial_instance, 'cancel_read'):
# enable timeout for alive flag polling if cancel_read is not available
serial_instance.timeout = 1
if args.dtr is not None:
if not args.quiet:
sys.stderr.write(
'--- forcing DTR {}\n'.format('active' if args.dtr else 'inactive'))
serial_instance.dtr = args.dtr
if args.rts is not None:
if not args.quiet:
sys.stderr.write(
'--- forcing RTS {}\n'.format('active' if args.rts else 'inactive'))
serial_instance.rts = args.rts
serial_instance.open()
except serial.SerialException as e:
sys.stderr.write(
'could not open port {}: {}\n'.format(repr(args.port), e))
if args.develop:
raise
if not args.ask:
sys.exit(1)
else:
args.port = '-'
else:
break
miniterm = Miniterm(
serial_instance,
echo=args.echo,
eol=args.eol.lower(),
filters=filters)
miniterm.exit_character = unichr(args.exit_char)
miniterm.menu_character = unichr(args.menu_char)
miniterm.raw = args.raw
miniterm.set_rx_encoding(args.serial_port_encoding)
miniterm.set_tx_encoding(args.serial_port_encoding)
if not args.quiet:
sys.stderr.write('--- Miniterm on {p.name} {p.baudrate},{p.bytesize},{p.parity},{p.stopbits} ---\n'.format(
p=miniterm.serial))
sys.stderr.write('--- Quit: {} | Menu: {} | Help: {} followed by {} ---\n'.format(
key_description(miniterm.exit_character),
key_description(miniterm.menu_character),
key_description(miniterm.menu_character),
key_description('\x08')))
miniterm.start()
try:
miniterm.join(True)
except KeyboardInterrupt:
pass
if not args.quiet:
sys.stderr.write("\n--- exit ---\n")
miniterm.join()
miniterm.close()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if __name__ == '__main__':
main()
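# Wrapper sketch for the note above main(): a preconfigured launcher such as
# miniterm-my-device.py only needs to import this module and call main() with
# its own defaults. The port and baud rate below are placeholders, and the
# import path assumes the usual pyserial layout (adjust it to wherever this
# copy of the module lives):
#
#     from serial.tools.miniterm import main
#
#     if __name__ == '__main__':
#         main(default_port='/dev/ttyUSB0', default_baudrate=115200)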
|
wifi_scanner.py
|
#!/anaconda3/bin/python
import socket
import re
import subprocess, os
import platform
from ipaddress import ip_network, IPv4Network, IPv4Address, ip_address
import time
from threading import Lock, Thread
from getmac import get_mac_address
def scan_network(target_net: IPv4Network):
active_hosts = [] # all active host ip
lock = Lock() # for threading
threads = [] # all threads
# to direct output of subprocess to
FNULL = open(os.devnull, 'w')
def ping(ip):
# windows ping uses -n instead of -c
param = '-n' if platform.system().lower()=='windows' else '-c'
command = ['ping', param, '1', ip]
# call ping command
result = subprocess.call(command, stdout=FNULL, stderr=subprocess.STDOUT)
# if ping successful
if result == 0:
lock.acquire()
active_hosts.append(ip)
lock.release()
start_time = time.time()
print(f"Scanning {target_net.compressed}...", end="\n\n")
# iterate through hosts
for host in target_net.hosts():
host_ip = str(host.compressed)
t = Thread(target=ping, args=(host_ip, ))
threads.append(t)
threads[-1].start()
for thread in threads:
thread.join()
# display hosts and information
print(f"Found {len(active_hosts)} active hosts.")
for count, host_ip in enumerate(active_hosts):
print(f"\nHost {count + 1}")
print("-----------------------")
try:
hostname = socket.gethostbyaddr(host_ip)[0]
except socket.herror:
hostname = "Unknown"
host_mac = get_mac_address(ip=host_ip)
print(f"Host:\t{hostname}")
print(f"IP:\t{host_ip}")
print(f"MAC:\t{host_mac}")
time.sleep(.5)
# elapsed_time = (dt.datetime.now() - start_time).total_seconds()
elapsed_time = time.time() - start_time
print(f"\nNetwork Scan Finished.\nTime Elapsed: {elapsed_time:.3f} seconds.")
    # exit the whole process immediately; 0 indicates the scan completed successfully
    os._exit(0)
# main process
def main():
print('''
_____ .__ /\ __ __._____________.__ _________
/ _ \ _____|__| ____ ___)/ ______ / \ / \__\_ _____/|__| / _____/ ____ _____ ____ ____ ___________
/ /_\ \ / ___/ |/ __ \ / \ / ___/ \ \/\/ / || __) | | \_____ \_/ ___\\__ \ / \ / \_/ __ \_ __ \
/ | \\___ \| \ ___/| | \\___ \ \ /| || \ | | / \ \___ / __ \| | \ | \ ___/| | \/
\____|__ /____ >__|\___ >___| /____ > \__/\ / |__|\___ / |__| /_______ /\___ >____ /___| /___| /\___ >__|
\/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/ \/
''')
# check if input is ipv4 with cidr notation
while True:
target_net = input("Please enter target network to scan: ")
        if re.match(r'(\d+[.]){3}\d+/\d+', target_net):
target_net: IPv4Network = ip_network(target_net)
break
        elif re.match(r'(\d+[.]){3}\d+', target_net):
print("Please enter a network not a single host.")
else:
print("Please enter valid IPv4 address with CIDR notation.")
scan_network(target_net)
if __name__ == "__main__":
main()
|
MainApplication.py
|
# Author: Moises Henrique Pereira
# this class handles the interface and the dash application (if needed)
import sys
from PyQt5 import QtWidgets
# from Dash.DashApp import dashApp
# import threading
from .MainApplicationWindow import MainApplicationWindow
class MainApplication():
def __init__(self):
        self.mainApplicationWindow = None
        # this line enables running dash in another thread
        # threading.Thread(target=runDash, args=(True,True), daemon=True).start()
    # this function instantiates the QApplication so that the MainWindow widget can be instantiated
def run(self):
app = QtWidgets.QApplication(sys.argv)
self.mainApplicationWindow = MainApplicationWindow()
self.mainApplicationWindow.show()
sys.exit(app.exec_())
# this function runs the dash server
# def runDash(debug, use_reloader):
# dashApp.run_server(debug=debug, use_reloader=use_reloader)
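# Usage sketch (an assumption, not part of the original file; the relative import
# means this module must be imported from inside its package, whose name is not
# shown here):
#
#     from <package>.MainApplication import MainApplication
#
#     MainApplication().run()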
|
sigfinder.py
|
#!/usr/bin/python3
import argparse
import concurrent.futures
import json
import logging
import os
import socket
import subprocess
import threading
import time
import bjoern
import falcon
import jinja2
import pandas as pd
import requests
import schedule
from prometheus_client import Counter
from prometheus_client import Gauge
from prometheus_client import start_http_server
from gamutrf.sigwindows import calc_db
from gamutrf.sigwindows import choose_record_signal
from gamutrf.sigwindows import choose_recorders
from gamutrf.sigwindows import find_sig_windows
from gamutrf.sigwindows import get_center
from gamutrf.sigwindows import parse_freq_excluded
from gamutrf.utils import MTU
SOCKET_TIMEOUT = 1.0
ROLLOVERHZ = 100e6
PEAK_DBS = {}
def falcon_response(resp, text, status):
resp.status = status
resp.text = text
resp.content_type = 'text/html'
def ok_response(resp, text='ok!'):
falcon_response(resp, text=text, status=falcon.HTTP_200)
def error_response(resp, text='error!'):
falcon_response(resp, text=text, status=falcon.HTTP_500)
def load_template(name):
path = os.path.join('templates', name)
with open(os.path.abspath(path), 'r') as fp:
return jinja2.Template(fp.read())
class ActiveRequests:
def on_get(self, req, resp):
all_jobs = schedule.get_jobs()
ok_response(resp, f'{all_jobs}')
class ScannerForm:
def on_get(self, req, resp):
template = load_template('scanner_form.html')
ok_response(resp, template.render(bins=PEAK_DBS))
class Result:
def on_post(self, req, resp):
# TODO validate input
try:
recorder = f'http://{req.media["worker"]}:8000/'
signal_hz = int(int(req.media['frequency']) * 1e6)
record_bps = int(int(req.media['bandwidth']) * (1024 * 1024))
record_samples = int(record_bps * int(req.media['duration']))
recorder_args = f'record/{signal_hz}/{record_samples}/{record_bps}'
timeout = int(req.media['duration'])
response = None
if int(req.media['repeat']) == -1:
schedule.every(timeout).seconds.do(run_threaded, record, recorder=recorder,
recorder_args=recorder_args, timeout=timeout).tag(f'{recorder}{recorder_args}-{timeout}')
ok_response(resp)
else:
response = recorder_req(recorder, recorder_args, timeout)
time.sleep(timeout)
for _ in range(int(req.media['repeat'])):
response = recorder_req(recorder, recorder_args, timeout)
time.sleep(timeout)
if response:
ok_response(resp)
else:
ok_response(
resp, f'Request {recorder} {recorder_args} failed.')
except Exception as e:
error_response(resp, f'{e}')
def record(recorder, recorder_args, timeout):
recorder_req(recorder, recorder_args, timeout)
def run_threaded(job_func, recorder, recorder_args, timeout):
job_thread = threading.Thread(
target=job_func, args=(recorder, recorder_args, timeout,))
job_thread.start()
def init_prom_vars():
prom_vars = {
'last_bin_freq_time': Gauge('last_bin_freq_time', 'epoch time last signal in each bin', labelnames=('bin_mhz',)),
'worker_record_request': Gauge('worker_record_request', 'record requests made to workers', labelnames=('worker',)),
'freq_power': Gauge('freq_power', 'bin frequencies and db over time', labelnames=('bin_freq',)),
'new_bins': Counter('new_bins', 'frequencies of new bins', labelnames=('bin_freq',)),
'old_bins': Counter('old_bins', 'frequencies of old bins', labelnames=('bin_freq',)),
'bin_freq_count': Counter('bin_freq_count', 'count of signals in each bin', labelnames=('bin_mhz',)),
'frame_counter': Counter('frame_counter', 'number of frames processed'),
}
return prom_vars
def update_prom_vars(peak_dbs, new_bins, old_bins, prom_vars):
freq_power = prom_vars['freq_power']
new_bins_prom = prom_vars['new_bins']
old_bins_prom = prom_vars['old_bins']
for freq in peak_dbs:
freq_power.labels(bin_freq=freq).set(peak_dbs[freq])
for nbin in new_bins:
new_bins_prom.labels(bin_freq=nbin).inc()
for obin in old_bins:
old_bins_prom.labels(bin_freq=obin).inc()
def process_fft(args, prom_vars, ts, fftbuffer, lastbins):
global PEAK_DBS
tsc = time.ctime(ts)
logging.info(f'new frame at {tsc}')
df = pd.DataFrame(fftbuffer, columns=['ts', 'freq', 'db'])
df['freq'] /= 1e6
df = calc_db(df)
monitor_bins = set()
peak_dbs = {}
bin_freq_count = prom_vars['bin_freq_count']
last_bin_freq_time = prom_vars['last_bin_freq_time']
freq_start_mhz = args.freq_start / 1e6
freq_end_mhz = args.freq_end / 1e6
for signal in find_sig_windows(df, window=args.window, threshold=args.threshold):
start_freq, end_freq = signal[:2]
peak_db = signal[-1]
center_freq = start_freq + ((end_freq - start_freq) / 2)
if center_freq < freq_start_mhz or center_freq > freq_end_mhz:
print(f'ignoring {center_freq}')
continue
center_freq = get_center(
center_freq, freq_start_mhz, args.bin_mhz, args.record_bw_mbps)
bin_freq_count.labels(bin_mhz=center_freq).inc()
last_bin_freq_time.labels(bin_mhz=ts).set(ts)
monitor_bins.add(center_freq)
peak_dbs[center_freq] = peak_db
logging.info('current bins %f to %f MHz: %s',
df['freq'].min(), df['freq'].max(), sorted(peak_dbs.items()))
PEAK_DBS = sorted(peak_dbs.items())
new_bins = monitor_bins - lastbins
if new_bins:
logging.info('new bins: %s', sorted(new_bins))
old_bins = lastbins - monitor_bins
if old_bins:
logging.info('old bins: %s', sorted(old_bins))
update_prom_vars(peak_dbs, new_bins, old_bins, prom_vars)
return monitor_bins
def recorder_req(recorder, recorder_args, timeout):
url = f'{recorder}/v1/{recorder_args}'
try:
req = requests.get(url, timeout=timeout)
logging.debug(str(req))
return req
except (requests.exceptions.ConnectionError, requests.exceptions.Timeout) as err:
logging.debug(str(err))
return None
def get_freq_exclusions(args):
recorder_freq_exclusions = {}
for recorder in args.recorder:
req = recorder_req(recorder, 'info', args.record_secs)
if req is None or req.status_code != 200:
continue
excluded = json.loads(req.text).get('freq_excluded', None)
if excluded is None:
continue
recorder_freq_exclusions[recorder] = parse_freq_excluded(excluded)
return recorder_freq_exclusions
def call_record_signals(args, lastbins_history, prom_vars):
if lastbins_history:
signals = []
for bins in lastbins_history:
signals.extend(list(bins))
recorder_freq_exclusions = get_freq_exclusions(
args)
recorder_count = len(recorder_freq_exclusions)
record_signals = choose_record_signal(
signals, recorder_count)
for signal, recorder in choose_recorders(record_signals, recorder_freq_exclusions):
signal_hz = int(signal * 1e6)
record_bps = int(args.record_bw_mbps * (1024 * 1024))
record_samples = int(
record_bps * args.record_secs)
recorder_args = f'record/{signal_hz}/{record_samples}/{record_bps}'
resp = recorder_req(
recorder, recorder_args, args.record_secs)
if resp:
worker_record_request = prom_vars['worker_record_request']
worker_record_request.labels(worker=recorder).set(signal_hz)
def zstd_file(uncompressed_file):
subprocess.check_call(['/usr/bin/zstd', '--rm', uncompressed_file])
def process_fft_lines(args, prom_vars, sock, ext, executor):
lastfreq = 0
fftbuffer = []
lastbins_history = []
lastbins = set()
frame_counter = prom_vars['frame_counter']
txt_buf = ''
while True:
if os.path.exists(args.log):
logging.info(f'{args.log} exists, will append first')
mode = 'ab'
else:
logging.info(f'opening {args.log}')
mode = 'wb'
openlogts = int(time.time())
with open(args.log, mode=mode) as l:
while True:
schedule.run_pending()
try:
sock.settimeout(SOCKET_TIMEOUT)
sock_txt, _ = sock.recvfrom(MTU)
except socket.timeout:
logging.info(
'timeout receiving FFT from scanner - retrying')
continue
if not len(sock_txt):
return
txt_buf += sock_txt.decode('utf8')
lines = txt_buf.splitlines()
if txt_buf.endswith('\n'):
txt_buf = ''
else:
txt_buf = lines[-1]
lines = lines[:-1]
rotatelognow = False
now = int(time.time())
for line in lines:
try:
ts, freq, pw = [float(x) for x in line.strip().split()]
except ValueError:
continue
if pw < 0 or pw > 1:
continue
if freq < 0 or freq > 10e9:
continue
if abs(now - ts) > 60:
continue
l.write(line.encode('utf8') + b'\n')
rollover = abs(freq - lastfreq) > ROLLOVERHZ and fftbuffer
fftbuffer.append((ts, freq, pw))
lastfreq = freq
if rollover:
frame_counter.inc()
lastbins = process_fft(
args, prom_vars, ts, fftbuffer, lastbins)
if lastbins:
lastbins_history = [lastbins] + lastbins_history
lastbins_history = lastbins_history[:args.history]
fftbuffer = []
call_record_signals(args, lastbins_history, prom_vars)
rotate_age = now - openlogts
if rotate_age > args.rotatesecs:
rotatelognow = True
if rotatelognow:
break
new_log = args.log.replace(ext, f'{openlogts}{ext}')
os.rename(args.log, new_log)
executor.submit(zstd_file, new_log)
def find_signals(args, prom_vars):
    try:
        ext = args.log[args.log.rindex('.'):]
    except ValueError:
        logging.fatal(f'cannot parse extension from {args.log}')
        return
with concurrent.futures.ProcessPoolExecutor(1) as executor:
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
sock.setblocking(False)
sock.bind((args.logaddr, args.logport))
process_fft_lines(args, prom_vars, sock, ext, executor)
def main():
parser = argparse.ArgumentParser(
description='watch a scan UDP stream and find signals')
parser.add_argument('--log', default='scan.log', type=str,
help='base path for scan logging')
parser.add_argument('--rotatesecs', default=3600, type=int,
help='rotate scan log after this many seconds')
parser.add_argument('--logaddr', default='127.0.0.1', type=str,
help='UDP stream address')
parser.add_argument('--logport', default=8001, type=int,
help='UDP stream port')
parser.add_argument('--bin_mhz', default=20, type=int,
help='monitoring bin width in MHz')
parser.add_argument('--window', default=4, type=int,
help='signal finding sample window size')
parser.add_argument('--threshold', default=1.5, type=float,
help='signal finding threshold')
parser.add_argument('--history', default=50, type=int,
help='number of frames of signal history to keep')
parser.add_argument('--recorder', action='append', default=[],
help='SDR recorder base URLs (e.g. http://host:port/, multiples can be specified)')
parser.add_argument('--record_bw_mbps', default=20, type=int,
help='record bandwidth in mbps')
parser.add_argument('--record_secs', default=10, type=int,
help='record time duration in seconds')
parser.add_argument('--promport', dest='promport', type=int, default=9000,
help='Prometheus client port')
parser.add_argument(
'--freq-end', dest='freq_end', type=float, default=float(1e9),
help='Set freq_end [default=%(default)r]')
parser.add_argument(
'--freq-start', dest='freq_start', type=float, default=float(100e6),
help='Set freq_start [default=%(default)r]')
args = parser.parse_args()
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(message)s')
prom_vars = init_prom_vars()
start_http_server(args.promport)
x = threading.Thread(target=find_signals, args=(args, prom_vars,))
x.start()
app = falcon.App()
scanner_form = ScannerForm()
result = Result()
active_requests = ActiveRequests()
app.add_route('/', scanner_form)
app.add_route('/result', result)
app.add_route('/requests', active_requests)
bjoern.run(app, '0.0.0.0', 80)
if __name__ == '__main__':
main()
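# Test-feed sketch (an illustration, not part of gamutrf): process_fft_lines()
# consumes newline-terminated "timestamp frequency_hz power" triples over UDP,
# with power in [0, 1] and a timestamp within 60 seconds of the current time.
# With the default --logaddr/--logport a single line can be injected like this:
#
#     import socket, time
#     sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#     sock.sendto('{} {} {}\n'.format(time.time(), 101e6, 0.5).encode('utf8'),
#                 ('127.0.0.1', 8001))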
|
bot.py
|
# -*- coding:utf-8 -*-
from utils.logging.logger import logger
import os
import time
import random
import traceback
from threading import Thread, Timer
from beem.comment import Comment
from steem.settings import settings
from steem.comment import SteemComment
from steem.account import SteemAccount
from steem.writer import Writer
from steem.voter import Voter
from steem.uploader import Uploader
from steem.stream import SteemStream
from steem.collector import query
MINIMUM_VOTE_INTERVAL = 3 # seconds
VOTE_RETRIES = 5
class VoteBot:
def __init__(self, author, mode="stream.comment", config={}):
self.author = author
self.writer = Writer(author=self.author)
self.voter = Voter(author=self.author)
self.uploader = Uploader(author=self.author)
self.mode = mode
self.config = config
# the configuration functions for a vote bot
self.what_to_vote = None
self.who_to_vote = lambda : True
self.when_to_vote = lambda : 15
self.how_to_vote = lambda : 50
self.is_ready = lambda: True
self.after_success = lambda : True
self.last_vote_timestamp = -1
self._vote_queue = []
def _has_reply_comment(self, receiver, message_id):
comments = self._read_comments()
for c in comments:
# check the receiver and the message_id fingerprint
if c.parent_author == receiver and verify_message(message_id, c.body):
logger.info("I found I replied to @{} with message [{}] by searching comment history".format(receiver, message_id))
return (True, c)
return (False, None)
def _has_replied(self, receiver, message_id):
# has reply records in DB, or has replied by checking steem APIs
if self._has_reply_record(receiver, message_id):
return True
(replied, comment) = self._has_reply_comment(receiver, message_id)
if replied:
c = SteemComment(comment=comment.get_parent())
# cache the record into database since not found
self._add_reply_record(receiver, message_id, c, comment["created"])
return True
return False
def _get_reply_body(self, message_id, author):
account = SteemAccount(author)
comments_num = account.remaining_comments() or ''
daily_comments_num = round(account.daily_recovery_comments(), 1) or ''
return get_message(message_id).format(name=author, comments_num=comments_num, daily_comments_num=daily_comments_num)
def reply(self, message_id, post=None, url=None):
""" reply to the newbies' post """
c = SteemComment(comment=post, url=url)
receiver = c.get_comment().author
if not self._has_replied(receiver, message_id):
title = c.get_comment().title
message = self._get_reply_body(message_id, receiver)
self.writer.reply(c.get_comment(), message)
self._add_reply_record(receiver, message_id, c)
logger.info("Replied to @{}'s post [{}] with [{}] message".format(receiver, title, message_id))
return True
else:
            logger.info("Skip replying to account @{} with [{}] message, because we already replied before".format(receiver, message_id))
return False
def vote(self, post=None, url=None, weight=None, retries=VOTE_RETRIES):
c = SteemComment(comment=post, url=url)
if retries <= 0:
            logger.error("Vote {} failed after {} retries".format(c.get_url(), VOTE_RETRIES))
return False
while time.time() - self.last_vote_timestamp < MINIMUM_VOTE_INTERVAL:
wait_time = round(MINIMUM_VOTE_INTERVAL + (random.random() * MINIMUM_VOTE_INTERVAL + 1) * 0.5, 2)
logger.info("Sleep {} seconds to avoid voting too frequently.".format(wait_time))
time.sleep(wait_time)
if time.time() - self.last_vote_timestamp >= MINIMUM_VOTE_INTERVAL:
return self.vote(post, url, weight, retries-1)
success = False
try:
weight = weight or self.weight(c)
success = self.voter.vote(c.get_comment(), weight=weight)
self.last_vote_timestamp = time.time()
except:
            logger.error("Failed when voting {} with error: {}. {} retries left.".format(c.get_url(), traceback.format_exc(), retries-1))
return self.vote(post, url, weight, retries-1)
self.after_success(success)
return success
def start_vote_queue(self):
logger.info("Start Vote Queue...")
def wait_for_vote():
while True:
while(len(self._vote_queue) > 0):
post = self._vote_queue.pop(0)
self.vote(post)
time.sleep(1)
logger.info("Vote Queue Stopped.")
Thread(target=wait_for_vote).start()
def append_to_vote_queue(self, post):
self._vote_queue.append(post)
    def what(self, what_to_vote):
        """ define the condition for voting on a post """
self.what_to_vote = what_to_vote
return self
def when(self, when_to_vote):
""" define the timing of vote for a post """
self.when_to_vote = when_to_vote
return self
    def who(self, who_to_vote):
        """ define whose posts are eligible for a vote """
self.who_to_vote = who_to_vote
return self
    def how(self, how_to_vote):
        """ define the weight of the vote for a post """
self.how_to_vote = how_to_vote
return self
def ready(self, is_ready):
""" define voter has met energy or other requirements """
self.is_ready = is_ready
return self
def done(self, after_success):
""" define the callback after vote is completed successfully """
self.after_success = after_success
return self
def context(self, ctx):
self.ctx = ctx
return self
def weight(self, post):
return self.how_to_vote(post)
def watch(self, ops):
author = ops['author']
def perform_vote():
if isinstance(ops, Comment):
c = SteemComment(comment=ops)
else:
c = SteemComment(ops=ops)
self.append_to_vote_queue(post=c.get_comment())
self.ctx(ops)
try:
if self.what_to_vote(ops) and self.who_to_vote(author) and self.is_ready():
delay = self.when_to_vote(ops) # mins
if delay is not None and delay > 0:
secs = 60.0 * delay
logger.info("I'll vote after {} seconds".format(secs))
t = Timer(secs, perform_vote)
t.start()
else:
logger.info("I'll vote immediately")
perform_vote()
except:
logger.error("Failed when watching the comment [{}] with error: {} .".format(ops, traceback.format_exc()))
def run(self):
self.start_vote_queue()
if self.mode.startswith("stream."):
if self.mode == "stream.comment":
stream = SteemStream(operations=["comment"])
elif self.mode == "stream.vote":
stream = SteemStream(operations=["vote"])
stream.run(callback=self.watch)
elif self.mode.startswith("query."):
if self.mode == "query.comment.post":
self.config['mode'] = "post"
elif self.mode == "query.comment.comment":
self.config['mode'] = "comment"
elif self.mode == "query.comment.all":
self.config['mode'] = "post+comment"
for c in query(self.config):
self.watch(c)
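if __name__ == '__main__':
    # Hedged usage sketch, not part of the original module: "some-author" is a
    # placeholder account and the lambdas mirror the default hooks set in
    # __init__; when() returning 0 makes the bot vote immediately.
    bot = VoteBot(author="some-author", mode="stream.comment")
    bot.what(lambda ops: True) \
       .who(lambda author: author != bot.author) \
       .when(lambda ops: 0) \
       .how(lambda post: 50) \
       .context(lambda ops: None) \
       .run()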
|
sensor_multi.py
|
"""
A MultiSensorDevice represents a single device with multiple
sensors. This allows one interface to the device to be written
and that device to then expose multiple Sensor objects to
the feed.
This works by having a MultiSensorDevice return a collection
of ChildSensors; starting any of those sensors just passes
the command on to the parent device, which then passes data
back to the feed via a collection of queues.
"""
from datetime import datetime
import logging
from threading import Event, Thread
import time
from sensor_feed.sensor import Sensor
LOGGER = logging.getLogger(__name__)
class ChildSensor(Sensor):
"""
A child sensor just passes information back up to its parent.
All scheduling and data fetching is performed by the parent
device class.
"""
def __init__(self, parent, param_name, param_id, param_unit, dtype=float):
self.parent = parent
self.param_name = param_name
self.param_id = param_id
        self.param_unit = param_unit
self.dtype = dtype
def start(self, queue, period):
"""Start this sensor. Delegates to parent."""
self.parent.start(self, queue, period)
def stop(self, join=True):
"""Stop this sensor. Delegates to parent."""
self.parent.stop(self, join)
class MultiSensorDevice:
"""
A MultiSensorDevice represents a logical device that has multiple
sensors.
    A single control thread is used to periodically get data for all
    sensors at once; this data is then split up and provided back to
    the feed on individual queues.
Only child sensors that have had their ``start`` method called
will have a queue to provide data back to the feed so only
these sensors will work.
To subclass this class you need to implement:
* an __init__ method that calls the super __init__ method and
      instantiates a list of ChildSensors on the _children attribute.
* the device_name class attribute
* enqueue_data method
"""
#: Identifying name for the device
device_name = ''
    #: Shortest period between readings (in seconds)
min_period = None
#: Longest possible period between readings (in seconds)
max_period = None
def __init__(self):
self.current_thread = None
self.shutdown_event = None
self.queues = dict()
# implementing classes will need to make this actually
# create some child sensors!
self._children = []
def enqueue_values(self, timestamp):
"""
Actually get the data from the hardware and add it to the
data feed queues.
This class needs to be implemented by any subclass.
It may, for example, use I2C to get data from two or more
sensors and then add that data to the appropriate Queue in
``self.queues``.
"""
raise NotImplementedError("subclass to implement")
def get_sensors(self):
"""Get a list of Sensor-like objects."""
return self._children
def start(self, child, queue, period):
"""
Start collecting data for child.
If this is the first call then a new data collection thread
is started.
"""
if self.min_period is not None and period < self.min_period:
raise ValueError("Requested period is too short " +
"for %s sensor. " % self.device_name +
"Must be greater than {} seconds".format(
self.min_period
))
if self.max_period is not None and period > self.max_period:
raise ValueError("Requested period is too long. " +
"Must be less than {} seconds".format(
self.max_period
))
if child in self.queues:
raise RuntimeError("Child sensor already running.")
self.queues[child] = queue
if self.current_thread is None:
# first sensor to configure, start collector thread
self.shutdown_event = Event()
self.current_thread = self.get_thread(period, self.shutdown_event)
self.current_thread.start()
def stop(self, child, join=True):
"""
Stop collecting data for child.
If this stops the last child then the data collection
thread is stopped.
"""
if self.current_thread is None:
return
if child in self.queues:
del self.queues[child]
if len(self.queues) == 0:
self.shutdown_event.set()
if join:
self.current_thread.join()
self.shutdown_event = None
self.current_thread = None
return
def get_thread(self, period, shutdown_event):
"""Create a Thread object that will do the work."""
def run():
"""Inner data collection loop."""
keep_going = True
while keep_going:
trigger_time = time.time()
next_trigger = trigger_time + period
if shutdown_event.is_set():
keep_going = False
continue
self.enqueue_values(datetime.fromtimestamp(trigger_time))
finished_time = time.time()
sleep_time = next_trigger - finished_time
LOGGER.debug('Device thread for %s: sleep=%f', self.device_name, sleep_time)
if sleep_time < 0:
raise RuntimeError("Sensor too slow. Unable to get " +
"reading in configured period of "
"%f seconds." % period)
time.sleep(sleep_time)
thread = Thread(target=run)
return thread
class DummyMultiSensor(MultiSensorDevice):
device_name = 'dummy'
def __init__(self, *args, **kwargs):
super(DummyMultiSensor, self).__init__(*args, **kwargs)
self._children = [
ChildSensor(self, 'a', 'a', 'mm'),
ChildSensor(self, 'b', 'b', '%'),
]
def enqueue_values(self, timestamp):
"""Just map some data from a list to child sensors..."""
data = [1.2, 5.4]
for sensor, value in zip(self._children, data):
try:
queue = self.queues[sensor]
except KeyError:
# not running, skip.
continue
queue.put((timestamp, value))
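if __name__ == '__main__':
    # Hedged demo, not part of the original module: run the DummyMultiSensor for a
    # few seconds and print whatever readings its children pushed onto their queues.
    from queue import Queue

    device = DummyMultiSensor()
    queues = {}
    for child in device.get_sensors():
        q = Queue()
        queues[child.param_id] = q
        child.start(q, period=1)

    time.sleep(3)
    for child in device.get_sensors():
        child.stop(join=True)

    for param_id, q in queues.items():
        while not q.empty():
            timestamp, value = q.get()
            print(param_id, timestamp, value)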
|
ssh_utils.py
|
#!/usr/bin/env python2
#
# Copyright (c) Greenplum Inc 2008. All Rights Reserved.
#
# This file contains ssh Session class and support functions/classes.
import cmd
import os
import sys
import socket
import threading
from gppylib.commands.base import WorkerPool, REMOTE
from gppylib.commands.unix import Hostname, Echo
from gpssh_modules import gppxssh_wrapper
sys.path.insert(1, sys.path[0] + '/lib')
from pexpect import pxssh
class HostNameError(Exception):
def __init__(self, msg, lineno=0):
if lineno:
self.msg = ('%s at line %d' % (msg, lineno))
else:
self.msg = msg
def __str__(self):
return self.msg
class SSHError(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
# Utility Functions
def ssh_prefix(host):
ssh = 'ssh -o "BatchMode yes" -o "StrictHostKeyChecking no" ' + host
return ssh
def get_hosts(hostsfile):
hostlist = HostList()
hostlist.parseFile(hostsfile)
return hostlist.get()
class HostList():
def __init__(self):
self.list = []
def get(self):
return self.list
def add(self, host, lineno=0):
'''Add a host to the hostlist.'''
# we don't allow the user@ syntax here
if host.find('@') >= 0:
raise HostNameError(host, lineno)
# MPP-13617 - check for ipv6
if host.find(':') >= 0:
try:
socket.inet_pton(socket.AF_INET6, host)
except socket.error, e:
raise HostNameError(str(e), lineno)
# MPP-13617 - check for ipv4
if host.find('.') >= 0:
octs = host.split('.')
if len(octs) == 4 and False not in [o.isdigit() for o in octs]:
try:
socket.inet_pton(socket.AF_INET, host)
except socket.error, e:
raise HostNameError(str(e), lineno)
self.list.append(host)
return self.list
def parseFile(self, path):
'''Add lines in a file to the hostlist.'''
with open(path) as fp:
for i, line in enumerate(fp):
line = line.strip()
if not line or line[0] == '#':
continue
self.add(line, i + 1)
return self.list
def checkSSH(self):
'''Check that ssh to hostlist is okay.'''
pool = WorkerPool()
for h in self.list:
cmd = Echo('ssh test', '', ctxt=REMOTE, remoteHost=h)
pool.addCommand(cmd)
pool.join()
pool.haltWork()
for cmd in pool.getCompletedItems():
if not cmd.get_results().wasSuccessful():
raise SSHError("Unable to ssh to '%s'" % cmd.remoteHost)
return True
def filterMultiHomedHosts(self):
        '''When multiple hosts refer to the same node, keep only one of them in the hostlist.'''
unique = {}
pool = WorkerPool()
for h in self.list:
cmd = Hostname('hostname', ctxt=REMOTE, remoteHost=h)
pool.addCommand(cmd)
pool.join()
pool.haltWork()
for finished_cmd in pool.getCompletedItems():
hostname = finished_cmd.get_hostname()
if (not hostname):
unique[finished_cmd.remoteHost] = finished_cmd.remoteHost
elif not unique.get(hostname):
unique[hostname] = finished_cmd.remoteHost
elif hostname == finished_cmd.remoteHost:
unique[hostname] = finished_cmd.remoteHost
self.list = unique.values()
return self.list
# Session is a command session, derived from a base class cmd.Cmd
class Session(cmd.Cmd):
'''Implements a list of open ssh sessions ready to execute commands'''
verbose = False
hostList = []
userName = None
echoCommand = False
class SessionError(StandardError):
pass
class SessionCmdExit(StandardError):
pass
def __init__(self, hostList=None, userName=None):
cmd.Cmd.__init__(self)
self.pxssh_list = []
self.prompt = '=> '
self.peerStringFormatRaw = None
if hostList:
for host in hostList:
self.hostList.append(host)
if userName: self.userName = userName
def peerStringFormat(self):
if self.peerStringFormatRaw: return self.peerStringFormatRaw
cnt = 0
for p in self.pxssh_list:
if cnt < len(p.x_peer): cnt = len(p.x_peer)
self.peerStringFormatRaw = "[%%%ds]" % cnt
return self.peerStringFormatRaw
def login(self, hostList=None, userName=None, delaybeforesend=0.05, sync_multiplier=1.0, sync_retries=3):
"""This is the normal entry point used to add host names to the object and log in to each of them"""
if self.verbose: print '\n[Reset ...]'
if not (self.hostList or hostList):
raise self.SessionError('No host list available to Login method')
if not (self.userName or userName):
raise self.SessionError('No user name available to Login method')
# Cleanup
self.clean()
if hostList: # We have a new hostlist to use, initialize it
self.hostList = []
for host in hostList:
self.hostList.append(host)
if userName:
self.userName = userName # We have a new userName to use
# MPP-6583. Save off term type and set to nothing before creating ssh process
origTERM = os.getenv('TERM', None)
os.putenv('TERM', '')
good_list = []
print_lock = threading.Lock()
def connect_host(hostname, p):
self.hostList.append(hostname)
try:
# The sync_multiplier value is passed onto pexpect.pxssh which is used to determine timeout
# values for prompt verification after an ssh connection is established.
p.login(hostname, self.userName, sync_multiplier=sync_multiplier)
p.x_peer = hostname
p.x_pid = p.pid
good_list.append(p)
if self.verbose:
with print_lock:
print '[INFO] login %s' % hostname
except Exception as e:
with print_lock:
print '[ERROR] unable to login to %s' % hostname
if type(e) is pxssh.ExceptionPxssh:
print e
elif type(e) is pxssh.EOF:
print 'Could not acquire connection.'
else:
print 'hint: use gpssh-exkeys to setup public-key authentication between hosts'
thread_list = []
for host in hostList:
p = gppxssh_wrapper.PxsshWrapper(delaybeforesend=delaybeforesend,
sync_retries=sync_retries,
options={"StrictHostKeyChecking": "no",
"BatchMode": "yes"})
t = threading.Thread(target=connect_host, args=(host, p))
t.start()
thread_list.append(t)
for t in thread_list:
t.join()
# Restore terminal type
if origTERM:
os.putenv('TERM', origTERM)
self.pxssh_list = good_list
def close(self):
return self.clean()
def reset(self):
        '''reads from all the ssh connections to make sure we don't have any pending cruft'''
for s in self.pxssh_list:
s.readlines()
def clean(self):
net_return_code = self.closePxsshList(self.pxssh_list)
self.pxssh_list = []
return net_return_code
def emptyline(self):
pass
def escapeLine(self, line):
'''Escape occurrences of \ and $ as needed and package the line as an "eval" shell command'''
line = line.strip()
if line == 'EOF' or line == 'exit' or line == 'quit':
raise self.SessionCmdExit()
line = line.split('\\')
line = '\\\\'.join(line)
line = line.split('"')
line = '\\"'.join(line)
line = line.split('$')
line = '\\$'.join(line)
line = 'eval "' + line + '" < /dev/null'
return line
def executeCommand(self, command):
commandoutput = []
if self.echoCommand:
escapedCommand = command.replace('"', '\\"')
command = 'echo "%s"; %s' % (escapedCommand, command)
# Execute the command in all of the ssh sessions
for s in self.pxssh_list:
s.sendline(command)
# Wait for each command and retrieve the output
for s in self.pxssh_list:
# Wait for each command to finish
# !! TODO verify that this is a tight wait loop and find another way to do this
while not s.prompt(120) and s.isalive() and not s.eof(): pass
for s in self.pxssh_list:
# Split the output into an array of lines so that we can add text to the beginning of
# each line
output = s.before.split('\n')
output = output[1:-1]
commandoutput.append(output)
return commandoutput.__iter__()
# Interactive command line handler
# Override of base class, handles commands that aren't recognized as part of a predefined set
# The "command" argument is a command line to be executed on all available command sessions
# The output of the command execution is printed to the standard output, prepended with
# the hostname of each machine from which the output came
def default(self, command):
line = self.escapeLine(command)
if self.verbose: print command
# Execute the command on our ssh sessions
commandoutput = self.executeCommand(command)
self.writeCommandOutput(commandoutput)
def writeCommandOutput(self, commandoutput):
'''Takes a list of output lists as an iterator and writes them to standard output,
formatted with the hostname from which each output array was obtained'''
for s in self.pxssh_list:
output = commandoutput.next()
# Write the output
if len(output) == 0:
print (self.peerStringFormat() % s.x_peer)
else:
for line in output:
print (self.peerStringFormat() % s.x_peer), line
def closePxsshList(self, list):
lock = threading.Lock()
return_codes = [0]
def closePxsshOne(p, return_codes):
p.logout()
with lock:
return_codes.append(p.exitstatus)
th = []
for p in list:
t = threading.Thread(target=closePxsshOne, args=(p, return_codes))
t.start()
th.append(t)
for t in th:
t.join()
return max(return_codes)
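# Usage sketch (an illustration based on the methods above, not taken verbatim
# from gpssh; 'sdw1', 'sdw2' and 'gpadmin' are placeholder host and user names):
#
#     session = Session()
#     session.login(hostList=['sdw1', 'sdw2'], userName='gpadmin')
#     session.writeCommandOutput(session.executeCommand('uname -a'))
#     session.close()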
|
executor.py
|
import asyncio
import logging
from threading import Thread
import aiojobs
from rssant_common.attrdict import AttrDict
from .client import ActorClient, AsyncActorClient
from .context import ActorContext
from .helper import auto_restart_when_crash, unsafe_kill_thread
from .message import ActorMessage
from .prometheus import ACTOR_EXECUTOR_TIME
from .queue import ActorMessageQueue
from .registery import ActorRegistery
LOG = logging.getLogger(__name__)
def normalize_concurrency(concurrency):
num_async_workers = concurrency // 30 + 1
num_thread_workers = max(1, concurrency - num_async_workers)
concurrency = num_async_workers + num_thread_workers
async_concurrency = concurrency * 10 / num_async_workers
async_pending_limit = max(10, concurrency // 10)
return AttrDict(
concurrency=concurrency,
num_async_workers=num_async_workers,
num_thread_workers=num_thread_workers,
async_concurrency=async_concurrency,
async_pending_limit=async_pending_limit,
)
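# Worked example of the split above: normalize_concurrency(100) yields
# num_async_workers = 100 // 30 + 1 = 4 and num_thread_workers = 100 - 4 = 96,
# so concurrency stays 100, async_concurrency = 100 * 10 / 4 = 250.0 and
# async_pending_limit = max(10, 100 // 10) = 10.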
class ActorExecutor:
def __init__(
self,
actors,
queue: ActorMessageQueue,
registery: ActorRegistery,
concurrency=100,
token=None,
):
self.actors = actors
self.queue = queue
self.registery = registery
self.token = token
concurrency_info = normalize_concurrency(concurrency)
self.concurrency = concurrency_info.concurrency
self.num_async_workers = concurrency_info.num_async_workers
self.num_thread_workers = concurrency_info.num_thread_workers
self.async_concurrency = concurrency_info.async_concurrency
self.async_pending_limit = concurrency_info.async_pending_limit
self.threads = []
@auto_restart_when_crash
def thread_main(self):
actor_client = ActorClient(registery=self.registery, token=self.token)
with actor_client:
while True:
try:
message = self.queue.op_execute()
self._handle_message(message, actor_client=actor_client)
except Exception as ex:
LOG.exception(ex)
@auto_restart_when_crash
async def _async_main(self):
scheduler = await aiojobs.create_scheduler(
limit=self.async_concurrency, pending_limit=self.async_pending_limit)
actor_client = AsyncActorClient(registery=self.registery, token=self.token)
async with actor_client:
try:
while True:
try:
message = await self.queue.async_op_execute()
task = self._async_handle_message(message, actor_client=actor_client)
await scheduler.spawn(task)
except Exception as ex:
LOG.exception(ex)
finally:
await scheduler.close()
@auto_restart_when_crash
def async_main(self):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(self._async_main())
def _is_deliverable(self, message: ActorMessage):
return message.dst in self.actors
def _handle_message(self, message: ActorMessage, actor_client):
if not self._is_deliverable(message):
LOG.error(f'undeliverable message {message}')
return
with ACTOR_EXECUTOR_TIME.labels(dst=message.dst).time():
actor = self.actors[message.dst]
ctx = ActorContext(
actor=actor, message=message,
registery=self.registery, queue=self.queue,
actor_client=actor_client)
ctx._thread_execute()
async def _async_handle_message(self, message: ActorMessage, actor_client):
if not self._is_deliverable(message):
LOG.error(f'undeliverable message {message}')
return
with ACTOR_EXECUTOR_TIME.labels(dst=message.dst).time():
actor = self.actors[message.dst]
ctx = ActorContext(
actor=actor, message=message,
registery=self.registery, queue=self.queue,
actor_client=actor_client)
await ctx._async_execute()
def start(self):
for i in range(self.num_async_workers):
t = Thread(target=self.async_main, name=f'actor_async_worker_{i}')
self.threads.append(t)
for i in range(self.num_thread_workers):
t = Thread(target=self.thread_main, name=f'actor_thread_worker_{i}')
self.threads.append(t)
for t in self.threads:
t.daemon = True
t.start()
def shutdown(self):
for t in self.threads:
if t.is_alive():
unsafe_kill_thread(t.ident)
def join(self):
for t in self.threads:
t.join()
|
stratum-miner.py
|
# Copyright (c) 2019, The Monero Project
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import socket
import select
import binascii
import pycryptonight
import pyrx
import struct
import json
import sys
import os
import time
from multiprocessing import Process, Queue
pool_host = 'monerop.com'
pool_port = 4242
pool_pass = 'xx'
wallet_address = '84SqNpyDDvrde6kncNwqEqXfcDVKwk7GJe1YM8Bzz96q3PC5w7mjBh8TmSpzMiY6fpXYXNUAFzwTWP3u9zqt9THNT4T3tAv'
def main():
pool_ip = socket.gethostbyname(pool_host)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((pool_ip, pool_port))
q = Queue()
proc = Process(target=worker, args=(q, s))
proc.daemon = True
proc.start()
login = {
'method': 'login',
'params': {
'login': wallet_address,
'pass': pool_pass,
'rigid': '',
'agent': 'stratum-miner-py/0.1'
},
'id':1
}
print('Logging into pool: {}:{}'.format(pool_host, pool_port))
s.sendall(str(json.dumps(login)+'\n').encode('utf-8'))
try:
while 1:
line = s.makefile().readline()
r = json.loads(line)
error = r.get('error')
result = r.get('result')
method = r.get('method')
params = r.get('params')
if error:
print('Error: {}'.format(error))
continue
if result and result.get('status'):
print('Status: {}'.format(result.get('status')))
if result and result.get('job'):
login_id = result.get('id')
job = result.get('job')
job['login_id'] = login_id
q.put(job)
elif method and method == 'job' and len(login_id):
q.put(params)
except KeyboardInterrupt:
print('{}Exiting'.format(os.linesep))
proc.terminate()
s.close()
sys.exit(0)
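# pack_nonce() re-assembles the hashing blob around a candidate nonce: the first
# 39 bytes are kept as-is, the next 4 bytes are replaced by the packed nonce,
# and the remainder (from byte 43 onward) is appended unchanged.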
def pack_nonce(blob, nonce):
b = binascii.unhexlify(blob)
bin = struct.pack('39B', *bytearray(b[:39]))
bin += struct.pack('I', nonce)
bin += struct.pack('{}B'.format(len(b)-43), *bytearray(b[43:]))
return bin
def worker(q, s):
started = time.time()
hash_count = 0
while 1:
job = q.get()
if job.get('login_id'):
login_id = job.get('login_id')
print('Login ID: {}'.format(login_id))
blob = job.get('blob')
target = job.get('target')
job_id = job.get('job_id')
height = job.get('height')
block_major = int(blob[:2], 16)
cnv = 0
if block_major >= 7:
cnv = block_major - 6
if cnv > 5:
seed_hash = binascii.unhexlify(job.get('seed_hash'))
print('New job with target: {}, RandomX, height: {}'.format(target, height))
else:
print('New job with target: {}, CNv{}, height: {}'.format(target, cnv, height))
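        # the pool's compact 32-bit target is scaled up to an equivalent 64-bit
        # target so it can be compared against the last 8 bytes of each hash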
target = struct.unpack('I', binascii.unhexlify(target))[0]
if target >> 32 == 0:
target = int(0xFFFFFFFFFFFFFFFF / int(0xFFFFFFFF / target))
nonce = 1
while 1:
bin = pack_nonce(blob, nonce)
if cnv > 5:
hash = pyrx.get_rx_hash(bin, seed_hash, height)
else:
hash = pycryptonight.cn_slow_hash(bin, cnv, 0, height)
hash_count += 1
sys.stdout.write('.')
sys.stdout.flush()
hex_hash = binascii.hexlify(hash).decode()
r64 = struct.unpack('Q', hash[24:])[0]
if r64 < target:
elapsed = time.time() - started
hr = int(hash_count / elapsed)
print('{}Hashrate: {} H/s'.format(os.linesep, hr))
submit = {
'method':'submit',
'params': {
'id': login_id,
'job_id': job_id,
'nonce': binascii.hexlify(struct.pack('<I', nonce)).decode(),
'result': hex_hash
},
'id':1
}
print('Submitting hash: {}'.format(hex_hash))
s.sendall(str(json.dumps(submit)+'\n').encode('utf-8'))
select.select([s], [], [], 3)
if not q.empty():
break
nonce += 1
if __name__ == '__main__':
main()
|
event.py
|
# -*- coding: utf-8 -*-
'''
tests.integration.modules.event
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:codeauthor: :email:`Pedro Algarvio (pedro@algarvio.me)`
:copyright: © 2012 by the SaltStack Team, see AUTHORS for more details.
:license: Apache 2.0, see LICENSE for more details.
'''
# Import python libs
import time
import threading
from Queue import Queue, Empty
# Import salt libs
import integration
from salt.utils import event
class EventModuleTest(integration.ModuleCase):
def test_event_fire_master(self):
events = Queue()
def get_event(events):
me = event.MasterEvent(self.master_opts['sock_dir'])
events.put_nowait(
me.get_event(wait=10, tag='salttest', full=False)
)
threading.Thread(target=get_event, args=(events,)).start()
        time.sleep(1)  # Allow the event-listening thread to start
ret = self.run_function(
'event.fire_master',
['event.fire_master: just test it!!!!', 'salttest']
)
self.assertTrue(ret)
eventfired = events.get(block=True, timeout=10)
self.assertIn(
'event.fire_master: just test it!!!!', eventfired['data']
)
ret = self.run_function(
'event.fire_master',
['event.fire_master: just test it!!!!', 'salttest-miss']
)
self.assertTrue(ret)
with self.assertRaises(Empty):
eventfired = events.get(block=True, timeout=10)
def test_event_fire(self):
events = Queue()
def get_event(events):
me = event.MinionEvent(**self.minion_opts)
events.put_nowait(
me.get_event(wait=10, tag='salttest', full=False)
)
threading.Thread(target=get_event, args=(events,)).start()
        time.sleep(1)  # Allow the event-listening thread to start
ret = self.run_function(
'event.fire', ['event.fire: just test it!!!!', 'salttest']
)
self.assertTrue(ret)
eventfired = events.get(block=True, timeout=10)
self.assertIn('event.fire: just test it!!!!', eventfired)
ret = self.run_function(
'event.fire', ['event.fire: just test it!!!!', 'salttest-miss']
)
self.assertTrue(ret)
with self.assertRaises(Empty):
eventfired = events.get(block=True, timeout=10)
def test_event_fire_ipc_mode_tcp(self):
events = Queue()
def get_event(events):
me = event.MinionEvent(**self.sub_minion_opts)
events.put_nowait(
me.get_event(wait=10, tag='salttest', full=False)
)
threading.Thread(target=get_event, args=(events,)).start()
        time.sleep(1)  # Allow the event-listening thread to start
ret = self.run_function(
'event.fire', ['event.fire: just test it!!!!', 'salttest'],
minion_tgt='sub_minion'
)
self.assertTrue(ret)
eventfired = events.get(block=True, timeout=10)
self.assertIn('event.fire: just test it!!!!', eventfired)
ret = self.run_function(
'event.fire', ['event.fire: just test it!!!!', 'salttest-miss'],
minion_tgt='sub_minion'
)
self.assertTrue(ret)
with self.assertRaises(Empty):
eventfired = events.get(block=True, timeout=10)
if __name__ == '__main__':
from integration import run_tests
run_tests(EventModuleTest)
|
websocket_manager.py
|
import json
import time
from threading import Thread, Lock
from queue import Queue
from typing import Callable
from gzip import decompress
from websocket import WebSocketApp
from confluent_kafka import Producer
class WebsocketManager():
_CONNECT_TIMEOUT_S = 5
def __init__(self, url: str, subscribe: Callable, unsubscribe: Callable):
"""
subscribe is a function that's called right after the websocket connects.
unsubscribe is a function that's called just before the websocket disconnects.
both subscribe and unsubscribe MUST have one argument, which is an instance of
WebsocketManager (see KrakenWsManagerFactory in ws_factories.py for an example).
"""
self.connect_lock = Lock()
self.ws = None
self.queue = Queue()
self.url = url
self.subscribe = subscribe
self.unsubscribe = unsubscribe
self.connect()
conf = {
'bootstrap.servers': 'SSL://kafka-16054d72-gda-3ad8.aivencloud.com:18921',
'security.protocol' : 'SSL',
'client.id': 'kafka-python-producer',
'ssl.certificate.location': '../../jay.cert',
'ssl.key.location': '../../jay.key',
'ssl.ca.location': '../../ca-aiven-cert.pem',
}
self.producer = Producer(conf)
def _acked(self, err, msg):
if err is not None:
print("Failed to deliver message: {}".format(err))
else:
#delivered_records += 1
print("Produced record to topic {} partition [{}] @ offset {}"
.format(msg.topic(), msg.partition(), msg.offset()))
def get_msg(self):
"""
Retrieves a message from the front of the queue.
        NOTE: The message received has an extra field "receive_timestamp", which
        is the UTC timestamp (in milliseconds) of when the message was received.
"""
return self.queue.get()
def _on_message(self, ws, message):
message = json.loads(message)
if isinstance(message, dict):
message["receive_timestamp"] = int(time.time()*10**3)
try:
self.producer.produce(f"test-phemex-raw", value=json.dumps(message), on_delivery=self._acked)
self.producer.poll(0)
except Exception as e:
print("An error occurred while producing: %s" % e)
def get_q_size(self):
"""Returns the size of the queue"""
print(f"Queue Backlog: {self.queue.qsize()}")
def send(self, message):
"""Sends a message over the websocket"""
self.connect()
self.ws.send(message)
def send_json(self, message):
"""Sends a json message over the websocket"""
self.send(json.dumps(message))
def _connect(self):
"""Creates a websocket app and connects"""
assert not self.ws, "ws should be closed before attempting to connect"
self.ws = WebSocketApp(
self.url,
on_message=self._wrap_callback(self._on_message),
on_close=self._wrap_callback(self._on_close),
on_error=self._wrap_callback(self._on_error),
)
wst = Thread(target=self._run_websocket, args=(self.ws,))
wst.daemon = True
wst.start()
# Wait for socket to connect
ts = time.time()
while self.ws and (not self.ws.sock or not self.ws.sock.connected):
if time.time() - ts > self._CONNECT_TIMEOUT_S:
self.ws = None
                raise Exception(
                    f"Failed to connect to websocket url {self.url}")
time.sleep(0.1)
def _wrap_callback(self, f):
"""Wrap websocket callback"""
def wrapped_f(ws, *args, **kwargs):
if ws is self.ws:
try:
f(ws, *args, **kwargs)
except Exception as e:
raise Exception(f'Error running websocket callback: {e}')
return wrapped_f
    def _run_websocket(self, ws):
        """Runs the websocket app"""
try:
ws.run_forever(ping_interval=30)
except Exception as e:
raise Exception(f'Unexpected error while running websocket: {e}')
finally:
pass
# self._reconnect(ws)
def _reconnect(self, ws):
"""Closes a connection and attempts to reconnect"""
assert ws is not None, '_reconnect should only be called with an existing ws'
if ws is self.ws:
self.ws = None
ws.close()
self.connect()
def connect(self):
"""Connects to the websocket"""
if self.ws:
return
with self.connect_lock:
while not self.ws:
self._connect()
if self.ws:
self.subscribe(self)
return
    def resubscribe(self):
        self.unsubscribe(self)
        self.subscribe(self)
def _on_close(self, ws, a, b):
print("Connection Closed")
self.unsubscribe(self)
self._reconnect(ws)
def _on_error(self, ws, error):
print(f"websocket error: {error}")
self._reconnect(ws)
def reconnect(self) -> None:
if self.ws is not None:
self._reconnect(self.ws)
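# Usage sketch (an assumption, not part of the original module; the URL and the
# subscription payloads are placeholders for whatever exchange this manager is
# pointed at). Both callbacks receive the manager instance, as required by the
# __init__ docstring:
#
#     def subscribe(manager):
#         manager.send_json({"id": 1, "method": "orderbook.subscribe", "params": []})
#
#     def unsubscribe(manager):
#         manager.send_json({"id": 2, "method": "orderbook.unsubscribe", "params": []})
#
#     manager = WebsocketManager("wss://ws.example.invalid/stream", subscribe, unsubscribe)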
|
test.py
|
import os.path as p
import random
import threading
import time
import pytest
from helpers.cluster import ClickHouseCluster
from helpers.test_tools import TSV
from helpers.client import QueryRuntimeException
import json
import subprocess
import kafka.errors
from kafka import KafkaAdminClient, KafkaProducer, KafkaConsumer
from google.protobuf.internal.encoder import _VarintBytes
"""
protoc --version
libprotoc 3.0.0
# to create kafka_pb2.py
protoc --python_out=. kafka.proto
"""
import kafka_pb2
# TODO: add test for run-time offset update in CH, if we manually update it on Kafka side.
# TODO: add test for SELECT LIMIT is working.
# TODO: modify tests to respect `skip_broken_messages` setting.
cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance('instance',
config_dir='configs',
main_configs=['configs/kafka.xml'],
with_kafka=True,
clickhouse_path_dir='clickhouse_path')
kafka_id = ''
# Helpers
def check_kafka_is_available():
p = subprocess.Popen(('docker',
'exec',
'-i',
kafka_id,
'/usr/bin/kafka-broker-api-versions',
'--bootstrap-server',
'INSIDE://localhost:9092'),
stdout=subprocess.PIPE)
p.communicate()
return p.returncode == 0
def wait_kafka_is_available(max_retries=50):
retries = 0
while True:
if check_kafka_is_available():
break
else:
retries += 1
            if retries > max_retries:
                raise Exception("Kafka is not available")
print("Waiting for Kafka to start up")
time.sleep(1)
def kafka_produce(topic, messages, timestamp=None):
producer = KafkaProducer(bootstrap_servers="localhost:9092")
for message in messages:
producer.send(topic=topic, value=message, timestamp_ms=timestamp)
producer.flush()
print ("Produced {} messages for topic {}".format(len(messages), topic))
def kafka_consume(topic):
consumer = KafkaConsumer(bootstrap_servers="localhost:9092", auto_offset_reset="earliest")
    consumer.subscribe(topics=[topic])
for toppar, messages in consumer.poll(5000).items():
if toppar.topic == topic:
for message in messages:
yield message.value
consumer.unsubscribe()
consumer.close()
def kafka_produce_protobuf_messages(topic, start_index, num_messages):
data = ''
for i in range(start_index, start_index + num_messages):
msg = kafka_pb2.KeyValuePair()
msg.key = i
msg.value = str(i)
serialized_msg = msg.SerializeToString()
data = data + _VarintBytes(len(serialized_msg)) + serialized_msg
producer = KafkaProducer(bootstrap_servers="localhost:9092")
producer.send(topic=topic, value=data)
producer.flush()
print("Produced {} messages for topic {}".format(num_messages, topic))
# Since everything is async and shaky when receiving messages from Kafka,
# we may want to try and check results multiple times in a loop.
def kafka_check_result(result, check=False, ref_file='test_kafka_json.reference'):
fpath = p.join(p.dirname(__file__), ref_file)
with open(fpath) as reference:
if check:
assert TSV(result) == TSV(reference)
else:
return TSV(result) == TSV(reference)
# Fixtures
@pytest.fixture(scope="module")
def kafka_cluster():
try:
global kafka_id
cluster.start()
kafka_id = instance.cluster.kafka_docker_id
print("kafka_id is {}".format(kafka_id))
instance.query('CREATE DATABASE test')
yield cluster
finally:
cluster.shutdown()
@pytest.fixture(autouse=True)
def kafka_setup_teardown():
instance.query('DROP TABLE IF EXISTS test.kafka')
wait_kafka_is_available()
print("kafka is available - running test")
yield # run test
instance.query('DROP TABLE test.kafka')
# Tests
@pytest.mark.timeout(180)
def test_kafka_settings_old_syntax(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka('kafka1:19092', 'old', 'old', 'JSONEachRow', '\\n');
''')
# Don't insert malformed messages since old settings syntax
# doesn't support skipping of broken messages.
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce('old', messages)
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
@pytest.mark.timeout(180)
def test_kafka_settings_new_syntax(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'new',
kafka_group_name = 'new',
kafka_format = 'JSONEachRow',
kafka_row_delimiter = '\\n',
kafka_skip_broken_messages = 1;
''')
messages = []
for i in range(25):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce('new', messages)
# Insert couple of malformed messages.
kafka_produce('new', ['}{very_broken_message,'])
kafka_produce('new', ['}another{very_broken_message,'])
messages = []
for i in range(25, 50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce('new', messages)
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
@pytest.mark.timeout(180)
def test_kafka_csv_with_delimiter(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'csv',
kafka_group_name = 'csv',
kafka_format = 'CSV',
kafka_row_delimiter = '\\n';
''')
messages = []
for i in range(50):
messages.append('{i}, {i}'.format(i=i))
kafka_produce('csv', messages)
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
@pytest.mark.timeout(180)
def test_kafka_tsv_with_delimiter(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'tsv',
kafka_group_name = 'tsv',
kafka_format = 'TSV',
kafka_row_delimiter = '\\n';
''')
messages = []
for i in range(50):
messages.append('{i}\t{i}'.format(i=i))
kafka_produce('tsv', messages)
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
@pytest.mark.timeout(180)
def test_kafka_select_empty(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'empty',
kafka_group_name = 'empty',
kafka_format = 'TSV',
kafka_row_delimiter = '\\n';
''')
assert int(instance.query('SELECT count() FROM test.kafka')) == 0
@pytest.mark.timeout(180)
def test_kafka_json_without_delimiter(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'json',
kafka_group_name = 'json',
kafka_format = 'JSONEachRow';
''')
messages = ''
for i in range(25):
messages += json.dumps({'key': i, 'value': i}) + '\n'
kafka_produce('json', [messages])
messages = ''
for i in range(25, 50):
messages += json.dumps({'key': i, 'value': i}) + '\n'
kafka_produce('json', [messages])
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
@pytest.mark.timeout(180)
def test_kafka_protobuf(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'pb',
kafka_group_name = 'pb',
kafka_format = 'Protobuf',
kafka_schema = 'kafka.proto:KeyValuePair';
''')
kafka_produce_protobuf_messages('pb', 0, 20)
kafka_produce_protobuf_messages('pb', 20, 1)
kafka_produce_protobuf_messages('pb', 21, 29)
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
@pytest.mark.timeout(180)
def test_kafka_materialized_view(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'mv',
kafka_group_name = 'mv',
kafka_format = 'JSONEachRow',
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce('mv', messages)
while True:
result = instance.query('SELECT * FROM test.view')
if kafka_check_result(result):
break
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
kafka_check_result(result, True)
@pytest.mark.timeout(180)
def test_kafka_many_materialized_views(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view1;
DROP TABLE IF EXISTS test.view2;
DROP TABLE IF EXISTS test.consumer1;
DROP TABLE IF EXISTS test.consumer2;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'mmv',
kafka_group_name = 'mmv',
kafka_format = 'JSONEachRow',
kafka_row_delimiter = '\\n';
CREATE TABLE test.view1 (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE TABLE test.view2 (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer1 TO test.view1 AS
SELECT * FROM test.kafka;
CREATE MATERIALIZED VIEW test.consumer2 TO test.view2 AS
SELECT * FROM test.kafka;
''')
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce('mmv', messages)
while True:
result1 = instance.query('SELECT * FROM test.view1')
result2 = instance.query('SELECT * FROM test.view2')
if kafka_check_result(result1) and kafka_check_result(result2):
break
instance.query('''
DROP TABLE test.consumer1;
DROP TABLE test.consumer2;
DROP TABLE test.view1;
DROP TABLE test.view2;
''')
kafka_check_result(result1, True)
kafka_check_result(result2, True)
@pytest.mark.timeout(300)
def test_kafka_flush_on_big_message(kafka_cluster):
# Create batches of messages, each roughly 100 KB in size
kafka_messages = 1000
batch_messages = 1000
messages = [json.dumps({'key': i, 'value': 'x' * 100}) * batch_messages for i in range(kafka_messages)]
kafka_produce('flush', messages)
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'flush',
kafka_group_name = 'flush',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 10;
CREATE TABLE test.view (key UInt64, value String)
ENGINE = MergeTree
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
client = KafkaAdminClient(bootstrap_servers="localhost:9092")
received = False
while not received:
try:
offsets = client.list_consumer_group_offsets('flush')
for topic, offset in offsets.items():
if topic.topic == 'flush' and offset.offset == kafka_messages:
received = True
break
except kafka.errors.GroupCoordinatorNotAvailableError:
continue
while True:
result = instance.query('SELECT count() FROM test.view')
if int(result) == kafka_messages*batch_messages:
break
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
assert int(result) == kafka_messages*batch_messages, 'ClickHouse lost some messages: {}'.format(result)
@pytest.mark.timeout(180)
def test_kafka_virtual_columns(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'virt1',
kafka_group_name = 'virt1',
kafka_format = 'JSONEachRow';
''')
messages = ''
for i in range(25):
messages += json.dumps({'key': i, 'value': i}) + '\n'
kafka_produce('virt1', [messages], 0)
messages = ''
for i in range(25, 50):
messages += json.dumps({'key': i, 'value': i}) + '\n'
kafka_produce('virt1', [messages], 0)
result = ''
while True:
result += instance.query('SELECT _key, key, _topic, value, _offset, _partition, _timestamp FROM test.kafka', ignore_error=True)
if kafka_check_result(result, False, 'test_kafka_virtual1.reference'):
break
kafka_check_result(result, True, 'test_kafka_virtual1.reference')
@pytest.mark.timeout(180)
def test_kafka_virtual_columns_with_materialized_view(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'virt2',
kafka_group_name = 'virt2',
kafka_format = 'JSONEachRow',
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64, kafka_key String, topic String, offset UInt64, partition UInt64, timestamp Nullable(DateTime))
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT *, _key as kafka_key, _topic as topic, _offset as offset, _partition as partition, _timestamp as timestamp FROM test.kafka;
''')
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce('virt2', messages, 0)
while True:
result = instance.query('SELECT kafka_key, key, topic, value, offset, partition, timestamp FROM test.view')
if kafka_check_result(result, False, 'test_kafka_virtual2.reference'):
break
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
kafka_check_result(result, True, 'test_kafka_virtual2.reference')
@pytest.mark.timeout(180)
def test_kafka_insert(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'insert1',
kafka_group_name = 'insert1',
kafka_format = 'TSV',
kafka_row_delimiter = '\\n';
''')
values = []
for i in range(50):
values.append("({i}, {i})".format(i=i))
values = ','.join(values)
while True:
try:
instance.query("INSERT INTO test.kafka VALUES {}".format(values))
break
except QueryRuntimeException as e:
if 'Local: Timed out.' in str(e):
continue
else:
raise
messages = []
while True:
messages.extend(kafka_consume('insert1'))
if len(messages) == 50:
break
result = '\n'.join(messages)
kafka_check_result(result, True)
@pytest.mark.timeout(180)
def test_kafka_produce_consume(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'insert2',
kafka_group_name = 'insert2',
kafka_format = 'TSV',
kafka_row_delimiter = '\\n';
''')
messages_num = 10000
def insert():
values = []
for i in range(messages_num):
values.append("({i}, {i})".format(i=i))
values = ','.join(values)
while True:
try:
instance.query("INSERT INTO test.kafka VALUES {}".format(values))
break
except QueryRuntimeException as e:
if 'Local: Timed out.' in str(e):
continue
else:
raise
threads = []
threads_num = 16
for _ in range(threads_num):
threads.append(threading.Thread(target=insert))
for thread in threads:
time.sleep(random.uniform(0, 1))
thread.start()
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
while True:
result = instance.query('SELECT count() FROM test.view')
time.sleep(1)
if int(result) == messages_num * threads_num:
break
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
for thread in threads:
thread.join()
assert int(result) == messages_num * threads_num, 'ClickHouse lost some messages: {}'.format(result)
@pytest.mark.timeout(300)
def test_kafka_commit_on_block_write(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'block',
kafka_group_name = 'block',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 100,
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
cancel = threading.Event()
i = [0]
def produce():
while not cancel.is_set():
messages = []
for _ in range(101):
messages.append(json.dumps({'key': i[0], 'value': i[0]}))
i[0] += 1
kafka_produce('block', messages)
kafka_thread = threading.Thread(target=produce)
kafka_thread.start()
while int(instance.query('SELECT count() FROM test.view')) == 0:
time.sleep(1)
cancel.set()
instance.query('''
DROP TABLE test.kafka;
''')
while int(instance.query("SELECT count() FROM system.tables WHERE database='test' AND name='kafka'")) == 1:
time.sleep(1)
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'block',
kafka_group_name = 'block',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 100,
kafka_row_delimiter = '\\n';
''')
while int(instance.query('SELECT uniqExact(key) FROM test.view')) < i[0]:
time.sleep(1)
result = int(instance.query('SELECT count() == uniqExact(key) FROM test.view'))
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
kafka_thread.join()
assert result == 1, 'Messages from kafka get duplicated!'
if __name__ == '__main__':
cluster.start()
raw_input("Cluster created, press any key to destroy...")
cluster.shutdown()
|
asset_info.py
|
#!/usr/bin/env python
# coding: utf8
'''
@author: qitan
@contact: qqing_lai@hotmail.com
@file: asset_info.py
@time: 2017/3/30 15:33
@desc:
'''
from deploy.saltapi import SaltAPI
from devops import settings
import threading
asset_info = []
def GetInfoDict(r, arg):
try:
result = ''
for k in r[arg]:
result = result + k + ': ' + str(r[arg][k]) + '\n'
except Exception:
result = 'Nan'
return result
def GetInfo(r, arg):
try:
arg = str(r[arg])
except Exception:
arg = 'Nan'
return arg
def GetAssetInfo(tgt):
'''
Fetch host information via the Salt API and format it for output.
'''
global asset_info
info = {}
sapi = SaltAPI(url=settings.SALT_API['url'],username=settings.SALT_API['user'],password=settings.SALT_API['password'])
ret = sapi.remote_server_info(tgt, 'grains.items')
info['sn']=GetInfo(ret,'serialnumber')
info['hostname']=GetInfo(ret,'fqdn')
info['nodename']=tgt
info['os']=GetInfo(ret,'os')+GetInfo(ret,'osrelease')+' '+GetInfo(ret,'osarch')
info['manufacturer']=GetInfo(ret,'manufacturer')
info['cpu_model']=GetInfo(ret,'cpu_model')
info['productname']=GetInfo(ret,'productname')
info['cpu_nums']=GetInfo(ret,'num_cpus')
info['kernel'] = GetInfo(ret,'kernel') + GetInfo(ret,'kernelrelease')
info['zmqversion'] = GetInfo(ret,'zmqversion')
info['shell'] = GetInfo(ret,'shell')
info['saltversion'] = GetInfo(ret,'saltversion')
info['locale'] = GetInfoDict(ret, 'locale_info')
info['selinux'] = GetInfoDict(ret, 'selinux')
if 'virtual_subtype' in ret:
virtual = GetInfo(ret,'virtual') + '-' + GetInfo(ret,'virtual_subtype')
else:
virtual=GetInfo(ret,'virtual')
info['virtual'] = virtual
try:
hwaddr = ret['hwaddr_interfaces']
ipaddr = ret['ip4_interfaces']
hwaddr.pop('lo')
ipaddr.pop('lo')
network = ''
for i in ipaddr:
ip = ''
for j in ipaddr[i]:
ip = ip + j + '/'
network = network + i + ': ' + ip.strip('/') + '-' + hwaddr[i] + '\n'
info['network'] = network
except Exception:
info['network'] = 'Nan'
# GetInfo() returns mem_total as a string; convert it before comparing.
try:
mem = int(GetInfo(ret, 'mem_total'))
except ValueError:
mem = 0
if mem > 1000:
memory = ('%.1f' % (mem / 1000.0)) + 'G'
else:
memory = str(mem) + 'M'
info['memory'] = memory
ret = sapi.remote_server_info(tgt, 'disk.usage')
disk = ''
for i in ret:
r = int(ret[i]['1K-blocks'])/1000
if r > 1000:
r = r/1000
s = str(r) + 'G'
if r > 1000:
r = r/1000.0
s = ('%.1f'%r) + 'T'
else:
s = str(r) + 'M'
disk = disk + i + ': ' + s + '\n'
info['disk'] = disk
asset_info.append(info)
def MultipleCollect(tgt_list):
global asset_info
asset_info = []
threads = []
loop = 0
count = len(tgt_list)
for i in range(0, count, 2):
keys = range(loop*2, (loop+1)*2, 1)
# Instantiate the worker threads
for i in keys:
if i >= count:
break
else:
t = threading.Thread(target=GetAssetInfo, args=(tgt_list[i],))
threads.append(t)
# Start the threads
for i in keys:
if i >=count:
break
else:
threads[i].start()
# Wait for the concurrent threads to finish
for i in keys:
if i >= count:
break
else:
threads[i].join()
loop = loop + 1
return asset_info
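# Illustrative usage sketch (hypothetical minion IDs): MultipleCollect gathers
# grains and disk info two hosts at a time and returns a list of dicts, e.g.
#
#   if __name__ == '__main__':
#       for host in MultipleCollect(['web01', 'web02', 'db01']):
#           print('{nodename}: {os}, {memory}'.format(**host))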
|
test_bio_membuf.py
|
#!/usr/bin/env python
"""Unit tests for M2Crypto.BIO.MemoryBuffer.
Copyright (c) 2000 Ng Pheng Siong. All rights reserved."""
import os
import multiprocessing
try:
import unittest2 as unittest
except ImportError:
import unittest
from M2Crypto.BIO import MemoryBuffer
class TimeLimitExpired(Exception):
pass
def time_limit(timeout, func, exc_msg, *args, **kwargs):
# Forward any extra arguments to func and enforce a hard timeout in a subprocess.
p = multiprocessing.Process(target=func, args=args, kwargs=kwargs)
p.start()
p.join(timeout)
if p.is_alive():
p.terminate()
raise TimeLimitExpired(exc_msg)
class MemoryBufferTestCase(unittest.TestCase):
def setUp(self):
self.data = b'abcdef' * 64
def tearDown(self):
pass
def test_init_empty(self):
mb = MemoryBuffer()
self.assertEqual(len(mb), 0)
out = mb.read()
assert out is None
def test_init_empty_cm(self):
with MemoryBuffer() as mb:
self.assertEqual(len(mb), 0)
out = mb.read()
assert out is None
def test_init_something(self):
mb = MemoryBuffer(self.data)
self.assertEqual(len(mb), len(self.data))
out = mb.read()
self.assertEqual(out, self.data)
def test_init_something_result_bytes(self):
mb = MemoryBuffer(self.data)
self.assertEqual(len(mb), len(self.data))
out = mb.read()
self.assertIsInstance(out, bytes)
def test_init_something_cm(self):
with MemoryBuffer(self.data) as mb:
self.assertEqual(len(mb), len(self.data))
out = mb.read()
self.assertEqual(out, self.data)
def test_read_less_than(self):
chunk = len(self.data) - 7
mb = MemoryBuffer(self.data)
out = mb.read(chunk)
self.assertEqual(out, self.data[:chunk])
self.assertEqual(len(mb), (len(self.data)) - chunk)
def test_read_more_than(self):
chunk = len(self.data) + 8
mb = MemoryBuffer(self.data)
out = mb.read(chunk)
self.assertEqual(out, self.data)
self.assertEqual(len(mb), 0)
def test_write_close(self):
mb = MemoryBuffer(self.data)
assert mb.writeable()
mb.write_close()
assert mb.readable()
with self.assertRaises(IOError):
mb.write(self.data)
assert not mb.writeable()
def test_closed(self):
mb = MemoryBuffer(self.data)
mb.close()
with self.assertRaises(IOError):
mb.write(self.data)
assert mb.readable() and not mb.writeable()
def test_readline(self):
# test against possible endless loop
# http://stackoverflow.com/questions/9280550/
timeout_secs = 10
time_limit(timeout_secs, run_test,
'The readline() should not timeout!')
def run_test(*args, **kwargs):
sep = os.linesep.encode()
with MemoryBuffer(b'hello\nworld\n') as mb:
assert mb.readable()
assert mb.readline() == b'hello' + sep
assert mb.readline() == b'world' + sep
with MemoryBuffer(b'hello\nworld\n') as mb:
assert mb.readlines() == [b'hello' + sep, b'world' + sep]
def suite():
return unittest.makeSuite(MemoryBufferTestCase)
if __name__ == '__main__':
unittest.TextTestRunner().run(suite())
|
__init__.py
|
# -*- coding: utf-8 -*-
'''
Set up the Salt integration test suite
'''
# Import Python libs
from __future__ import print_function
import os
import re
import sys
import copy
import json
import time
import errno
import signal
import shutil
import pprint
import atexit
import logging
import tempfile
import subprocess
import multiprocessing
from hashlib import md5
from datetime import datetime, timedelta
from six import string_types
try:
import pwd
except ImportError:
pass
STATE_FUNCTION_RUNNING_RE = re.compile(
r'''The function (?:"|')(?P<state_func>.*)(?:"|') is running as PID '''
r'(?P<pid>[\d]+) and was started at (?P<date>.*) with jid (?P<jid>[\d]+)'
)
INTEGRATION_TEST_DIR = os.path.dirname(
os.path.normpath(os.path.abspath(__file__))
)
CODE_DIR = os.path.dirname(os.path.dirname(INTEGRATION_TEST_DIR))
SALT_LIBS = os.path.dirname(CODE_DIR)
# Import Salt Testing libs
from salttesting import TestCase
from salttesting.case import ShellTestCase
from salttesting.mixins import CheckShellBinaryNameAndVersionMixIn
from salttesting.parser import PNUM, print_header, SaltTestcaseParser
from salttesting.helpers import requires_sshd_server
from salttesting.helpers import ensure_in_syspath, RedirectStdStreams
# Update sys.path
ensure_in_syspath(CODE_DIR, SALT_LIBS)
# Import Salt libs
import salt
import salt.config
import salt.minion
import salt.runner
import salt.output
import salt.version
import salt.utils
import salt.utils.process
from salt.utils import fopen, get_colors
from salt.utils.verify import verify_env
from salt.utils.immutabletypes import freeze
try:
import salt.master
except ImportError:
# Not required for raet tests
pass
# Import 3rd-party libs
import yaml
if os.uname()[0] == 'Darwin':
SYS_TMP_DIR = '/tmp'
else:
SYS_TMP_DIR = os.environ.get('TMPDIR', tempfile.gettempdir())
# Gentoo Portage prefers that ebuild tests be rooted in ${TMPDIR}
TMP = os.path.join(SYS_TMP_DIR, 'salt-tests-tmpdir')
FILES = os.path.join(INTEGRATION_TEST_DIR, 'files')
PYEXEC = 'python{0}.{1}'.format(*sys.version_info)
MOCKBIN = os.path.join(INTEGRATION_TEST_DIR, 'mockbin')
SCRIPT_DIR = os.path.join(CODE_DIR, 'scripts')
TMP_STATE_TREE = os.path.join(SYS_TMP_DIR, 'salt-temp-state-tree')
TMP_PRODENV_STATE_TREE = os.path.join(SYS_TMP_DIR, 'salt-temp-prodenv-state-tree')
TMP_CONF_DIR = os.path.join(TMP, 'config')
CONF_DIR = os.path.join(INTEGRATION_TEST_DIR, 'files', 'conf')
RUNTIME_CONFIGS = {}
log = logging.getLogger(__name__)
def cleanup_runtime_config_instance(to_cleanup):
# Explicit and forced cleanup
for key in to_cleanup.keys():
instance = to_cleanup.pop(key)
del instance
atexit.register(cleanup_runtime_config_instance, RUNTIME_CONFIGS)
def run_tests(*test_cases, **kwargs):
'''
Run integration tests for the chosen test cases.
This function uses optparse to set up the test environment.
'''
needs_daemon = kwargs.pop('needs_daemon', True)
if kwargs:
raise RuntimeError(
'The \'run_tests\' function only accepts \'needs_daemon\' as a '
'keyword argument'
)
class TestcaseParser(SaltTestcaseParser):
def setup_additional_options(self):
self.add_option(
'--sysinfo',
default=False,
action='store_true',
help='Print some system information.'
)
self.output_options_group.add_option(
'--no-colors',
'--no-colours',
default=False,
action='store_true',
help='Disable colour printing.'
)
if needs_daemon:
self.add_option(
'--transport',
default='zeromq',
choices=('zeromq', 'raet'),
help='Set to raet to run integration tests with raet transport. Default: %default'
)
def validate_options(self):
SaltTestcaseParser.validate_options(self)
# Transplant configuration
transport = None
if needs_daemon:
transport = self.options.transport
TestDaemon.transplant_configs(transport=transport)
def run_testcase(self, testcase, needs_daemon=True): # pylint: disable=W0221
if needs_daemon:
print(' * Setting up Salt daemons to execute tests')
with TestDaemon(self):
return SaltTestcaseParser.run_testcase(self, testcase)
return SaltTestcaseParser.run_testcase(self, testcase)
parser = TestcaseParser()
parser.parse_args()
for case in test_cases:
if parser.run_testcase(case, needs_daemon=needs_daemon) is False:
parser.finalize(1)
parser.finalize(0)
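# Illustrative usage sketch (hypothetical test case name): an integration test
# module would typically invoke run_tests() from its __main__ guard, e.g.
#
#   if __name__ == '__main__':
#       run_tests(MyModuleTest, needs_daemon=True)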
class TestDaemon(object):
'''
Set up the master and minion daemons, and run related cases
'''
MINIONS_CONNECT_TIMEOUT = MINIONS_SYNC_TIMEOUT = 120
def __init__(self, parser):
self.parser = parser
self.colors = get_colors(self.parser.options.no_colors is False)
def __enter__(self):
'''
Start a master and minion
'''
# Set up PATH to mockbin
self._enter_mockbin()
if self.parser.options.transport == 'zeromq':
self.start_zeromq_daemons()
elif self.parser.options.transport == 'raet':
self.start_raet_daemons()
self.minion_targets = set(['minion', 'sub_minion'])
self.pre_setup_minions()
self.setup_minions()
if getattr(self.parser.options, 'ssh', False):
self.prep_ssh()
if self.parser.options.sysinfo:
try:
print_header(
'~~~~~~~ Versions Report ', inline=True,
width=getattr(self.parser.options, 'output_columns', PNUM)
)
except TypeError:
print_header('~~~~~~~ Versions Report ', inline=True)
print('\n'.join(salt.version.versions_report()))
try:
print_header(
'~~~~~~~ Minion Grains Information ', inline=True,
width=getattr(self.parser.options, 'output_columns', PNUM)
)
except TypeError:
print_header('~~~~~~~ Minion Grains Information ', inline=True)
grains = self.client.cmd('minion', 'grains.items')
minion_opts = self.minion_opts.copy()
minion_opts['color'] = self.parser.options.no_colors is False
salt.output.display_output(grains, 'grains', minion_opts)
try:
print_header(
'=', sep='=', inline=True,
width=getattr(self.parser.options, 'output_columns', PNUM)
)
except TypeError:
print_header('', sep='=', inline=True)
try:
return self
finally:
self.post_setup_minions()
def start_zeromq_daemons(self):
'''
Fire up the daemons used for zeromq tests
'''
master = salt.master.Master(self.master_opts)
self.master_process = multiprocessing.Process(target=master.start)
self.master_process.start()
minion = salt.minion.Minion(self.minion_opts)
self.minion_process = multiprocessing.Process(target=minion.tune_in)
self.minion_process.start()
sub_minion = salt.minion.Minion(self.sub_minion_opts)
self.sub_minion_process = multiprocessing.Process(
target=sub_minion.tune_in
)
self.sub_minion_process.start()
smaster = salt.master.Master(self.syndic_master_opts)
self.smaster_process = multiprocessing.Process(target=smaster.start)
self.smaster_process.start()
syndic = salt.minion.Syndic(self.syndic_opts)
self.syndic_process = multiprocessing.Process(target=syndic.tune_in)
self.syndic_process.start()
def start_raet_daemons(self):
'''
Fire up the raet daemons!
'''
import salt.daemons.flo
master = salt.daemons.flo.IofloMaster(self.master_opts)
self.master_process = multiprocessing.Process(target=master.start)
self.master_process.start()
minion = salt.daemons.flo.IofloMinion(self.minion_opts)
self.minion_process = multiprocessing.Process(target=minion.tune_in)
self.minion_process.start()
sub_minion = salt.daemons.flo.IofloMinion(self.sub_minion_opts)
self.sub_minion_process = multiprocessing.Process(
target=sub_minion.tune_in
)
self.sub_minion_process.start()
# Wait for the daemons to all spin up
time.sleep(5)
#smaster = salt.daemons.flo.IofloMaster(self.syndic_master_opts)
#self.smaster_process = multiprocessing.Process(target=smaster.start)
#self.smaster_process.start()
# no raet syndic daemon yet
def prep_ssh(self):
'''
Generate keys and start an ssh daemon on an alternate port
'''
print(' * Initializing SSH subsystem')
keygen = salt.utils.which('ssh-keygen')
sshd = salt.utils.which('sshd')
if not (keygen and sshd):
print('WARNING: Could not initialize SSH subsystem. Tests for salt-ssh may break!')
return
if not os.path.exists(TMP_CONF_DIR):
os.makedirs(TMP_CONF_DIR)
# Generate client key
pub_key_test_file = os.path.join(TMP_CONF_DIR, 'key_test.pub')
priv_key_test_file = os.path.join(TMP_CONF_DIR, 'key_test')
if os.path.exists(pub_key_test_file):
os.remove(pub_key_test_file)
if os.path.exists(priv_key_test_file):
os.remove(priv_key_test_file)
keygen_process = subprocess.Popen(
[keygen, '-t',
'ecdsa',
'-b',
'521',
'-C',
'"$(whoami)@$(hostname)-$(date -I)"',
'-f',
'key_test',
'-P',
''],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
cwd=TMP_CONF_DIR
)
_, keygen_err = keygen_process.communicate()
if keygen_err:
print('ssh-keygen had errors: {0}'.format(keygen_err))
sshd_config_path = os.path.join(FILES, 'conf/_ssh/sshd_config')
shutil.copy(sshd_config_path, TMP_CONF_DIR)
auth_key_file = os.path.join(TMP_CONF_DIR, 'key_test.pub')
# Generate server key
server_key_dir = os.path.join(TMP_CONF_DIR, 'server')
if not os.path.exists(server_key_dir):
os.makedirs(server_key_dir)
server_dsa_priv_key_file = os.path.join(server_key_dir, 'ssh_host_dsa_key')
server_dsa_pub_key_file = os.path.join(server_key_dir, 'ssh_host_dsa_key.pub')
server_ecdsa_priv_key_file = os.path.join(server_key_dir, 'ssh_host_ecdsa_key')
server_ecdsa_pub_key_file = os.path.join(server_key_dir, 'ssh_host_ecdsa_key.pub')
server_ed25519_priv_key_file = os.path.join(server_key_dir, 'ssh_host_ed25519_key')
server_ed25519_pub_key_file = os.path.join(server_key_dir, 'ssh_host_ed25519_key.pub')
for server_key_file in (server_dsa_priv_key_file,
server_dsa_pub_key_file,
server_ecdsa_priv_key_file,
server_ecdsa_pub_key_file,
server_ed25519_priv_key_file,
server_ed25519_pub_key_file):
if os.path.exists(server_key_file):
os.remove(server_key_file)
keygen_process_dsa = subprocess.Popen(
[keygen, '-t',
'dsa',
'-b',
'1024',
'-C',
'"$(whoami)@$(hostname)-$(date -I)"',
'-f',
'ssh_host_dsa_key',
'-P',
''],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
cwd=server_key_dir
)
_, keygen_dsa_err = keygen_process_dsa.communicate()
if keygen_dsa_err:
print('ssh-keygen had errors: {0}'.format(keygen_dsa_err))
keygen_process_ecdsa = subprocess.Popen(
[keygen, '-t',
'ecdsa',
'-b',
'521',
'-C',
'"$(whoami)@$(hostname)-$(date -I)"',
'-f',
'ssh_host_ecdsa_key',
'-P',
''],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
cwd=server_key_dir
)
_, keygen_ecdsa_err = keygen_process_ecdsa.communicate()
if keygen_ecdsa_err:
print('ssh-keygen had errors: {0}'.format(keygen_ecdsa_err))
keygen_process_ed25519 = subprocess.Popen(
[keygen, '-t',
'ed25519',
'-b',
'521',
'-C',
'"$(whoami)@$(hostname)-$(date -I)"',
'-f',
'ssh_host_ed25519_key',
'-P',
''],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
cwd=server_key_dir
)
_, keygen_ed25519_err = keygen_process_ed25519.communicate()
if keygen_ed25519_err:
print('ssh-keygen had errors: {0}'.format(keygen_ed25519_err))
with salt.utils.fopen(os.path.join(TMP_CONF_DIR, 'sshd_config'), 'a') as ssh_config:
ssh_config.write('AuthorizedKeysFile {0}\n'.format(auth_key_file))
ssh_config.write('HostKey {0}\n'.format(server_dsa_priv_key_file))
ssh_config.write('HostKey {0}\n'.format(server_ecdsa_priv_key_file))
ssh_config.write('HostKey {0}\n'.format(server_ed25519_priv_key_file))
self.sshd_pidfile = os.path.join(TMP_CONF_DIR, 'sshd.pid')
self.sshd_process = subprocess.Popen(
[sshd, '-f', 'sshd_config', '-oPidFile={0}'.format(self.sshd_pidfile)],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
cwd=TMP_CONF_DIR
)
_, sshd_err = self.sshd_process.communicate()
if sshd_err:
print('sshd had errors on startup: {0}'.format(sshd_err))
else:
os.environ['SSH_DAEMON_RUNNING'] = 'True'
roster_path = os.path.join(FILES, 'conf/_ssh/roster')
shutil.copy(roster_path, TMP_CONF_DIR)
with salt.utils.fopen(os.path.join(TMP_CONF_DIR, 'roster'), 'a') as roster:
roster.write(' user: {0}\n'.format(pwd.getpwuid(os.getuid()).pw_name))
roster.write(' priv: {0}/{1}'.format(TMP_CONF_DIR, 'key_test'))
@property
def client(self):
'''
Return a local client which will be used for example to ping and sync
the test minions.
This client is defined as a class attribute because its creation needs
to be deferred to a later stage. If it were created on `__enter__`, as it
previously was, it would not receive the master events.
'''
if 'runtime_client' not in RUNTIME_CONFIGS:
RUNTIME_CONFIGS['runtime_client'] = salt.client.get_local_client(
mopts=self.master_opts
)
return RUNTIME_CONFIGS['runtime_client']
@classmethod
def transplant_configs(cls, transport='zeromq'):
if os.path.isdir(TMP_CONF_DIR):
shutil.rmtree(TMP_CONF_DIR)
os.makedirs(TMP_CONF_DIR)
print(' * Transplanting configuration files to {0!r}'.format(TMP_CONF_DIR))
running_tests_user = pwd.getpwuid(os.getuid()).pw_name
master_opts = salt.config._read_conf_file(os.path.join(CONF_DIR, 'master'))
master_opts['user'] = running_tests_user
tests_know_hosts_file = os.path.join(TMP_CONF_DIR, 'salt_ssh_known_hosts')
with salt.utils.fopen(tests_know_hosts_file, 'w') as known_hosts:
known_hosts.write('')
master_opts['known_hosts_file'] = tests_know_hosts_file
minion_config_path = os.path.join(CONF_DIR, 'minion')
minion_opts = salt.config._read_conf_file(minion_config_path)
minion_opts['user'] = running_tests_user
minion_opts['root_dir'] = master_opts['root_dir'] = os.path.join(TMP, 'master-minion-root')
syndic_opts = salt.config._read_conf_file(os.path.join(CONF_DIR, 'syndic'))
syndic_opts['user'] = running_tests_user
sub_minion_opts = salt.config._read_conf_file(os.path.join(CONF_DIR, 'sub_minion'))
sub_minion_opts['root_dir'] = os.path.join(TMP, 'sub-minion-root')
sub_minion_opts['user'] = running_tests_user
syndic_master_opts = salt.config._read_conf_file(os.path.join(CONF_DIR, 'syndic_master'))
syndic_master_opts['user'] = running_tests_user
syndic_master_opts['root_dir'] = os.path.join(TMP, 'syndic-master-root')
if transport == 'raet':
master_opts['transport'] = 'raet'
master_opts['raet_port'] = 64506
minion_opts['transport'] = 'raet'
minion_opts['raet_port'] = 64510
sub_minion_opts['transport'] = 'raet'
sub_minion_opts['raet_port'] = 64520
#syndic_master_opts['transport'] = 'raet'
# Set up config options that require internal data
master_opts['pillar_roots'] = {
'base': [os.path.join(FILES, 'pillar', 'base')]
}
master_opts['file_roots'] = {
'base': [
os.path.join(FILES, 'file', 'base'),
# Let's support runtime created files that can be used like:
# salt://my-temp-file.txt
TMP_STATE_TREE
],
# Alternate root to test __env__ choices
'prod': [
os.path.join(FILES, 'file', 'prod'),
TMP_PRODENV_STATE_TREE
]
}
master_opts['ext_pillar'].append(
{'cmd_yaml': 'cat {0}'.format(
os.path.join(
FILES,
'ext.yaml'
)
)}
)
# We need to copy the extension modules into the new master root_dir,
# otherwise the configured path will be prefixed by that root_dir
new_extension_modules_path = os.path.join(master_opts['root_dir'], 'extension_modules')
if not os.path.exists(new_extension_modules_path):
shutil.copytree(
os.path.join(
INTEGRATION_TEST_DIR, 'files', 'extension_modules'
),
new_extension_modules_path
)
master_opts['extension_modules'] = os.path.join(TMP, 'master-minion-root', 'extension_modules')
# Point the config values to the correct temporary paths
for name in ('hosts', 'aliases'):
optname = '{0}.file'.format(name)
optname_path = os.path.join(TMP, name)
master_opts[optname] = optname_path
minion_opts[optname] = optname_path
sub_minion_opts[optname] = optname_path
# ----- Transcribe Configuration ---------------------------------------------------------------------------->
for entry in os.listdir(CONF_DIR):
if entry in ('master', 'minion', 'sub_minion', 'syndic_master'):
# These have runtime computed values and will be handled
# differently
continue
entry_path = os.path.join(CONF_DIR, entry)
if os.path.isfile(entry_path):
shutil.copy(
entry_path,
os.path.join(TMP_CONF_DIR, entry)
)
elif os.path.isdir(entry_path):
shutil.copytree(
entry_path,
os.path.join(TMP_CONF_DIR, entry)
)
for entry in ('master', 'minion', 'sub_minion', 'syndic_master'):
computed_config = copy.deepcopy(locals()['{0}_opts'.format(entry)])
salt.utils.fopen(os.path.join(TMP_CONF_DIR, entry), 'w').write(
yaml.dump(computed_config, default_flow_style=False)
)
# <---- Transcribe Configuration -----------------------------------------------------------------------------
# ----- Verify Environment ---------------------------------------------------------------------------------->
master_opts = salt.config.master_config(os.path.join(TMP_CONF_DIR, 'master'))
minion_config_path = os.path.join(TMP_CONF_DIR, 'minion')
minion_opts = salt.config.minion_config(minion_config_path)
syndic_opts = salt.config.syndic_config(
os.path.join(TMP_CONF_DIR, 'syndic'),
minion_config_path
)
sub_minion_opts = salt.config.minion_config(os.path.join(TMP_CONF_DIR, 'sub_minion'))
syndic_master_opts = salt.config.master_config(os.path.join(TMP_CONF_DIR, 'syndic_master'))
RUNTIME_CONFIGS['master'] = freeze(master_opts)
RUNTIME_CONFIGS['minion'] = freeze(minion_opts)
RUNTIME_CONFIGS['syndic'] = freeze(syndic_opts)
RUNTIME_CONFIGS['sub_minion'] = freeze(sub_minion_opts)
RUNTIME_CONFIGS['syndic_master'] = freeze(syndic_master_opts)
verify_env([os.path.join(master_opts['pki_dir'], 'minions'),
os.path.join(master_opts['pki_dir'], 'minions_pre'),
os.path.join(master_opts['pki_dir'], 'minions_rejected'),
os.path.join(master_opts['pki_dir'], 'minions_denied'),
os.path.join(master_opts['cachedir'], 'jobs'),
os.path.join(master_opts['cachedir'], 'raet'),
os.path.join(master_opts['root_dir'], 'cache', 'tokens'),
os.path.join(syndic_master_opts['pki_dir'], 'minions'),
os.path.join(syndic_master_opts['pki_dir'], 'minions_pre'),
os.path.join(syndic_master_opts['pki_dir'], 'minions_rejected'),
os.path.join(syndic_master_opts['cachedir'], 'jobs'),
os.path.join(syndic_master_opts['cachedir'], 'raet'),
os.path.join(syndic_master_opts['root_dir'], 'cache', 'tokens'),
os.path.join(master_opts['pki_dir'], 'accepted'),
os.path.join(master_opts['pki_dir'], 'rejected'),
os.path.join(master_opts['pki_dir'], 'pending'),
os.path.join(syndic_master_opts['pki_dir'], 'accepted'),
os.path.join(syndic_master_opts['pki_dir'], 'rejected'),
os.path.join(syndic_master_opts['pki_dir'], 'pending'),
os.path.join(syndic_master_opts['cachedir'], 'raet'),
os.path.join(minion_opts['pki_dir'], 'accepted'),
os.path.join(minion_opts['pki_dir'], 'rejected'),
os.path.join(minion_opts['pki_dir'], 'pending'),
os.path.join(minion_opts['cachedir'], 'raet'),
os.path.join(sub_minion_opts['pki_dir'], 'accepted'),
os.path.join(sub_minion_opts['pki_dir'], 'rejected'),
os.path.join(sub_minion_opts['pki_dir'], 'pending'),
os.path.join(sub_minion_opts['cachedir'], 'raet'),
os.path.dirname(master_opts['log_file']),
minion_opts['extension_modules'],
sub_minion_opts['extension_modules'],
sub_minion_opts['pki_dir'],
master_opts['sock_dir'],
syndic_master_opts['sock_dir'],
sub_minion_opts['sock_dir'],
minion_opts['sock_dir'],
TMP_STATE_TREE,
TMP_PRODENV_STATE_TREE,
TMP,
],
running_tests_user)
cls.master_opts = master_opts
cls.minion_opts = minion_opts
cls.sub_minion_opts = sub_minion_opts
cls.syndic_opts = syndic_opts
cls.syndic_master_opts = syndic_master_opts
# <---- Verify Environment -----------------------------------------------------------------------------------
def __exit__(self, type, value, traceback):
'''
Kill the minion and master processes
'''
salt.utils.process.clean_proc(self.sub_minion_process, wait_for_kill=50)
self.sub_minion_process.join()
salt.utils.process.clean_proc(self.minion_process, wait_for_kill=50)
self.minion_process.join()
salt.utils.process.clean_proc(self.master_process, wait_for_kill=50)
self.master_process.join()
try:
salt.utils.process.clean_proc(self.syndic_process, wait_for_kill=50)
self.syndic_process.join()
except AttributeError:
pass
try:
salt.utils.process.clean_proc(self.smaster_process, wait_for_kill=50)
self.smaster_process.join()
except AttributeError:
pass
self._exit_mockbin()
self._exit_ssh()
def pre_setup_minions(self):
'''
Subclass this method for additional minion setups.
'''
def setup_minions(self):
# Wait for minions to connect back
wait_minion_connections = multiprocessing.Process(
target=self.wait_for_minion_connections,
args=(self.minion_targets, self.MINIONS_CONNECT_TIMEOUT)
)
wait_minion_connections.start()
wait_minion_connections.join()
wait_minion_connections.terminate()
if wait_minion_connections.exitcode > 0:
print(
'\n {RED_BOLD}*{ENDC} ERROR: Minions failed to connect'.format(
**self.colors
)
)
return False
del wait_minion_connections
sync_needed = self.parser.options.clean
if self.parser.options.clean is False:
def sumfile(fpath):
# Since we will be doing this for small files, it should be OK
fobj = fopen(fpath)
m = md5()
while True:
d = fobj.read(8096)
if not d:
break
m.update(d)
return m.hexdigest()
# Since we're not cleaning up, let's see if modules are already up
# to date so we don't need to re-sync them
modules_dir = os.path.join(FILES, 'file', 'base', '_modules')
for fname in os.listdir(modules_dir):
if not fname.endswith('.py'):
continue
dfile = os.path.join(
'/tmp/salttest/cachedir/extmods/modules/', fname
)
if not os.path.exists(dfile):
sync_needed = True
break
sfile = os.path.join(modules_dir, fname)
if sumfile(sfile) != sumfile(dfile):
sync_needed = True
break
if sync_needed:
# Wait for minions to "sync_all"
for target in [self.sync_minion_modules,
self.sync_minion_states]:
sync_minions = multiprocessing.Process(
target=target,
args=(self.minion_targets, self.MINIONS_SYNC_TIMEOUT)
)
sync_minions.start()
sync_minions.join()
if sync_minions.exitcode > 0:
return False
sync_minions.terminate()
del sync_minions
return True
def post_setup_minions(self):
'''
Subclass this method to execute code after the minions have been setup
'''
def _enter_mockbin(self):
path = os.environ.get('PATH', '')
path_items = path.split(os.pathsep)
if MOCKBIN not in path_items:
path_items.insert(0, MOCKBIN)
os.environ['PATH'] = os.pathsep.join(path_items)
def _exit_ssh(self):
if hasattr(self, 'sshd_process'):
try:
self.sshd_process.kill()
except OSError as exc:
if exc.errno != 3:
raise
with salt.utils.fopen(self.sshd_pidfile) as fhr:
try:
os.kill(int(fhr.read()), signal.SIGKILL)
except OSError as exc:
if exc.errno != 3:
raise
def _exit_mockbin(self):
path = os.environ.get('PATH', '')
path_items = path.split(os.pathsep)
try:
path_items.remove(MOCKBIN)
except ValueError:
pass
os.environ['PATH'] = os.pathsep.join(path_items)
@classmethod
def clean(cls):
'''
Clean out the tmp files
'''
for dirname in (TMP, TMP_STATE_TREE, TMP_PRODENV_STATE_TREE):
if os.path.isdir(dirname):
shutil.rmtree(dirname)
def wait_for_jid(self, targets, jid, timeout=120):
time.sleep(1) # Allow some time for minions to accept jobs
now = datetime.now()
expire = now + timedelta(seconds=timeout)
job_finished = False
while now <= expire:
running = self.__client_job_running(targets, jid)
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
if not running and job_finished is False:
# Avoid false positives by waiting one more second
job_finished = True
elif not running and job_finished is True:
return True
elif running and job_finished is True:
job_finished = False
if job_finished is False:
sys.stdout.write(
' * {YELLOW}[Quit in {0}]{ENDC} Waiting for {1}'.format(
'{0}'.format(expire - now).rsplit('.', 1)[0],
', '.join(running),
**self.colors
)
)
sys.stdout.flush()
time.sleep(1)
now = datetime.now()
else: # pylint: disable=W0120
sys.stdout.write(
'\n {RED_BOLD}*{ENDC} ERROR: Failed to get information '
'back\n'.format(**self.colors)
)
sys.stdout.flush()
return False
def __client_job_running(self, targets, jid):
running = self.client.cmd(
list(targets), 'saltutil.running', expr_form='list'
)
return [
k for (k, v) in running.iteritems() if v and v[0]['jid'] == jid
]
def wait_for_minion_connections(self, targets, timeout):
sys.stdout.write(
' {LIGHT_BLUE}*{ENDC} Waiting at most {0} for minions({1}) to '
'connect back\n'.format(
(timeout > 60 and
timedelta(seconds=timeout) or
'{0} secs'.format(timeout)),
', '.join(targets),
**self.colors
)
)
sys.stdout.flush()
expected_connections = set(targets)
now = datetime.now()
expire = now + timedelta(seconds=timeout)
while now <= expire:
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {YELLOW}[Quit in {0}]{ENDC} Waiting for {1}'.format(
'{0}'.format(expire - now).rsplit('.', 1)[0],
', '.join(expected_connections),
**self.colors
)
)
sys.stdout.flush()
responses = self.client.cmd(
list(expected_connections), 'test.ping', expr_form='list',
)
for target in responses:
if target not in expected_connections:
# Some other minion "listening"?
continue
expected_connections.remove(target)
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns',
PNUM)
)
)
sys.stdout.write(
' {LIGHT_GREEN}*{ENDC} {0} connected.\n'.format(
target, **self.colors
)
)
sys.stdout.flush()
if not expected_connections:
return
time.sleep(1)
now = datetime.now()
else: # pylint: disable=W0120
print(
'\n {RED_BOLD}*{ENDC} WARNING: Minions failed to connect '
'back. Tests requiring them WILL fail'.format(**self.colors)
)
try:
print_header(
'=', sep='=', inline=True,
width=getattr(self.parser.options, 'output_columns', PNUM)
)
except TypeError:
print_header('=', sep='=', inline=True)
raise SystemExit()
def sync_minion_modules_(self, modules_kind, targets, timeout=None):
if not timeout:
timeout = 120
# Let's sync all connected minions
print(
' {LIGHT_BLUE}*{ENDC} Syncing {1} on minions ({0}) '
'(saltutil.sync_{1})'.format(
', '.join(targets),
modules_kind,
**self.colors
)
)
syncing = set(targets)
jid_info = self.client.run_job(
list(targets), 'saltutil.sync_{0}'.format(modules_kind),
expr_form='list',
timeout=999999999999999,
)
if self.wait_for_jid(targets, jid_info['jid'], timeout) is False:
print(
' {RED_BOLD}*{ENDC} WARNING: Minions failed to sync {0}. '
'Tests requiring these {0} WILL fail'.format(
modules_kind, **self.colors)
)
raise SystemExit()
while syncing:
rdata = self.client.get_full_returns(jid_info['jid'], syncing, 1)
if rdata:
for name, output in rdata.iteritems():
if not output['ret']:
# Already synced!?
syncing.remove(name)
continue
if isinstance(output['ret'], string_types):
# An error has occurred
print(
' {RED_BOLD}*{ENDC} {0} Failed to sync {2}: '
'{1}'.format(
name, output['ret'],
modules_kind,
**self.colors)
)
return False
print(
' {LIGHT_GREEN}*{ENDC} Synced {0} {2}: '
'{1}'.format(
name,
', '.join(output['ret']),
modules_kind, **self.colors
)
)
# Synced!
try:
syncing.remove(name)
except KeyError:
print(
' {RED_BOLD}*{ENDC} {0} already synced??? '
'{1}'.format(name, output, **self.colors)
)
return True
def sync_minion_states(self, targets, timeout=None):
self.sync_minion_modules_('states', targets, timeout=timeout)
def sync_minion_modules(self, targets, timeout=None):
self.sync_minion_modules_('modules', targets, timeout=timeout)
class AdaptedConfigurationTestCaseMixIn(object):
__slots__ = ()
def get_config(self, config_for, from_scratch=False):
if from_scratch:
if config_for in ('master', 'syndic_master'):
return salt.config.master_config(self.get_config_file_path(config_for))
elif config_for in ('minion', 'sub_minion'):
return salt.config.minion_config(self.get_config_file_path(config_for))
elif config_for in ('syndic',):
return salt.config.syndic_config(
self.get_config_file_path(config_for),
self.get_config_file_path('minion')
)
elif config_for == 'client_config':
return salt.config.client_config(self.get_config_file_path('master'))
if config_for not in RUNTIME_CONFIGS:
if config_for in ('master', 'syndic_master'):
RUNTIME_CONFIGS[config_for] = freeze(
salt.config.master_config(self.get_config_file_path(config_for))
)
elif config_for in ('minion', 'sub_minion'):
RUNTIME_CONFIGS[config_for] = freeze(
salt.config.minion_config(self.get_config_file_path(config_for))
)
elif config_for in ('syndic',):
RUNTIME_CONFIGS[config_for] = freeze(
salt.config.syndic_config(
self.get_config_file_path(config_for),
self.get_config_file_path('minion')
)
)
elif config_for == 'client_config':
RUNTIME_CONFIGS[config_for] = freeze(
salt.config.client_config(self.get_config_file_path('master'))
)
return RUNTIME_CONFIGS[config_for]
def get_config_dir(self):
return TMP_CONF_DIR
def get_config_file_path(self, filename):
return os.path.join(TMP_CONF_DIR, filename)
@property
def master_opts(self):
'''
Return the options used for the master
'''
return self.get_config('master')
class SaltClientTestCaseMixIn(AdaptedConfigurationTestCaseMixIn):
_salt_client_config_file_name_ = 'master'
__slots__ = ('client', '_salt_client_config_file_name_')
@property
def client(self):
if 'runtime_client' not in RUNTIME_CONFIGS:
RUNTIME_CONFIGS['runtime_client'] = salt.client.get_local_client(
mopts=self.get_config(self._salt_client_config_file_name_, from_scratch=True)
)
return RUNTIME_CONFIGS['runtime_client']
class ModuleCase(TestCase, SaltClientTestCaseMixIn):
'''
Execute a module function
'''
def minion_run(self, _function, *args, **kw):
'''
Run a single salt function on the 'minion' target and condition
the return down to match the behavior of the raw function call
'''
return self.run_function(_function, args, **kw)
def run_function(self, function, arg=(), minion_tgt='minion', timeout=25,
**kwargs):
'''
Run a single salt function and condition the return down to match the
behavior of the raw function call
'''
know_to_return_none = (
'file.chown', 'file.chgrp', 'ssh.recv_known_host'
)
orig = self.client.cmd(
minion_tgt, function, arg, timeout=timeout, kwarg=kwargs
)
if minion_tgt not in orig:
self.skipTest(
'WARNING(SHOULD NOT HAPPEN #1935): Failed to get a reply '
'from the minion \'{0}\'. Command output: {1}'.format(
minion_tgt, orig
)
)
elif orig[minion_tgt] is None and function not in know_to_return_none:
self.skipTest(
'WARNING(SHOULD NOT HAPPEN #1935): Failed to get \'{0}\' from '
'the minion \'{1}\'. Command output: {2}'.format(
function, minion_tgt, orig
)
)
# Try to match stalled state functions
orig[minion_tgt] = self._check_state_return(
orig[minion_tgt]
)
return orig[minion_tgt]
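# Illustrative usage sketch inside a ModuleCase subclass (hypothetical test):
#
#   def test_ping(self):
#       self.assertTrue(self.run_function('test.ping'))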
def run_state(self, function, **kwargs):
'''
Run the state.single command and return the state return structure
'''
ret = self.run_function('state.single', [function], **kwargs)
return self._check_state_return(ret)
@property
def minion_opts(self):
'''
Return the options used for the minion
'''
return self.get_config('minion')
@property
def sub_minion_opts(self):
'''
Return the options used for the sub_minion
'''
return self.get_config('sub_minion')
def _check_state_return(self, ret):
if isinstance(ret, dict):
# This is the supposed return format for state calls
return ret
if isinstance(ret, list):
jids = []
# These are usually errors
for item in ret[:]:
if not isinstance(item, string_types):
# We don't know how to handle this
continue
match = STATE_FUNCTION_RUNNING_RE.match(item)
if not match:
# We don't know how to handle this
continue
jid = match.group('jid')
if jid in jids:
continue
jids.append(jid)
job_data = self.run_function(
'saltutil.find_job', [jid]
)
job_kill = self.run_function('saltutil.kill_job', [jid])
msg = (
'A running state.single was found causing a state lock. '
'Job details: {0!r} Killing Job Returned: {1!r}'.format(
job_data, job_kill
)
)
ret.append('[TEST SUITE ENFORCED]{0}'
'[/TEST SUITE ENFORCED]'.format(msg))
return ret
class SyndicCase(TestCase, SaltClientTestCaseMixIn):
'''
Execute a syndic based execution test
'''
_salt_client_config_file_name_ = 'syndic_master'
def run_function(self, function, arg=()):
'''
Run a single salt function and condition the return down to match the
behavior of the raw function call
'''
orig = self.client.cmd('minion', function, arg, timeout=25)
if 'minion' not in orig:
self.skipTest(
'WARNING(SHOULD NOT HAPPEN #1935): Failed to get a reply '
'from the minion. Command output: {0}'.format(orig)
)
return orig['minion']
class ShellCase(AdaptedConfigurationTestCaseMixIn, ShellTestCase):
'''
Execute a test for a shell command
'''
_code_dir_ = CODE_DIR
_script_dir_ = SCRIPT_DIR
_python_executable_ = PYEXEC
def run_salt(self, arg_str, with_retcode=False, catch_stderr=False):
'''
Execute salt
'''
arg_str = '-c {0} {1}'.format(self.get_config_dir(), arg_str)
return self.run_script('salt', arg_str, with_retcode=with_retcode, catch_stderr=catch_stderr)
def run_ssh(self, arg_str, with_retcode=False, catch_stderr=False):
'''
Execute salt-ssh
'''
arg_str = '-c {0} -i --priv {1} --roster-file {2} --out=json localhost {3}'.format(self.get_config_dir(), os.path.join(TMP_CONF_DIR, 'key_test'), os.path.join(TMP_CONF_DIR, 'roster'), arg_str)
return self.run_script('salt-ssh', arg_str, with_retcode=with_retcode, catch_stderr=catch_stderr, raw=True)
def run_run(self, arg_str, with_retcode=False, catch_stderr=False):
'''
Execute salt-run
'''
arg_str = '-c {0} {1} --async'.format(self.get_config_dir(), arg_str)
return self.run_script('salt-run', arg_str, with_retcode=with_retcode, catch_stderr=catch_stderr)
def run_run_plus(self, fun, options='', *arg, **kwargs):
'''
Execute Salt run and the salt run function and return the data from
each in a dict
'''
ret = {}
ret['out'] = self.run_run(
'{0} {1} {2}'.format(options, fun, ' '.join(arg)), catch_stderr=kwargs.get('catch_stderr', None)
)
opts = {}
opts.update(self.get_config('master'))
opts.update({'doc': False, 'fun': fun, 'arg': arg})
with RedirectStdStreams():
runner = salt.runner.Runner(opts)
ret['fun'] = runner.run()
return ret
def run_key(self, arg_str, catch_stderr=False, with_retcode=False):
'''
Execute salt-key
'''
arg_str = '-c {0} {1}'.format(self.get_config_dir(), arg_str)
return self.run_script(
'salt-key',
arg_str,
catch_stderr=catch_stderr,
with_retcode=with_retcode
)
def run_cp(self, arg_str, with_retcode=False, catch_stderr=False):
'''
Execute salt-cp
'''
arg_str = '--config-dir {0} {1}'.format(self.get_config_dir(), arg_str)
return self.run_script('salt-cp', arg_str, with_retcode=with_retcode, catch_stderr=catch_stderr)
def run_call(self, arg_str, with_retcode=False, catch_stderr=False):
arg_str = '--config-dir {0} {1}'.format(self.get_config_dir(), arg_str)
return self.run_script('salt-call', arg_str, with_retcode=with_retcode, catch_stderr=catch_stderr)
def run_cloud(self, arg_str, catch_stderr=False, timeout=None):
'''
Execute salt-cloud
'''
arg_str = '-c {0} {1}'.format(self.get_config_dir(), arg_str)
return self.run_script('salt-cloud', arg_str, catch_stderr, timeout)
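# Illustrative usage sketch (hypothetical arguments): the run_* helpers return
# the captured output lines of the corresponding CLI tool, e.g.
#
#   out = self.run_salt('"*" test.ping')
#   out, err = self.run_salt('"*" test.ping', catch_stderr=True)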
class ShellCaseCommonTestsMixIn(CheckShellBinaryNameAndVersionMixIn):
_call_binary_expected_version_ = salt.version.__version__
def test_salt_with_git_version(self):
if getattr(self, '_call_binary_', None) is None:
self.skipTest('\'_call_binary_\' not defined.')
from salt.utils import which
from salt.version import __version_info__, SaltStackVersion
git = which('git')
if not git:
self.skipTest('The git binary is not available')
# Let's get the output of git describe
process = subprocess.Popen(
[git, 'describe', '--tags', '--first-parent', '--match', 'v[0-9]*'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
cwd=CODE_DIR
)
out, err = process.communicate()
if process.returncode != 0:
process = subprocess.Popen(
[git, 'describe', '--tags', '--match', 'v[0-9]*'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
cwd=CODE_DIR
)
out, err = process.communicate()
if not out:
self.skipTest(
'Failed to get the output of \'git describe\'. '
'Error: {0!r}'.format(
err
)
)
parsed_version = SaltStackVersion.parse(out)
if parsed_version.info < __version_info__:
self.skipTest(
'We\'re likely about to release a new version. This test '
'would fail. Parsed({0!r}) < Expected({1!r})'.format(
parsed_version.info, __version_info__
)
)
elif parsed_version.info != __version_info__:
self.skipTest(
'In order to get the proper salt version with the '
'git hash you need to update salt\'s local git '
'tags. Something like: \'git fetch --tags\' or '
'\'git fetch --tags upstream\' if you followed '
'salt\'s contribute documentation. The version '
'string WILL NOT include the git hash.'
)
out = '\n'.join(self.run_script(self._call_binary_, '--version'))
self.assertIn(parsed_version.string, out)
@requires_sshd_server
class SSHCase(ShellCase):
'''
Execute a command via salt-ssh
'''
def _arg_str(self, function, arg):
return '{0} {1}'.format(function, ' '.join(arg))
def run_function(self, function, arg=(), timeout=25, **kwargs):
ret = self.run_ssh(self._arg_str(function, arg))
try:
return json.loads(ret)['localhost']
except Exception:
return ret
class SaltReturnAssertsMixIn(object):
def assertReturnSaltType(self, ret):
try:
self.assertTrue(isinstance(ret, dict))
except AssertionError:
raise AssertionError(
'{0} is not dict. Salt returned: {1}'.format(
type(ret).__name__, ret
)
)
def assertReturnNonEmptySaltType(self, ret):
self.assertReturnSaltType(ret)
try:
self.assertNotEqual(ret, {})
except AssertionError:
raise AssertionError(
'{0} is equal to {1}. Salt returned an empty dictionary.'.format(ret, {})
)
def __return_valid_keys(self, keys):
if isinstance(keys, tuple):
# If it's a tuple, turn it into a list
keys = list(keys)
elif isinstance(keys, basestring):
# If it's a basestring, make it a one-item list
keys = [keys]
elif not isinstance(keys, list):
# If we've reached here, it's a bad type passed to keys
raise RuntimeError('The passed keys need to be a list')
return keys
def __getWithinSaltReturn(self, ret, keys):
self.assertReturnNonEmptySaltType(ret)
keys = self.__return_valid_keys(keys)
okeys = keys[:]
for part in ret.itervalues():
try:
ret_item = part[okeys.pop(0)]
except (KeyError, TypeError):
raise AssertionError(
'Could not get ret{0} from salt\'s return: {1}'.format(
''.join(['[{0!r}]'.format(k) for k in keys]), part
)
)
while okeys:
try:
ret_item = ret_item[okeys.pop(0)]
except (KeyError, TypeError):
raise AssertionError(
'Could not get ret{0} from salt\'s return: {1}'.format(
''.join(['[{0!r}]'.format(k) for k in keys]), part
)
)
return ret_item
def assertSaltTrueReturn(self, ret):
try:
self.assertTrue(self.__getWithinSaltReturn(ret, 'result'))
except AssertionError:
log.info('Salt Full Return:\n{0}'.format(pprint.pformat(ret)))
try:
raise AssertionError(
'{result} is not True. Salt Comment:\n{comment}'.format(
**(ret.values()[0])
)
)
except (AttributeError, IndexError):
raise AssertionError(
'Failed to get result. Salt Returned:\n{0}'.format(
pprint.pformat(ret)
)
)
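# Typical usage in a state test (illustrative; hypothetical state call):
#
#   ret = self.run_state('file.managed', name='/tmp/example', contents='data')
#   self.assertSaltTrueReturn(ret)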
def assertSaltFalseReturn(self, ret):
try:
self.assertFalse(self.__getWithinSaltReturn(ret, 'result'))
except AssertionError:
log.info('Salt Full Return:\n{0}'.format(pprint.pformat(ret)))
try:
raise AssertionError(
'{result} is not False. Salt Comment:\n{comment}'.format(
**(ret.values()[0])
)
)
except (AttributeError, IndexError):
raise AssertionError(
'Failed to get result. Salt Returned: {0}'.format(ret)
)
def assertSaltNoneReturn(self, ret):
try:
self.assertIsNone(self.__getWithinSaltReturn(ret, 'result'))
except AssertionError:
log.info('Salt Full Return:\n{0}'.format(pprint.pformat(ret)))
try:
raise AssertionError(
'{result} is not None. Salt Comment:\n{comment}'.format(
**(ret.values()[0])
)
)
except (AttributeError, IndexError):
raise AssertionError(
'Failed to get result. Salt Returned: {0}'.format(ret)
)
def assertInSaltComment(self, in_comment, ret):
return self.assertIn(
in_comment, self.__getWithinSaltReturn(ret, 'comment')
)
def assertNotInSaltComment(self, not_in_comment, ret):
return self.assertNotIn(
not_in_comment, self.__getWithinSaltReturn(ret, 'comment')
)
def assertSaltCommentRegexpMatches(self, ret, pattern):
return self.assertInSaltReturnRegexpMatches(ret, pattern, 'comment')
def assertInSaltStateWarning(self, in_comment, ret):
return self.assertIn(
in_comment, self.__getWithinSaltReturn(ret, 'warnings')
)
def assertNotInSaltStateWarning(self, not_in_comment, ret):
return self.assertNotIn(
not_in_comment, self.__getWithinSaltReturn(ret, 'warnings')
)
def assertInSaltReturn(self, item_to_check, ret, keys):
return self.assertIn(
item_to_check, self.__getWithinSaltReturn(ret, keys)
)
def assertNotInSaltReturn(self, item_to_check, ret, keys):
return self.assertNotIn(
item_to_check, self.__getWithinSaltReturn(ret, keys)
)
def assertInSaltReturnRegexpMatches(self, ret, pattern, keys=()):
return self.assertRegexpMatches(
self.__getWithinSaltReturn(ret, keys), pattern
)
def assertSaltStateChangesEqual(self, ret, comparison, keys=()):
keys = ['changes'] + self.__return_valid_keys(keys)
return self.assertEqual(
self.__getWithinSaltReturn(ret, keys), comparison
)
def assertSaltStateChangesNotEqual(self, ret, comparison, keys=()):
keys = ['changes'] + self.__return_valid_keys(keys)
return self.assertNotEqual(
self.__getWithinSaltReturn(ret, keys), comparison
)
|
test_state.py
|
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import os
import shutil
import sys
import tempfile
import textwrap
import threading
import time
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.helpers import with_tempdir
from tests.support.unit import skipIf
from tests.support.paths import BASE_FILES, TMP, TMP_PILLAR_TREE
from tests.support.mixins import SaltReturnAssertsMixin
# Import Salt libs
import salt.utils.atomicfile
import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
from salt.modules.virtualenv_mod import KNOWN_BINARY_NAMES
# Import 3rd-party libs
from salt.ext import six
log = logging.getLogger(__name__)
DEFAULT_ENDING = salt.utils.stringutils.to_bytes(os.linesep)
def trim_line_end(line):
'''
Remove CRLF or LF from the end of line.
'''
if line[-2:] == salt.utils.stringutils.to_bytes('\r\n'):
return line[:-2]
elif line[-1:] == salt.utils.stringutils.to_bytes('\n'):
return line[:-1]
raise Exception("Invalid line ending")
def reline(source, dest, force=False, ending=DEFAULT_ENDING):
'''
Normalize the line endings of a file.
'''
fp, tmp = tempfile.mkstemp()
os.close(fp)
with salt.utils.files.fopen(tmp, 'wb') as tmp_fd:
with salt.utils.files.fopen(source, 'rb') as fd:
lines = fd.readlines()
for line in lines:
line_noend = trim_line_end(line)
tmp_fd.write(line_noend + ending)
if os.path.exists(dest) and force:
os.remove(dest)
os.rename(tmp, dest)
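# Hypothetical usage (paths are illustrative only):
#   reline('fixture_crlf.txt', 'fixture_lf.txt', force=True,
#          ending=salt.utils.stringutils.to_bytes('\n'))
# rewrites every line of the source with LF endings into the destination,
# replacing the destination if it already exists.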
class StateModuleTest(ModuleCase, SaltReturnAssertsMixin):
'''
Validate the state module
'''
maxDiff = None
@classmethod
def setUpClass(cls):
def _reline(path, ending=DEFAULT_ENDING):
'''
Normalize the line endings of a file.
'''
with salt.utils.files.fopen(path, 'rb') as fhr:
lines = fhr.read().splitlines()
with salt.utils.atomicfile.atomic_open(path, 'wb') as fhw:
for line in lines:
fhw.write(line + ending)
destpath = os.path.join(BASE_FILES, 'testappend', 'firstif')
_reline(destpath)
destpath = os.path.join(BASE_FILES, 'testappend', 'secondif')
_reline(destpath)
def test_show_highstate(self):
'''
state.show_highstate
'''
high = self.run_function('state.show_highstate')
destpath = os.path.join(TMP, 'testfile')
self.assertTrue(isinstance(high, dict))
self.assertTrue(destpath in high)
self.assertEqual(high[destpath]['__env__'], 'base')
def test_show_lowstate(self):
'''
state.show_lowstate
'''
low = self.run_function('state.show_lowstate')
self.assertTrue(isinstance(low, list))
self.assertTrue(isinstance(low[0], dict))
def test_show_states(self):
'''
state.show_states
'''
states = self.run_function('state.show_states')
self.assertTrue(isinstance(states, list))
self.assertTrue(isinstance(states[0], six.string_types))
states = self.run_function('state.show_states', sorted=False)
self.assertTrue(isinstance(states, list))
self.assertTrue(isinstance(states[0], six.string_types))
def test_catch_recurse(self):
'''
state.show_sls used to catch a recursive ref
'''
err = self.run_function('state.sls', mods='recurse_fail')
self.assertIn('recursive', err[0])
def test_no_recurse(self):
'''
verify that a sls structure is NOT a recursive ref
'''
sls = self.run_function('state.show_sls', mods='recurse_ok')
self.assertIn('snmpd', sls)
def test_no_recurse_two(self):
'''
verify that a sls structure is NOT a recursive ref
'''
sls = self.run_function('state.show_sls', mods='recurse_ok_two')
self.assertIn('/etc/nagios/nrpe.cfg', sls)
def test_running_dictionary_consistency(self):
'''
Test the structure of the running dictionary so we don't change it
without deprecating/documenting the change
'''
running_dict_fields = [
'__id__',
'__run_num__',
'__sls__',
'changes',
'comment',
'duration',
'name',
'result',
'start_time',
]
sls = self.run_function('state.single',
fun='test.succeed_with_changes',
name='gndn')
for state, ret in sls.items():
for field in running_dict_fields:
self.assertIn(field, ret)
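        # The running-dictionary keys follow salt's chunk tag convention,
        # '<state module>_|-<ID>_|-<name>_|-<function>'; the state.single call
        # above therefore yields 'test_|-gndn_|-gndn_|-succeed_with_changes'.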
def test_running_dictionary_key_sls(self):
'''
Ensure the __sls__ key is either null or a string
'''
sls1 = self.run_function('state.single',
fun='test.succeed_with_changes',
name='gndn')
sls2 = self.run_function('state.sls', mods='gndn')
for state, ret in sls1.items():
self.assertTrue(isinstance(ret['__sls__'], type(None)))
for state, ret in sls2.items():
self.assertTrue(isinstance(ret['__sls__'], six.string_types))
def _remove_request_cache_file(self):
'''
remove minion state request file
'''
cache_file = os.path.join(self.get_config('minion')['cachedir'], 'req_state.p')
if os.path.exists(cache_file):
os.remove(cache_file)
def test_request(self):
'''
verify sending a state request to the minion(s)
'''
self._remove_request_cache_file()
ret = self.run_function('state.request', mods='modules.state.requested')
result = ret['cmd_|-count_root_dir_contents_|-ls -a / | wc -l_|-run']['result']
self.assertEqual(result, None)
def test_check_request(self):
'''
verify checking a state request sent to the minion(s)
'''
self._remove_request_cache_file()
self.run_function('state.request', mods='modules.state.requested')
ret = self.run_function('state.check_request')
result = ret['default']['test_run']['cmd_|-count_root_dir_contents_|-ls -a / | wc -l_|-run']['result']
self.assertEqual(result, None)
def test_clear_request(self):
'''
verify clearing a state request sent to the minion(s)
'''
self._remove_request_cache_file()
self.run_function('state.request', mods='modules.state.requested')
ret = self.run_function('state.clear_request')
self.assertTrue(ret)
def test_run_request_succeeded(self):
'''
verify running a state request sent to the minion(s)
'''
self._remove_request_cache_file()
if salt.utils.platform.is_windows():
self.run_function('state.request', mods='modules.state.requested_win')
else:
self.run_function('state.request', mods='modules.state.requested')
ret = self.run_function('state.run_request')
if salt.utils.platform.is_windows():
key = 'cmd_|-count_root_dir_contents_|-Get-ChildItem C:\\\\ | Measure-Object | %{$_.Count}_|-run'
else:
key = 'cmd_|-count_root_dir_contents_|-ls -a / | wc -l_|-run'
result = ret[key]['result']
self.assertTrue(result)
def test_run_request_failed_no_request_staged(self):
'''
verify not running a state request sent to the minion(s)
'''
self._remove_request_cache_file()
self.run_function('state.request', mods='modules.state.requested')
self.run_function('state.clear_request')
ret = self.run_function('state.run_request')
self.assertEqual(ret, {})
@with_tempdir()
def test_issue_1896_file_append_source(self, base_dir):
'''
Verify that we can append a file's contents
'''
testfile = os.path.join(base_dir, 'test.append')
ret = self.run_state('file.touch', name=testfile)
self.assertSaltTrueReturn(ret)
ret = self.run_state(
'file.append',
name=testfile,
source='salt://testappend/firstif')
self.assertSaltTrueReturn(ret)
ret = self.run_state(
'file.append',
name=testfile,
source='salt://testappend/secondif')
self.assertSaltTrueReturn(ret)
with salt.utils.files.fopen(testfile, 'r') as fp_:
testfile_contents = salt.utils.stringutils.to_unicode(fp_.read())
contents = textwrap.dedent('''\
# set variable identifying the chroot you work in (used in the prompt below)
if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then
debian_chroot=$(cat /etc/debian_chroot)
fi
# enable bash completion in interactive shells
if [ -f /etc/bash_completion ] && ! shopt -oq posix; then
. /etc/bash_completion
fi
''')
if salt.utils.platform.is_windows():
new_contents = contents.splitlines()
contents = os.linesep.join(new_contents)
contents += os.linesep
self.assertMultiLineEqual(contents, testfile_contents)
ret = self.run_state(
'file.append',
name=testfile,
source='salt://testappend/secondif')
self.assertSaltTrueReturn(ret)
ret = self.run_state(
'file.append',
name=testfile,
source='salt://testappend/firstif')
self.assertSaltTrueReturn(ret)
with salt.utils.files.fopen(testfile, 'r') as fp_:
testfile_contents = salt.utils.stringutils.to_unicode(fp_.read())
self.assertMultiLineEqual(contents, testfile_contents)
def test_issue_1876_syntax_error(self):
'''
verify that we catch the following syntax error::
/tmp/salttest/issue-1876:
file:
- managed
- source: salt://testfile
file.append:
- text: foo
'''
testfile = os.path.join(TMP, 'issue-1876')
sls = self.run_function('state.sls', mods='issue-1876')
self.assertIn(
'ID \'{0}\' in SLS \'issue-1876\' contains multiple state '
'declarations of the same type'.format(testfile),
sls
)
def test_issue_1879_too_simple_contains_check(self):
expected = textwrap.dedent('''\
# set variable identifying the chroot you work in (used in the prompt below)
if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then
debian_chroot=$(cat /etc/debian_chroot)
fi
# enable bash completion in interactive shells
if [ -f /etc/bash_completion ] && ! shopt -oq posix; then
. /etc/bash_completion
fi
''')
if salt.utils.platform.is_windows():
new_contents = expected.splitlines()
expected = os.linesep.join(new_contents)
expected += os.linesep
testfile = os.path.join(TMP, 'issue-1879')
        # Delete if existing
if os.path.isfile(testfile):
os.unlink(testfile)
# Create the file
ret = self.run_function('state.sls', mods='issue-1879', timeout=120)
self.assertSaltTrueReturn(ret)
# The first append
ret = self.run_function(
'state.sls', mods='issue-1879.step-1', timeout=120
)
self.assertSaltTrueReturn(ret)
# The second append
ret = self.run_function(
'state.sls', mods='issue-1879.step-2', timeout=120
)
self.assertSaltTrueReturn(ret)
# Does it match?
try:
with salt.utils.files.fopen(testfile, 'r') as fp_:
contents = salt.utils.stringutils.to_unicode(fp_.read())
self.assertMultiLineEqual(expected, contents)
# Make sure we don't re-append existing text
ret = self.run_function(
'state.sls', mods='issue-1879.step-1', timeout=120
)
self.assertSaltTrueReturn(ret)
ret = self.run_function(
'state.sls', mods='issue-1879.step-2', timeout=120
)
self.assertSaltTrueReturn(ret)
with salt.utils.files.fopen(testfile, 'r') as fp_:
contents = salt.utils.stringutils.to_unicode(fp_.read())
self.assertMultiLineEqual(expected, contents)
except Exception:
if os.path.exists(testfile):
shutil.copy(testfile, testfile + '.bak')
raise
finally:
if os.path.exists(testfile):
os.unlink(testfile)
def test_include(self):
tempdir = tempfile.mkdtemp(dir=TMP)
self.addCleanup(shutil.rmtree, tempdir, ignore_errors=True)
pillar = {}
for path in ('include-test', 'to-include-test', 'exclude-test'):
pillar[path] = os.path.join(tempdir, path)
ret = self.run_function('state.sls', mods='include-test', pillar=pillar)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isfile(pillar['include-test']))
self.assertTrue(os.path.isfile(pillar['to-include-test']))
self.assertFalse(os.path.isfile(pillar['exclude-test']))
def test_exclude(self):
tempdir = tempfile.mkdtemp(dir=TMP)
self.addCleanup(shutil.rmtree, tempdir, ignore_errors=True)
pillar = {}
for path in ('include-test', 'exclude-test', 'to-include-test'):
pillar[path] = os.path.join(tempdir, path)
ret = self.run_function('state.sls', mods='exclude-test', pillar=pillar)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isfile(pillar['include-test']))
self.assertTrue(os.path.isfile(pillar['exclude-test']))
self.assertFalse(os.path.isfile(pillar['to-include-test']))
@skipIf(salt.utils.path.which_bin(KNOWN_BINARY_NAMES) is None, 'virtualenv not installed')
def test_issue_2068_template_str(self):
venv_dir = os.path.join(
TMP, 'issue-2068-template-str'
)
try:
ret = self.run_function(
'state.sls', mods='issue-2068-template-str-no-dot',
timeout=120
)
self.assertSaltTrueReturn(ret)
finally:
if os.path.isdir(venv_dir):
shutil.rmtree(venv_dir)
# Let's load the template from the filesystem. If running this state
# with state.sls works, so should using state.template_str
template_path = os.path.join(
os.path.dirname(os.path.dirname(__file__)),
'files', 'file', 'base', 'issue-2068-template-str-no-dot.sls'
)
with salt.utils.files.fopen(template_path, 'r') as fp_:
template = salt.utils.stringutils.to_unicode(fp_.read())
ret = self.run_function(
'state.template_str', [template], timeout=120
)
self.assertSaltTrueReturn(ret)
# Now using state.template
ret = self.run_function(
'state.template', [template_path], timeout=120
)
self.assertSaltTrueReturn(ret)
# Now the problematic #2068 including dot's
ret = self.run_function(
'state.sls', mods='issue-2068-template-str', timeout=120
)
self.assertSaltTrueReturn(ret)
# Let's load the template from the filesystem. If running this state
# with state.sls works, so should using state.template_str
template_path = os.path.join(
os.path.dirname(os.path.dirname(__file__)),
'files', 'file', 'base', 'issue-2068-template-str.sls'
)
with salt.utils.files.fopen(template_path, 'r') as fp_:
template = salt.utils.stringutils.to_unicode(fp_.read())
ret = self.run_function(
'state.template_str', [template], timeout=120
)
self.assertSaltTrueReturn(ret)
# Now using state.template
ret = self.run_function(
'state.template', [template_path], timeout=120
)
self.assertSaltTrueReturn(ret)
def test_template_invalid_items(self):
TEMPLATE = textwrap.dedent('''\
{0}:
- issue-2068-template-str
/tmp/test-template-invalid-items:
file:
- managed
- source: salt://testfile
''')
for item in ('include', 'exclude', 'extends'):
ret = self.run_function(
'state.template_str', [TEMPLATE.format(item)]
)
self.assertTrue(isinstance(ret, list))
self.assertNotEqual(ret, [])
self.assertEqual(
['The \'{0}\' declaration found on \'<template-str>\' is '
'invalid when rendering single templates'.format(item)],
ret
)
def test_pydsl(self):
'''
Test the basics of the pydsl
'''
ret = self.run_function('state.sls', mods='pydsl-1')
self.assertSaltTrueReturn(ret)
def test_issues_7905_and_8174_sls_syntax_error(self):
'''
Call sls file with yaml syntax error.
        Ensure these errors are detected and presented to the user without
stack traces.
'''
ret = self.run_function('state.sls', mods='syntax.badlist')
self.assertEqual(ret, [
'State \'A\' in SLS \'syntax.badlist\' is not formed as a list'
])
ret = self.run_function('state.sls', mods='syntax.badlist2')
self.assertEqual(ret, [
'State \'C\' in SLS \'syntax.badlist2\' is not formed as a list'
])
def test_requisites_mixed_require_prereq_use(self):
'''
Call sls file containing several requisites.
'''
expected_simple_result = {
'cmd_|-A_|-echo A_|-run': {
'__run_num__': 2,
'comment': 'Command "echo A" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B_|-run': {
'__run_num__': 1,
'comment': 'Command "echo B" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C_|-run': {
'__run_num__': 0,
'comment': 'Command "echo C" run',
'result': True,
'changes': True}
}
expected_result = {
'cmd_|-A_|-echo A fifth_|-run': {
'__run_num__': 4,
'comment': 'Command "echo A fifth" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B third_|-run': {
'__run_num__': 2,
'comment': 'Command "echo B third" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo C second" run',
'result': True,
'changes': True},
'cmd_|-D_|-echo D first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo D first" run',
'result': True,
'changes': True},
'cmd_|-E_|-echo E fourth_|-run': {
'__run_num__': 3,
'comment': 'Command "echo E fourth" run',
'result': True,
'changes': True}
}
expected_req_use_result = {
'cmd_|-A_|-echo A_|-run': {
'__run_num__': 1,
'comment': 'Command "echo A" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B_|-run': {
'__run_num__': 4,
'comment': 'Command "echo B" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C_|-run': {
'__run_num__': 0,
'comment': 'Command "echo C" run',
'result': True,
'changes': True},
'cmd_|-D_|-echo D_|-run': {
'__run_num__': 5,
'comment': 'Command "echo D" run',
'result': True,
'changes': True},
'cmd_|-E_|-echo E_|-run': {
'__run_num__': 2,
'comment': 'Command "echo E" run',
'result': True,
'changes': True},
'cmd_|-F_|-echo F_|-run': {
'__run_num__': 3,
'comment': 'Command "echo F" run',
'result': True,
'changes': True}
}
ret = self.run_function('state.sls', mods='requisites.mixed_simple')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_simple_result, result)
# test Traceback recursion prereq+require #8785
# TODO: this is actually failing badly
#ret = self.run_function('state.sls', mods='requisites.prereq_require_recursion_error2')
#self.assertEqual(
# ret,
# ['A recursive requisite was found, SLS "requisites.prereq_require_recursion_error2" ID "B" ID "A"']
#)
# test Infinite recursion prereq+require #8785 v2
# TODO: this is actually failing badly
#ret = self.run_function('state.sls', mods='requisites.prereq_require_recursion_error3')
#self.assertEqual(
# ret,
# ['A recursive requisite was found, SLS "requisites.prereq_require_recursion_error2" ID "B" ID "A"']
#)
# test Infinite recursion prereq+require #8785 v3
# TODO: this is actually failing badly, and expected result is maybe not a recursion
#ret = self.run_function('state.sls', mods='requisites.prereq_require_recursion_error4')
#self.assertEqual(
# ret,
# ['A recursive requisite was found, SLS "requisites.prereq_require_recursion_error2" ID "B" ID "A"']
#)
        # undetected infinite loops prevent this test from running...
# TODO: this is actually failing badly
#ret = self.run_function('state.sls', mods='requisites.mixed_complex1')
#result = self.normalize_ret(ret)
#self.assertEqual(expected_result, result)
def test_watch_in(self):
'''
test watch_in requisite when there is a success
'''
ret = self.run_function('state.sls', mods='requisites.watch_in')
changes = 'test_|-return_changes_|-return_changes_|-succeed_with_changes'
watch = 'test_|-watch_states_|-watch_states_|-succeed_without_changes'
self.assertEqual(ret[changes]['__run_num__'], 0)
self.assertEqual(ret[watch]['__run_num__'], 2)
self.assertEqual('Watch statement fired.', ret[watch]['comment'])
self.assertEqual('Something pretended to change',
ret[changes]['changes']['testing']['new'])
def test_watch_in_failure(self):
'''
test watch_in requisite when there is a failure
'''
ret = self.run_function('state.sls', mods='requisites.watch_in_failure')
fail = 'test_|-return_changes_|-return_changes_|-fail_with_changes'
watch = 'test_|-watch_states_|-watch_states_|-succeed_without_changes'
self.assertEqual(False, ret[fail]['result'])
self.assertEqual('One or more requisite failed: requisites.watch_in_failure.return_changes',
ret[watch]['comment'])
def normalize_ret(self, ret):
'''
Normalize the return to the format that we'll use for result checking
'''
result = {}
for item, descr in six.iteritems(ret):
result[item] = {
'__run_num__': descr['__run_num__'],
'comment': descr['comment'],
'result': descr['result'],
                'changes': descr['changes'] != {}  # whether there were any changes
}
return result
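        # For instance, a raw entry whose 'changes' is {'stdout': 'A', 'retcode': 0}
        # normalizes to 'changes': True, while an empty changes dict becomes False;
        # '__run_num__', 'comment' and 'result' are copied through untouched. The
        # expected_result dictionaries in this class are written against this shape.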
def test_requisites_require_ordering_and_errors(self):
'''
Call sls file containing several require_in and require.
Ensure that some of them are failing and that the order is right.
'''
expected_result = {
'cmd_|-A_|-echo A fifth_|-run': {
'__run_num__': 4,
'comment': 'Command "echo A fifth" run',
'result': True,
'changes': True,
},
'cmd_|-B_|-echo B second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo B second" run',
'result': True,
'changes': True,
},
'cmd_|-C_|-echo C third_|-run': {
'__run_num__': 2,
'comment': 'Command "echo C third" run',
'result': True,
'changes': True,
},
'cmd_|-D_|-echo D first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo D first" run',
'result': True,
'changes': True,
},
'cmd_|-E_|-echo E fourth_|-run': {
'__run_num__': 3,
'comment': 'Command "echo E fourth" run',
'result': True,
'changes': True,
},
'cmd_|-F_|-echo F_|-run': {
'__run_num__': 5,
'comment': 'The following requisites were not found:\n'
+ ' require:\n'
+ ' foobar: A\n',
'result': False,
'changes': False,
},
'cmd_|-G_|-echo G_|-run': {
'__run_num__': 6,
'comment': 'The following requisites were not found:\n'
+ ' require:\n'
+ ' cmd: Z\n',
'result': False,
'changes': False,
},
'cmd_|-H_|-echo H_|-run': {
'__run_num__': 7,
'comment': 'The following requisites were not found:\n'
+ ' require:\n'
+ ' cmd: Z\n',
'result': False,
'changes': False,
}
}
ret = self.run_function('state.sls', mods='requisites.require')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
ret = self.run_function('state.sls', mods='requisites.require_error1')
self.assertEqual(ret, [
"Cannot extend ID 'W' in 'base:requisites.require_error1'. It is not part of the high state.\nThis is likely due to a missing include statement or an incorrectly typed ID.\nEnsure that a state with an ID of 'W' is available\nin environment 'base' and to SLS 'requisites.require_error1'"
])
# issue #8235
# FIXME: Why is require enforcing list syntax while require_in does not?
# And why preventing it?
# Currently this state fails, should return C/B/A
result = {}
ret = self.run_function('state.sls', mods='requisites.require_simple_nolist')
self.assertEqual(ret, [
'The require statement in state \'B\' in SLS '
+ '\'requisites.require_simple_nolist\' needs to be formed as a list'
])
# commented until a fix is made for issue #8772
# TODO: this test actually fails
#ret = self.run_function('state.sls', mods='requisites.require_error2')
#self.assertEqual(ret, [
# 'Cannot extend state foobar for ID A in "base:requisites.require_error2".'
# + ' It is not part of the high state.'
#])
ret = self.run_function('state.sls', mods='requisites.require_recursion_error1')
self.assertEqual(
ret,
['A recursive requisite was found, SLS "requisites.require_recursion_error1" ID "B" ID "A"']
)
def test_requisites_require_any(self):
'''
        Call sls file containing several require_any requisites.
Ensure that some of them are failing and that the order is right.
'''
expected_result = {
'cmd_|-A_|-echo A_|-run': {
'__run_num__': 3,
'comment': 'Command "echo A" run',
'result': True,
'changes': True,
},
'cmd_|-B_|-echo B_|-run': {
'__run_num__': 0,
'comment': 'Command "echo B" run',
'result': True,
'changes': True,
},
'cmd_|-C_|-/bin/false_|-run': {
'__run_num__': 1,
'comment': 'Command "/bin/false" run',
'result': False,
'changes': True,
},
'cmd_|-D_|-echo D_|-run': {
'__run_num__': 2,
'comment': 'Command "echo D" run',
'result': True,
'changes': True,
},
}
ret = self.run_function('state.sls', mods='requisites.require_any')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
def test_requisites_require_any_fail(self):
'''
        Call sls file where all of a state's require_any requisites fail.
        Ensure that the dependent state reports the requisite failure.
'''
ret = self.run_function('state.sls', mods='requisites.require_any_fail')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertIn('One or more requisite failed',
result['cmd_|-D_|-echo D_|-run']['comment'])
def test_requisites_watch_any(self):
'''
        Call sls file containing several watch_any requisites.
Ensure that some of them are failing and that the order is right.
'''
if salt.utils.platform.is_windows():
cmd_true = 'exit'
cmd_false = 'exit /B 1'
else:
cmd_true = 'true'
cmd_false = 'false'
expected_result = {
'cmd_|-A_|-{0}_|-wait'.format(cmd_true): {
'__run_num__': 4,
'comment': 'Command "{0}" run'.format(cmd_true),
'result': True,
'changes': True,
},
'cmd_|-B_|-{0}_|-run'.format(cmd_true): {
'__run_num__': 0,
'comment': 'Command "{0}" run'.format(cmd_true),
'result': True,
'changes': True,
},
'cmd_|-C_|-{0}_|-run'.format(cmd_false): {
'__run_num__': 1,
'comment': 'Command "{0}" run'.format(cmd_false),
'result': False,
'changes': True,
},
'cmd_|-D_|-{0}_|-run'.format(cmd_true): {
'__run_num__': 2,
'comment': 'Command "{0}" run'.format(cmd_true),
'result': True,
'changes': True,
},
'cmd_|-E_|-{0}_|-wait'.format(cmd_true): {
'__run_num__': 9,
'comment': 'Command "{0}" run'.format(cmd_true),
'result': True,
'changes': True,
},
'cmd_|-F_|-{0}_|-run'.format(cmd_true): {
'__run_num__': 5,
'comment': 'Command "{0}" run'.format(cmd_true),
'result': True,
'changes': True,
},
'cmd_|-G_|-{0}_|-run'.format(cmd_false): {
'__run_num__': 6,
'comment': 'Command "{0}" run'.format(cmd_false),
'result': False,
'changes': True,
},
'cmd_|-H_|-{0}_|-run'.format(cmd_false): {
'__run_num__': 7,
'comment': 'Command "{0}" run'.format(cmd_false),
'result': False,
'changes': True,
},
}
ret = self.run_function('state.sls', mods='requisites.watch_any')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
def test_requisites_watch_any_fail(self):
'''
        Call sls file where all of a state's watch_any requisites fail.
        Ensure that the dependent state reports the requisite failure.
'''
ret = self.run_function('state.sls', mods='requisites.watch_any_fail')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertIn('One or more requisite failed',
result['cmd_|-A_|-true_|-wait']['comment'])
def test_requisites_onchanges_any(self):
'''
        Call sls file containing several onchanges_any requisites.
        Ensure that states run only when at least one watched state reports
        changes, and that the order is right.
'''
expected_result = {
'cmd_|-another_changing_state_|-echo "Changed!"_|-run': {
'__run_num__': 1,
'changes': True,
'comment': 'Command "echo "Changed!"" run',
'result': True
},
'cmd_|-changing_state_|-echo "Changed!"_|-run': {
'__run_num__': 0,
'changes': True,
'comment': 'Command "echo "Changed!"" run',
'result': True
},
'cmd_|-test_one_changing_states_|-echo "Success!"_|-run': {
'__run_num__': 4,
'changes': True,
'comment': 'Command "echo "Success!"" run',
'result': True
},
'cmd_|-test_two_non_changing_states_|-echo "Should not run"_|-run': {
'__run_num__': 5,
'changes': False,
'comment': 'State was not run because none of the onchanges reqs changed',
'result': True
},
'pip_|-another_non_changing_state_|-mock_|-installed': {
'__run_num__': 3,
'changes': False,
'comment': 'Python package mock was already installed\nAll specified packages are already installed',
'result': True
},
'pip_|-non_changing_state_|-mock_|-installed': {
'__run_num__': 2,
'changes': False,
'comment': 'Python package mock was already installed\nAll specified packages are already installed',
'result': True
}
}
ret = self.run_function('state.sls', mods='requisites.onchanges_any')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
def test_requisites_onfail_any(self):
'''
        Call sls file containing several onfail_any requisites.
        Ensure that dependent states run only when at least one watched state
        fails, and that the order is right.
'''
expected_result = {
'cmd_|-a_|-exit 0_|-run': {
'__run_num__': 0,
'changes': True,
'comment': 'Command "exit 0" run',
'result': True
},
'cmd_|-b_|-exit 1_|-run': {
'__run_num__': 1,
'changes': True,
'comment': 'Command "exit 1" run',
'result': False
},
'cmd_|-c_|-exit 0_|-run': {
'__run_num__': 2,
'changes': True,
'comment': 'Command "exit 0" run',
'result': True
},
'cmd_|-d_|-echo itworked_|-run': {
'__run_num__': 3,
'changes': True,
'comment': 'Command "echo itworked" run',
'result': True},
'cmd_|-e_|-exit 0_|-run': {
'__run_num__': 4,
'changes': True,
'comment': 'Command "exit 0" run',
'result': True
},
'cmd_|-f_|-exit 0_|-run': {
'__run_num__': 5,
'changes': True,
'comment': 'Command "exit 0" run',
'result': True
},
'cmd_|-g_|-exit 0_|-run': {
'__run_num__': 6,
'changes': True,
'comment': 'Command "exit 0" run',
'result': True
},
'cmd_|-h_|-echo itworked_|-run': {
'__run_num__': 7,
'changes': False,
'comment': 'State was not run because onfail req did not change',
'result': True
}
}
ret = self.run_function('state.sls', mods='requisites.onfail_any')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
def test_requisites_full_sls(self):
'''
        Test the sls special command in requisites
'''
expected_result = {
'cmd_|-A_|-echo A_|-run': {
'__run_num__': 2,
'comment': 'Command "echo A" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B_|-run': {
'__run_num__': 0,
'comment': 'Command "echo B" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C_|-run': {
'__run_num__': 1,
'comment': 'Command "echo C" run',
'result': True,
'changes': True},
}
ret = self.run_function('state.sls', mods='requisites.fullsls_require')
self.assertReturnNonEmptySaltType(ret)
result = self.normalize_ret(ret)
self.assertEqual(expected_result, result)
# issue #8233: traceback on prereq sls
# TODO: not done
#ret = self.run_function('state.sls', mods='requisites.fullsls_prereq')
#self.assertEqual(['sls command can only be used with require requisite'], ret)
def test_requisites_require_no_state_module(self):
'''
        Call sls file containing several require_in and require that omit the state module prefix.
Ensure that some of them are failing and that the order is right.
'''
expected_result = {
'cmd_|-A_|-echo A fifth_|-run': {
'__run_num__': 4,
'comment': 'Command "echo A fifth" run',
'result': True,
'changes': True,
},
'cmd_|-B_|-echo B second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo B second" run',
'result': True,
'changes': True,
},
'cmd_|-C_|-echo C third_|-run': {
'__run_num__': 2,
'comment': 'Command "echo C third" run',
'result': True,
'changes': True,
},
'cmd_|-D_|-echo D first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo D first" run',
'result': True,
'changes': True,
},
'cmd_|-E_|-echo E fourth_|-run': {
'__run_num__': 3,
'comment': 'Command "echo E fourth" run',
'result': True,
'changes': True,
},
'cmd_|-G_|-echo G_|-run': {
'__run_num__': 5,
'comment': 'The following requisites were not found:\n'
+ ' require:\n'
+ ' id: Z\n',
'result': False,
'changes': False,
},
'cmd_|-H_|-echo H_|-run': {
'__run_num__': 6,
'comment': 'The following requisites were not found:\n'
+ ' require:\n'
+ ' id: Z\n',
'result': False,
'changes': False,
}
}
ret = self.run_function('state.sls', mods='requisites.require_no_state_module')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
def test_requisites_prereq_simple_ordering_and_errors(self):
'''
Call sls file containing several prereq_in and prereq.
Ensure that some of them are failing and that the order is right.
'''
expected_result_simple = {
'cmd_|-A_|-echo A third_|-run': {
'__run_num__': 2,
'comment': 'Command "echo A third" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo B first" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo C second" run',
'result': True,
'changes': True},
'cmd_|-I_|-echo I_|-run': {
'__run_num__': 3,
'comment': 'The following requisites were not found:\n'
+ ' prereq:\n'
+ ' cmd: Z\n',
'result': False,
'changes': False},
'cmd_|-J_|-echo J_|-run': {
'__run_num__': 4,
'comment': 'The following requisites were not found:\n'
+ ' prereq:\n'
+ ' foobar: A\n',
'result': False,
'changes': False}
}
expected_result_simple_no_state_module = {
'cmd_|-A_|-echo A third_|-run': {
'__run_num__': 2,
'comment': 'Command "echo A third" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo B first" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo C second" run',
'result': True,
'changes': True},
'cmd_|-I_|-echo I_|-run': {
'__run_num__': 3,
'comment': 'The following requisites were not found:\n'
+ ' prereq:\n'
+ ' id: Z\n',
'result': False,
'changes': False}
}
expected_result_simple2 = {
'cmd_|-A_|-echo A_|-run': {
'__run_num__': 1,
'comment': 'Command "echo A" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B_|-run': {
'__run_num__': 2,
'comment': 'Command "echo B" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C_|-run': {
'__run_num__': 0,
'comment': 'Command "echo C" run',
'result': True,
'changes': True},
'cmd_|-D_|-echo D_|-run': {
'__run_num__': 3,
'comment': 'Command "echo D" run',
'result': True,
'changes': True},
'cmd_|-E_|-echo E_|-run': {
'__run_num__': 4,
'comment': 'Command "echo E" run',
'result': True,
'changes': True}
}
expected_result_simple3 = {
'cmd_|-A_|-echo A first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo A first" run',
'result': True,
'changes': True,
},
'cmd_|-B_|-echo B second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo B second" run',
'result': True,
'changes': True,
},
'cmd_|-C_|-echo C third_|-wait': {
'__run_num__': 2,
'comment': '',
'result': True,
'changes': False,
}
}
expected_result_complex = {
'cmd_|-A_|-echo A fourth_|-run': {
'__run_num__': 3,
'comment': 'Command "echo A fourth" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo B first" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo C second" run',
'result': True,
'changes': True},
'cmd_|-D_|-echo D third_|-run': {
'__run_num__': 2,
'comment': 'Command "echo D third" run',
'result': True,
'changes': True},
}
ret = self.run_function('state.sls', mods='requisites.prereq_simple')
self.assertReturnNonEmptySaltType(ret)
result = self.normalize_ret(ret)
self.assertEqual(expected_result_simple, result)
# same test, but not using lists in yaml syntax
# TODO: issue #8235, prereq ignored when not used in list syntax
# Currently fails badly with :
# TypeError encountered executing state.sls: string indices must be integers, not str.
#expected_result_simple.pop('cmd_|-I_|-echo I_|-run')
#expected_result_simple.pop('cmd_|-J_|-echo J_|-run')
#ret = self.run_function('state.sls', mods='requisites.prereq_simple_nolist')
#result = self.normalize_ret(ret)
#self.assertEqual(expected_result_simple, result)
ret = self.run_function('state.sls', mods='requisites.prereq_simple2')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result_simple2, result)
ret = self.run_function('state.sls', mods='requisites.prereq_simple3')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result_simple3, result)
#ret = self.run_function('state.sls', mods='requisites.prereq_error_nolist')
#self.assertEqual(
# ret,
# ['Cannot extend ID Z in "base:requisites.prereq_error_nolist".'
# + ' It is not part of the high state.']
#)
ret = self.run_function('state.sls', mods='requisites.prereq_compile_error1')
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(
ret['cmd_|-B_|-echo B_|-run']['comment'],
'The following requisites were not found:\n'
+ ' prereq:\n'
+ ' foobar: A\n'
)
ret = self.run_function('state.sls', mods='requisites.prereq_compile_error2')
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(
ret['cmd_|-B_|-echo B_|-run']['comment'],
'The following requisites were not found:\n'
+ ' prereq:\n'
+ ' foobar: C\n'
)
ret = self.run_function('state.sls', mods='requisites.prereq_complex')
result = self.normalize_ret(ret)
self.assertEqual(expected_result_complex, result)
# issue #8210 : prereq recursion undetected
# TODO: this test fails
#ret = self.run_function('state.sls', mods='requisites.prereq_recursion_error')
#self.assertEqual(
# ret,
# ['A recursive requisite was found, SLS "requisites.prereq_recursion_error" ID "B" ID "A"']
#)
ret = self.run_function('state.sls', mods='requisites.prereq_simple_no_state_module')
result = self.normalize_ret(ret)
self.assertEqual(expected_result_simple_no_state_module, result)
def test_infinite_recursion_sls_prereq(self):
ret = self.run_function('state.sls', mods='requisites.prereq_sls_infinite_recursion')
self.assertSaltTrueReturn(ret)
def test_requisites_use(self):
'''
Call sls file containing several use_in and use.
'''
# TODO issue #8235 & #8774 some examples are still commented in the test file
ret = self.run_function('state.sls', mods='requisites.use')
self.assertReturnNonEmptySaltType(ret)
for item, descr in six.iteritems(ret):
self.assertEqual(descr['comment'], 'onlyif condition is false')
# TODO: issue #8802 : use recursions undetected
# issue is closed as use does not actually inherit requisites
        # if chain-use is added after #8774 resolution these tests might become useful
#ret = self.run_function('state.sls', mods='requisites.use_recursion')
#self.assertEqual(ret, [
# 'A recursive requisite was found, SLS "requisites.use_recursion"'
# + ' ID "B" ID "A"'
#])
#ret = self.run_function('state.sls', mods='requisites.use_recursion2')
#self.assertEqual(ret, [
# 'A recursive requisite was found, SLS "requisites.use_recursion2"'
# + ' ID "C" ID "A"'
#])
#ret = self.run_function('state.sls', mods='requisites.use_auto_recursion')
#self.assertEqual(ret, [
# 'A recursive requisite was found, SLS "requisites.use_recursion"'
# + ' ID "A" ID "A"'
#])
def test_requisites_use_no_state_module(self):
'''
Call sls file containing several use_in and use.
'''
ret = self.run_function('state.sls', mods='requisites.use_no_state_module')
self.assertReturnNonEmptySaltType(ret)
for item, descr in six.iteritems(ret):
self.assertEqual(descr['comment'], 'onlyif condition is false')
def test_get_file_from_env_in_top_match(self):
tgt = os.path.join(TMP, 'prod-cheese-file')
try:
ret = self.run_function(
'state.highstate', minion_tgt='sub_minion'
)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isfile(tgt))
with salt.utils.files.fopen(tgt, 'r') as cheese:
data = salt.utils.stringutils.to_unicode(cheese.read())
self.assertIn('Gromit', data)
self.assertIn('Comte', data)
finally:
if os.path.islink(tgt):
os.unlink(tgt)
# onchanges tests
def test_onchanges_requisite(self):
'''
Tests a simple state using the onchanges requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onchanges_simple')
# First, test the result of the state run when changes are expected to happen
test_data = state_run['cmd_|-test_changing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when changes are not expected to happen
test_data = state_run['cmd_|-test_non_changing_state_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because none of the onchanges reqs changed'
self.assertIn(expected_result, test_data)
def test_onchanges_requisite_multiple(self):
'''
Tests a simple state using the onchanges requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls',
mods='requisites.onchanges_multiple')
# First, test the result of the state run when two changes are expected to happen
test_data = state_run['cmd_|-test_two_changing_states_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when two changes are not expected to happen
test_data = state_run['cmd_|-test_two_non_changing_states_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because none of the onchanges reqs changed'
self.assertIn(expected_result, test_data)
# Finally, test the result of the state run when only one of the onchanges requisites changes.
test_data = state_run['cmd_|-test_one_changing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
def test_onchanges_in_requisite(self):
'''
Tests a simple state using the onchanges_in requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onchanges_in_simple')
# First, test the result of the state run of when changes are expected to happen
test_data = state_run['cmd_|-test_changes_expected_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when changes are not expected to happen
test_data = state_run['cmd_|-test_changes_not_expected_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because none of the onchanges reqs changed'
self.assertIn(expected_result, test_data)
def test_onchanges_requisite_no_state_module(self):
'''
Tests a simple state using the onchanges requisite without state modules
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onchanges_simple_no_state_module')
test_data = state_run['cmd_|-test_changing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
def test_onchanges_requisite_with_duration(self):
'''
Tests a simple state using the onchanges requisite
        the state will not run, but the results will still include a duration
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onchanges_simple')
# Then, test the result of the state run when changes are not expected to happen
# and ensure duration is included in the results
test_data = state_run['cmd_|-test_non_changing_state_|-echo "Should not run"_|-run']
self.assertIn('duration', test_data)
# onfail tests
def test_onfail_requisite(self):
'''
Tests a simple state using the onfail requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onfail_simple')
# First, test the result of the state run when a failure is expected to happen
test_data = state_run['cmd_|-test_failing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when a failure is not expected to happen
test_data = state_run['cmd_|-test_non_failing_state_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because onfail req did not change'
self.assertIn(expected_result, test_data)
def test_multiple_onfail_requisite(self):
'''
test to ensure state is run even if only one
of the onfails fails. This is a test for the issue:
https://github.com/saltstack/salt/issues/22370
'''
state_run = self.run_function('state.sls', mods='requisites.onfail_multiple')
retcode = state_run['cmd_|-c_|-echo itworked_|-run']['changes']['retcode']
self.assertEqual(retcode, 0)
stdout = state_run['cmd_|-c_|-echo itworked_|-run']['changes']['stdout']
self.assertEqual(stdout, 'itworked')
def test_onfail_in_requisite(self):
'''
Tests a simple state using the onfail_in requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onfail_in_simple')
# First, test the result of the state run when a failure is expected to happen
test_data = state_run['cmd_|-test_failing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when a failure is not expected to happen
test_data = state_run['cmd_|-test_non_failing_state_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because onfail req did not change'
self.assertIn(expected_result, test_data)
def test_onfail_requisite_no_state_module(self):
'''
Tests a simple state using the onfail requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onfail_simple_no_state_module')
# First, test the result of the state run when a failure is expected to happen
test_data = state_run['cmd_|-test_failing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when a failure is not expected to happen
test_data = state_run['cmd_|-test_non_failing_state_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because onfail req did not change'
self.assertIn(expected_result, test_data)
def test_onfail_requisite_with_duration(self):
'''
Tests a simple state using the onfail requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onfail_simple')
# Then, test the result of the state run when a failure is not expected to happen
test_data = state_run['cmd_|-test_non_failing_state_|-echo "Should not run"_|-run']
self.assertIn('duration', test_data)
def test_multiple_onfail_requisite_with_required(self):
'''
test to ensure multiple states are run
when specified as onfails for a single state.
This is a test for the issue:
https://github.com/saltstack/salt/issues/46552
'''
state_run = self.run_function('state.sls', mods='requisites.onfail_multiple_required')
retcode = state_run['cmd_|-b_|-echo b_|-run']['changes']['retcode']
self.assertEqual(retcode, 0)
retcode = state_run['cmd_|-c_|-echo c_|-run']['changes']['retcode']
self.assertEqual(retcode, 0)
retcode = state_run['cmd_|-d_|-echo d_|-run']['changes']['retcode']
self.assertEqual(retcode, 0)
stdout = state_run['cmd_|-b_|-echo b_|-run']['changes']['stdout']
self.assertEqual(stdout, 'b')
stdout = state_run['cmd_|-c_|-echo c_|-run']['changes']['stdout']
self.assertEqual(stdout, 'c')
stdout = state_run['cmd_|-d_|-echo d_|-run']['changes']['stdout']
self.assertEqual(stdout, 'd')
def test_multiple_onfail_requisite_with_required_no_run(self):
'''
test to ensure multiple states are not run
when specified as onfails for a single state
which fails.
This is a test for the issue:
https://github.com/saltstack/salt/issues/46552
'''
state_run = self.run_function('state.sls', mods='requisites.onfail_multiple_required_no_run')
expected = 'State was not run because onfail req did not change'
stdout = state_run['cmd_|-b_|-echo b_|-run']['comment']
self.assertEqual(stdout, expected)
stdout = state_run['cmd_|-c_|-echo c_|-run']['comment']
self.assertEqual(stdout, expected)
stdout = state_run['cmd_|-d_|-echo d_|-run']['comment']
self.assertEqual(stdout, expected)
# listen tests
def test_listen_requisite(self):
'''
Tests a simple state using the listen requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_simple')
# First, test the result of the state run when a listener is expected to trigger
listener_state = 'cmd_|-listener_test_listening_change_state_|-echo "Listening State"_|-mod_watch'
self.assertIn(listener_state, state_run)
# Then, test the result of the state run when a listener should not trigger
absent_state = 'cmd_|-listener_test_listening_non_changing_state_|-echo "Only run once"_|-mod_watch'
self.assertNotIn(absent_state, state_run)
def test_listen_in_requisite(self):
'''
Tests a simple state using the listen_in requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_in_simple')
# First, test the result of the state run when a listener is expected to trigger
listener_state = 'cmd_|-listener_test_listening_change_state_|-echo "Listening State"_|-mod_watch'
self.assertIn(listener_state, state_run)
# Then, test the result of the state run when a listener should not trigger
absent_state = 'cmd_|-listener_test_listening_non_changing_state_|-echo "Only run once"_|-mod_watch'
self.assertNotIn(absent_state, state_run)
def test_listen_in_requisite_resolution(self):
'''
Verify listen_in requisite lookups use ID declaration to check for changes
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_in_simple')
# Test the result of the state run when a listener is expected to trigger
listener_state = 'cmd_|-listener_test_listen_in_resolution_|-echo "Successful listen_in resolution"_|-mod_watch'
self.assertIn(listener_state, state_run)
def test_listen_requisite_resolution(self):
'''
Verify listen requisite lookups use ID declaration to check for changes
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_simple')
# Both listeners are expected to trigger
listener_state = 'cmd_|-listener_test_listening_resolution_one_|-echo "Successful listen resolution"_|-mod_watch'
self.assertIn(listener_state, state_run)
listener_state = 'cmd_|-listener_test_listening_resolution_two_|-echo "Successful listen resolution"_|-mod_watch'
self.assertIn(listener_state, state_run)
def test_listen_requisite_no_state_module(self):
'''
Tests a simple state using the listen requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_simple_no_state_module')
# First, test the result of the state run when a listener is expected to trigger
listener_state = 'cmd_|-listener_test_listening_change_state_|-echo "Listening State"_|-mod_watch'
self.assertIn(listener_state, state_run)
# Then, test the result of the state run when a listener should not trigger
absent_state = 'cmd_|-listener_test_listening_non_changing_state_|-echo "Only run once"_|-mod_watch'
self.assertNotIn(absent_state, state_run)
def test_listen_in_requisite_resolution_names(self):
'''
Verify listen_in requisite lookups use ID declaration to check for changes
and resolves magic names state variable
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_in_names')
self.assertIn('test_|-listener_service_|-nginx_|-mod_watch', state_run)
self.assertIn('test_|-listener_service_|-crond_|-mod_watch', state_run)
def test_listen_requisite_resolution_names(self):
'''
Verify listen requisite lookups use ID declaration to check for changes
and resolves magic names state variable
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_names')
self.assertIn('test_|-listener_service_|-nginx_|-mod_watch', state_run)
self.assertIn('test_|-listener_service_|-crond_|-mod_watch', state_run)
def test_issue_30820_requisite_in_match_by_name(self):
'''
This tests the case where a requisite_in matches by name instead of ID
See https://github.com/saltstack/salt/issues/30820 for more info
'''
state_run = self.run_function(
'state.sls',
mods='requisites.requisite_in_match_by_name'
)
bar_state = 'cmd_|-bar state_|-echo bar_|-wait'
self.assertIn(bar_state, state_run)
self.assertEqual(state_run[bar_state]['comment'],
'Command "echo bar" run')
def test_retry_option_defaults(self):
'''
test the retry option on a simple state with defaults
ensure comment is as expected
ensure state duration is greater than default retry_interval (30 seconds)
'''
state_run = self.run_function(
'state.sls',
mods='retry.retry_defaults'
)
retry_state = 'file_|-file_test_|-/path/to/a/non-existent/file.txt_|-exists'
expected_comment = ('Attempt 1: Returned a result of "False", with the following '
'comment: "Specified path /path/to/a/non-existent/file.txt does not exist"\n'
'Specified path /path/to/a/non-existent/file.txt does not exist')
self.assertEqual(state_run[retry_state]['comment'], expected_comment)
self.assertTrue(state_run[retry_state]['duration'] > 30)
self.assertEqual(state_run[retry_state]['result'], False)
def test_retry_option_custom(self):
'''
test the retry option on a simple state with custom retry values
ensure comment is as expected
ensure state duration is greater than custom defined interval * (retries - 1)
'''
state_run = self.run_function(
'state.sls',
mods='retry.retry_custom'
)
retry_state = 'file_|-file_test_|-/path/to/a/non-existent/file.txt_|-exists'
expected_comment = ('Attempt 1: Returned a result of "False", with the following '
'comment: "Specified path /path/to/a/non-existent/file.txt does not exist"\n'
'Attempt 2: Returned a result of "False", with the following comment: "Specified'
' path /path/to/a/non-existent/file.txt does not exist"\nAttempt 3: Returned'
' a result of "False", with the following comment: "Specified path'
' /path/to/a/non-existent/file.txt does not exist"\nAttempt 4: Returned a'
' result of "False", with the following comment: "Specified path'
' /path/to/a/non-existent/file.txt does not exist"\nSpecified path'
' /path/to/a/non-existent/file.txt does not exist')
self.assertEqual(state_run[retry_state]['comment'], expected_comment)
self.assertTrue(state_run[retry_state]['duration'] > 40)
self.assertEqual(state_run[retry_state]['result'], False)
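        # A hypothetical stanza of the shape these retry fixtures exercise (the
        # actual retry.retry_custom SLS may use different values):
        #   /path/to/a/non-existent/file.txt:
        #     file.exists:
        #       - retry:
        #           attempts: 5
        #           interval: 10
        # The state is re-run until the attempts are exhausted, so the total
        # duration grows with interval * (attempts - 1).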
def test_retry_option_success(self):
'''
        test a state with the retry option that should return True immediately (i.e. no retries)
'''
testfile = os.path.join(TMP, 'retry_file')
state_run = self.run_function(
'state.sls',
mods='retry.retry_success'
)
os.unlink(testfile)
retry_state = 'file_|-file_test_|-{0}_|-exists'.format(testfile)
self.assertNotIn('Attempt', state_run[retry_state]['comment'])
def run_create(self):
'''
helper function to wait 30 seconds and then create the temp retry file
'''
testfile = os.path.join(TMP, 'retry_file')
time.sleep(30)
with salt.utils.files.fopen(testfile, 'a'):
pass
def test_retry_option_eventual_success(self):
'''
        test a state with the retry option that should return True after at least 4 retry attempts
        but before reaching 15 attempts
'''
testfile = os.path.join(TMP, 'retry_file')
create_thread = threading.Thread(target=self.run_create)
create_thread.start()
state_run = self.run_function(
'state.sls',
mods='retry.retry_success2'
)
retry_state = 'file_|-file_test_|-{0}_|-exists'.format(testfile)
self.assertIn('Attempt 1:', state_run[retry_state]['comment'])
self.assertIn('Attempt 2:', state_run[retry_state]['comment'])
self.assertIn('Attempt 3:', state_run[retry_state]['comment'])
self.assertIn('Attempt 4:', state_run[retry_state]['comment'])
self.assertNotIn('Attempt 15:', state_run[retry_state]['comment'])
self.assertEqual(state_run[retry_state]['result'], True)
def test_issue_38683_require_order_failhard_combination(self):
'''
This tests the case where require, order, and failhard are all used together in a state definition.
        Previously, the order option, when used in tandem with require and failhard, would cause the state
compiler to stacktrace. This exposed a logic error in the ``check_failhard`` function of the state
compiler. With the logic error resolved, this test should now pass.
See https://github.com/saltstack/salt/issues/38683 for more information.
'''
state_run = self.run_function(
'state.sls',
mods='requisites.require_order_failhard_combo'
)
state_id = 'test_|-b_|-b_|-fail_with_changes'
self.assertIn(state_id, state_run)
self.assertEqual(state_run[state_id]['comment'], 'Failure!')
self.assertFalse(state_run[state_id]['result'])
def test_issue_46762_prereqs_on_a_state_with_unfulfilled_requirements(self):
'''
This tests the case where state C requires state A, which fails.
State C is a pre-required state for State B.
Since state A fails, state C will not run because the requisite failed,
therefore state B will not run because state C failed to run.
See https://github.com/saltstack/salt/issues/46762 for
more information.
'''
state_run = self.run_function(
'state.sls',
mods='issue-46762'
)
state_id = 'test_|-a_|-a_|-fail_without_changes'
self.assertIn(state_id, state_run)
self.assertEqual(state_run[state_id]['comment'],
'Failure!')
self.assertFalse(state_run[state_id]['result'])
state_id = 'test_|-b_|-b_|-nop'
self.assertIn(state_id, state_run)
self.assertEqual(state_run[state_id]['comment'],
'One or more requisite failed: issue-46762.c')
self.assertFalse(state_run[state_id]['result'])
state_id = 'test_|-c_|-c_|-nop'
self.assertIn(state_id, state_run)
self.assertEqual(state_run[state_id]['comment'],
'One or more requisite failed: issue-46762.a')
self.assertFalse(state_run[state_id]['result'])
def test_state_nonbase_environment(self):
'''
test state.sls with saltenv using a nonbase environment
with a salt source
'''
filename = os.path.join(TMP, 'nonbase_env')
try:
ret = self.run_function(
'state.sls',
mods='non-base-env',
saltenv='prod'
)
ret = ret[next(iter(ret))]
assert ret['result']
assert ret['comment'] == 'File {0} updated'.format(filename)
assert os.path.isfile(filename)
finally:
try:
os.remove(filename)
except OSError:
pass
@skipIf(sys.platform.startswith('win'), 'Skipped until parallel states can be fixed on Windows')
def test_parallel_state_with_long_tag(self):
'''
This tests the case where the state being executed has a long ID dec or
name and states are being run in parallel. The filenames used for the
parallel state cache were previously based on the tag for each chunk,
and longer ID decs or name params can cause the cache file to be longer
than the operating system's max file name length. To counter this we
instead generate a SHA1 hash of the chunk's tag to use as the cache
filename. This test will ensure that long tags don't cause caching
failures.
See https://github.com/saltstack/salt/issues/49738 for more info.
'''
short_command = 'helloworld'
long_command = short_command * 25
ret = self.run_function(
'state.sls',
mods='issue-49738',
pillar={'short_command': short_command,
'long_command': long_command}
)
comments = sorted([x['comment'] for x in six.itervalues(ret)])
expected = sorted(['Command "{0}" run'.format(x)
for x in (short_command, long_command)])
assert comments == expected, '{0} != {1}'.format(comments, expected)
def _add_runtime_pillar(self, pillar):
'''
        helper method to add pillar data at runtime
'''
import salt.utils.yaml
with salt.utils.files.fopen(os.path.join(TMP_PILLAR_TREE,
'pillar.sls'), 'w') as fp:
salt.utils.yaml.safe_dump(pillar, fp)
with salt.utils.files.fopen(os.path.join(TMP_PILLAR_TREE, 'top.sls'), 'w') as fp:
fp.write(textwrap.dedent('''\
base:
'*':
- pillar
'''))
self.run_function('saltutil.refresh_pillar')
self.run_function('test.sleep', [5])
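    # Note added for clarity: the helper above writes the given dict to pillar.sls under
    # TMP_PILLAR_TREE, points top.sls at it for every minion ('*'), and refreshes pillar;
    # the short sleep gives the refresh time to finish before the next state run.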
def test_state_sls_id_test(self):
'''
test state.sls_id when test is set
to true in pillar data
'''
self._add_runtime_pillar(pillar={'test': True})
testfile = os.path.join(TMP, 'testfile')
comment = 'The file {0} is set to be changed'.format(testfile)
ret = self.run_function('state.sls', ['core'])
for key, val in ret.items():
self.assertEqual(val['comment'], comment)
self.assertEqual(val['changes'], {'newfile': testfile})
def test_state_sls_id_test_state_test_post_run(self):
'''
        test state.sls_id when test is set to
        true after the state has already been run
'''
file_name = os.path.join(TMP, 'testfile')
ret = self.run_function('state.sls', ['core'])
for key, val in ret.items():
self.assertEqual(val['comment'],
'File {0} updated'.format(file_name))
self.assertEqual(val['changes']['diff'], 'New file')
self._add_runtime_pillar(pillar={'test': True})
ret = self.run_function('state.sls', ['core'])
for key, val in ret.items():
self.assertEqual(
val['comment'],
'The file {0} is in the correct state'.format(file_name))
self.assertEqual(val['changes'], {})
def test_state_sls_id_test_true(self):
'''
test state.sls_id when test=True is passed as arg
'''
file_name = os.path.join(TMP, 'testfile')
ret = self.run_function('state.sls', ['core'], test=True)
for key, val in ret.items():
self.assertEqual(
val['comment'],
'The file {0} is set to be changed'.format(file_name))
self.assertEqual(val['changes'], {'newfile': file_name})
def test_state_sls_id_test_true_post_run(self):
'''
        test state.sls_id when test is set to true as an
        arg after the state has already been run
'''
file_name = os.path.join(TMP, 'testfile')
ret = self.run_function('state.sls', ['core'])
for key, val in ret.items():
self.assertEqual(val['comment'],
'File {0} updated'.format(file_name))
self.assertEqual(val['changes']['diff'], 'New file')
ret = self.run_function('state.sls', ['core'], test=True)
for key, val in ret.items():
self.assertEqual(
val['comment'],
'The file {0} is in the correct state'.format(file_name))
self.assertEqual(val['changes'], {})
def test_state_sls_id_test_false_pillar_true(self):
'''
test state.sls_id when test is set to false as an
arg and minion_state_test is set to True. Should
return test=False.
'''
file_name = os.path.join(TMP, 'testfile')
self._add_runtime_pillar(pillar={'test': True})
ret = self.run_function('state.sls', ['core'], test=False)
for key, val in ret.items():
self.assertEqual(val['comment'],
'File {0} updated'.format(file_name))
self.assertEqual(val['changes']['diff'], 'New file')
def test_issue_30161_unless_and_onlyif_together(self):
'''
test cmd.run using multiple unless options where the first cmd in the
list will pass, but the second will fail. This tests the fix for issue
#35384. (The fix is in PR #35545.)
'''
sls = self.run_function('state.sls', mods='issue-30161')
self.assertSaltTrueReturn(sls)
# We must assert against the comment here to make sure the comment reads that the
# command "echo "hello"" was run. This ensures that we made it to the last unless
# command in the state. If the comment reads "unless condition is true", or similar,
# then the unless state run bailed out after the first unless command succeeded,
# which is the bug we're regression testing for.
_expected = {'file_|-unless_false_onlyif_false_|-{0}{1}test.txt_|-managed'.format(TMP, os.path.sep):
{'comment': 'onlyif condition is false\nunless condition is false',
'name': '{0}{1}test.txt'.format(TMP, os.path.sep),
'skip_watch': True,
'changes': {},
'result': True},
'file_|-unless_false_onlyif_true_|-{0}{1}test.txt_|-managed'.format(TMP, os.path.sep):
{'comment': 'Empty file',
'name': '{0}{1}test.txt'.format(TMP, os.path.sep),
'start_time': '18:10:20.341753',
'result': True,
'changes': {'new': 'file {0}{1}test.txt created'.format(TMP, os.path.sep)}},
'file_|-unless_true_onlyif_false_|-{0}{1}test.txt_|-managed'.format(TMP, os.path.sep):
{'comment': 'onlyif condition is false\nunless condition is true',
'name': '{0}{1}test.txt'.format(TMP, os.path.sep),
'start_time': '18:10:22.936446',
'skip_watch': True,
'changes': {},
'result': True},
'file_|-unless_true_onlyif_true_|-{0}{1}test.txt_|-managed'.format(TMP, os.path.sep):
{'comment': 'onlyif condition is true\nunless condition is true',
'name': '{0}{1}test.txt'.format(TMP, os.path.sep),
'skip_watch': True,
'changes': {},
'result': True}}
        for state_id in _expected:
            self.assertEqual(sls[state_id]['comment'], _expected[state_id]['comment'])
def test_state_sls_unicode_characters(self):
'''
        test state.sls when the state file contains non-ASCII characters
'''
ret = self.run_function('state.sls', ['issue-46672'])
log.debug('== ret %s ==', type(ret))
_expected = "cmd_|-echo1_|-echo 'This is Æ test!'_|-run"
self.assertIn(_expected, ret)
def test_state_sls_unicode_characters_cmd_output(self):
'''
        test the output from running an echo command with non-ASCII
        characters.
'''
ret = self.run_function('state.sls', ['issue-46672-a'])
key = list(ret.keys())[0]
log.debug('== ret %s ==', type(ret))
_expected = 'This is Æ test!'
if salt.utils.platform.is_windows():
# Windows cmd.exe will mangle the output using cmd's codepage.
if six.PY2:
_expected = "'This is A+ test!'"
else:
_expected = "'This is ’ test!'"
self.assertEqual(_expected, ret[key]['changes']['stdout'])
def tearDown(self):
nonbase_file = os.path.join(TMP, 'nonbase_env')
if os.path.isfile(nonbase_file):
os.remove(nonbase_file)
# remove old pillar data
for filename in os.listdir(TMP_PILLAR_TREE):
os.remove(os.path.join(TMP_PILLAR_TREE, filename))
self.run_function('saltutil.refresh_pillar')
self.run_function('test.sleep', [5])
# remove testfile added in core.sls state file
state_file = os.path.join(TMP, 'testfile')
if os.path.isfile(state_file):
os.remove(state_file)
# remove testfile added in issue-30161.sls state file
state_file = os.path.join(TMP, 'test.txt')
if os.path.isfile(state_file):
os.remove(state_file)
def test_state_sls_integer_name(self):
'''
This tests the case where the state file is named
only with integers
'''
state_run = self.run_function(
'state.sls',
mods='12345'
)
state_id = 'test_|-always-passes_|-always-passes_|-succeed_without_changes'
self.assertIn(state_id, state_run)
self.assertEqual(state_run[state_id]['comment'],
'Success!')
self.assertTrue(state_run[state_id]['result'])
|
bot.py
|
#Twitch chat bot code by Steven Anderson.
#Code is written in Python and is still being developed.
#If you use this code for a chat bot, please only change the variables that you need to change.
#If you want to add/remove commands and do not understand Python, please ask Steven.
#Each section of the code should have comments describing what it does.
#Importing modules for code.
import socket
import time
import re
import select
import requests
import urllib2
import json
import yaml
import thread
import threading
import sqlite3
import os
import ConfigParser
import pntsys
import pushbullet
import random
#Importing variables file
conpar = ConfigParser.SafeConfigParser()
conpar.read('botvar.ini')
#Variables for the bot. Text values need to be quoted in ' ' but numbers do not.
#Variables needing to be modified should be located in the botvar.ini file.
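#Illustrative botvar.ini layout, inferred from the conpar.get() calls below.
#The section and option names come from this file; the values shown are placeholders only.
#[botname]
#nick = examplebot
#oauth = oauth:xxxxxxxxxxxxxxxx
#[channelinfo]
#channel = #examplechannel
#owner = examplestreamer
#[points]
#viewerpoints = 1
#modpoints = 2
#staffpoints = 2
#[timer]
#bottimer = 60
#[commands]
#pntname = point
#pntval = 1
#compntreq = 100
#[pbullet]
#apikey = <pushbullet api key>
#notifications = 1
#devnumb = 0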
server = 'irc.twitch.tv'
port = 443
nick = conpar.get("botname", "nick")
oauth = conpar.get("botname", "oauth")
channel = conpar.get("channelinfo", "channel")
owner = conpar.get("channelinfo", "owner")
userpointsv = conpar.get("points", "viewerpoints")
userpoints = int(userpointsv)
modpointsv = conpar.get("points", "modpoints")
modpoints = int(modpointsv)
staffpointsv = conpar.get("points", "staffpoints")
staffpoints = int(staffpointsv)
btimer = conpar.get("timer", "bottimer")
bottimer = int(btimer)
pntname = conpar.get("commands", "pntname")
pntvalu = conpar.get("commands", "pntval")
pntval = int(pntvalu)
compntreqv = conpar.get("commands", "compntreq")
compntreq = int(compntreqv)
api_key = conpar.get("pbullet", "apikey")
notiov = conpar.get("pbullet", "notifications")
notio = int(notiov)
devnumv = conpar.get("pbullet", "devnumb")
devnum = int(devnumv)
pb = pushbullet.Pushbullet(api_key)
winph = pb.devices[devnum]
tarptime = time.time()
latetime = time.time()
hitime = time.time()
byetime = time.time()
cwtime = time.time()
waittime = 30
tarpwait = 10
vkact = 1
vkee = "nub"
vklistlen = 0
latsass = 0
rafact = 1
rdb = None
vernum = '1.0a'
#Connecting to Twitch irc server.
s = socket.socket()
s.connect((server, port))
s.send("PASS %s\r\n" % (oauth))
s.send("NICK %s\r\n" % nick)
s.send("USER %s\r\n" % (nick))
s.send("CAP REQ :twitch.tv/membership \r\n")
s.send("JOIN %s\r\n" % channel)
print "Connected to channel %s" % channel
#Function to check if the point database exists and create it if necessary.
def dbexists():
if os.path.exists('points.db'):
print "Database is there."
else:
conn = sqlite3.connect('points.db')
cur = conn.cursor()
cur.execute("CREATE TABLE Users(username TEXT, current INT, total INT)")
conn.commit()
conn.close()
print "Created Database"
#Run point database check.
dbexists()
#Connect to point database
lconn = sqlite3.connect('points.db')
lcur = lconn.cursor()
#Function to check if the command database exists and create it if necessary.
def comdbexists():
if os.path.exists('custcoms.db'):
print "Coms Database is there."
else:
comdbconn = sqlite3.connect('custcoms.db')
comdbcur = comdbconn.cursor()
comdbcur.execute("CREATE TABLE Comsdb(com TEXT, lvl TEXT, commsg TEXT)")
comdbconn.commit()
comdbconn.close()
print "Created Coms Database"
#Run command database check.
comdbexists()
#Connect to command database
comconn = sqlite3.connect('custcoms.db')
comcur = comconn.cursor()
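#Schema recap (comment added for clarity, taken from the CREATE TABLE statements above):
# points.db   -> Users(username TEXT, current INT, total INT)
# custcoms.db -> Comsdb(com TEXT, lvl TEXT, commsg TEXT), where lvl is 'ol' (owner),
# 'ml' (mods) or 'vl' (viewers), as checked in the custom command dispatch further down.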
print "Bot Started"
#Twitch API list pulls. modlist is for mod commands, comuser is for the command user; modlistd, userlist, and tstafflist are for the Point System.
def modlist():
tapi = 'https://tmi.twitch.tv/group/user/%s/chatters' % owner
response = urllib2.urlopen(tapi)
tapiinfo = yaml.load(response)
return tapiinfo["chatters"]["moderators"]
def modlistd():
tdapi = 'https://tmi.twitch.tv/group/user/%s/chatters' % owner
dresponse = urllib2.urlopen(tdapi)
tdapiinfo = json.load(dresponse)
return tdapiinfo["chatters"]["moderators"]
def userlist():
tuapi = 'https://tmi.twitch.tv/group/user/%s/chatters' % owner
uresponse = urllib2.urlopen(tuapi)
tuapiinfo = json.load(uresponse)
return tuapiinfo["chatters"]["viewers"]
def tstafflist():
tsapi = 'https://tmi.twitch.tv/group/user/%s/chatters' % owner
tsresponse = urllib2.urlopen(tsapi)
tsapiinfo = json.load(tsresponse)
stafflist = tsapiinfo["chatters"]["staff"]
adminslist = tsapiinfo["chatters"]["admins"]
gmodslist = tsapiinfo["chatters"]["global_mods"]
combinedlist = stafflist + adminslist + gmodslist
return combinedlist
def comuser():
complete = msg[1: ].split(":", 1) # Parse the message into useful data sender[0] is username
info = complete[0].split(" ")
msgpart = complete[1]
sender = info[0].split("!")
return sender[0]
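#Note added for clarity: comuser() above parses the raw IRC line held in the module-level
#msg variable (set in the main loop below). A PRIVMSG looks like
#":nick!user@host PRIVMSG #channel :message", and comuser() returns the sending nick.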
#Point System start
psys = threading.Thread(target = pntsys.pointsys)
psys.start()
#Setup for raffle system
vkconn = sqlite3.connect(':memory:')
vkcur = vkconn.cursor()
#Commands for bot. Do not change, remove, or add unless you understand the command setup.
#Custom commands can be added or removed using the addcomd and delcomd commands respectively.
while(1):
msg = s.recv(1024)
#print msg
if msg.find('PING') != -1:
s.send('PONG ' + msg.split()[1] + '\r\n')
if msg.find('PRIVMSG') != -1:
tmp = msg.split('PRIVMSG')[-1]
if tmp.find('!') != -1:
regex = re.compile("(!\S*)")
r = regex.findall(tmp)
#check com database
comr = r[0]
comdbr = 'SELECT * FROM Comsdb WHERE com="%s"' % comr
comcur.execute(comdbr)
comdbf = comcur.fetchone()
if comdbf is not None:
rdb = comdbf[0]
#print rdb
for i in r:
if i == '!notice':
if comuser() == 'stevolime':
s.send("PRIVMSG %s :Senpai noticed me!\r\n" % channel)
#bot info
elif i == "!version":
s.send("PRIVMSG %s :Version %s. Written in Python.\r\n" % (channel, vernum))
elif i == "!botinfo":
s.send("PRIVMSG %s :This bot is using the StroderBot software in development by StevoLime using python and is on Version %s.\r\n" % (channel, vernum))
elif i == "!loyinfo":
s.send("PRIVMSG %s :If you would like to know your points use !my%ss. Mods can also run !%ss for %s values and timer.\r\n" % (channel, pntname, pntname, pntname))
#some fun commands
elif i == "!fun":
s.send("PRIVMSG %s :!powers and !random\r\n" % channel)
#point system commands
elif i == "!my%ss" % pntname:
lcuser = comuser()
lsql = 'SELECT * FROM Users WHERE username="%s"' % (lcuser)
lcur.execute(lsql)
lcuserd = lcur.fetchone()
if lcuserd != None:
s.send("PRIVMSG %s :%s currently has %s %ss and has earned a total of %s %ss.\r\n" % (channel, comuser(), lcuserd[1], pntname, lcuserd[2], pntname))
else:
s.send("PRIVMSG %s :%s currently has no %ss.\r\n" % (channel, comuser(), pntname))
elif i == "!%ss" % pntname:
if comuser() in modlist():
s.send("PRIVMSG %s :%s values are currently %s for mods and %s for viewers every %s seconds.\r\n" % (channel, pntname.capitalize(), modpoints, userpoints, bottimer))
elif i == "!%scheck" % pntname:
if comuser() in modlist():
tregex = re.compile("(!\S*\s\S*)")
tr = tregex.findall(tmp)
trs = tr[0]
trsplit = trs.split()
#update dingbot with this code for point check
if len(trsplit) == 2:
lsql = 'SELECT * FROM Users WHERE username="%s"' % (trsplit[1])
lcur.execute(lsql)
lcuserd = lcur.fetchone()
if lcuserd != None:
s.send("PRIVMSG %s :%s currently has %s %ss and has earned a total of %s %ss.\r\n" % (channel, trsplit[1], lcuserd[1], pntname, lcuserd[2], pntname))
else:
s.send("PRIVMSG %s :%s currently has no %ss.\r\n" % (channel, trsplit[1], pntname))
else:
s.send("PRIVMSG %s :Please specify a user.\r\n" % channel)
elif i == "!give%ss" % pntname:
if comuser() == owner:
tregex = re.compile("(!\S*\s\S*\s\S*)")
tr = tregex.findall(tmp)
trs = tr[0]
trsplit = trs.split()
if len(trsplit) == 3:
trpnt = trsplit[2]
if trpnt.isdigit():
lsql = 'SELECT * FROM Users WHERE username="%s"' % (trsplit[1])
lcur.execute(lsql)
lcuserd = lcur.fetchone()
if lcuserd == None:
lcur.execute("INSERT INTO Users VALUES(?, ?, ?);", (trsplit[1], trsplit[2], trsplit[2]))
lconn.commit()
s.send("PRIVMSG %s :%s gave %s %s %ss.\r\n" % (channel, owner, trsplit[1], trsplit[2], pntname))
else:
lcur.execute("UPDATE Users SET current = current + ?, total = total + ? WHERE Username = ?;", (trsplit[2], trsplit[2], trsplit[1]))
lconn.commit()
s.send("PRIVMSG %s :%s gave %s %s %ss.\r\n" % (channel, owner, trsplit[1], trsplit[2], pntname))
else:
s.send("PRIVMSG %s :%s is not a number. Try again.\r\n" % (channel, trsplit[2]))
else:
s.send("PRIVMSG %s :Not enough info. Please try again. !give%ss <username> <points> \r\n" % (channel, pntname))
elif i == "!take%ss" % pntname:
if comuser() == owner:
tapregex = re.compile("(!\S*\s\S*\s\S*)")
tapr = tapregex.findall(tmp)
taprs = tapr[0]
taprsplit = taprs.split()
if len(taprsplit) == 3:
taprpnt = taprsplit[2]
if taprpnt.isdigit():
lsql = 'SELECT * FROM Users WHERE username="%s"' % (taprsplit[1])
lcur.execute(lsql)
lcuserd = lcur.fetchone()
lcuint = int(lcuserd[1])
tapint = int(taprpnt)
if lcuserd == None:
s.send("PRIVMSG %s :%s has no %ss.\r\n" % (channel, taprsplit[1], pntname))
else:
if tapint >= lcuint:
tapuser = taprsplit[1]
#print tapuser
tapsql = 'UPDATE Users SET current=0, total=total WHERE username="%s"' % (tapuser)
lcur.execute(tapsql)
#lcur.execute("UPDATE Users SET current = 0, total = total WHERE Username = ?;", tapuser)
lconn.commit()
s.send("PRIVMSG %s :%s took all %s from %s.\r\n" % (channel, owner, pntname, taprsplit[1]))
else:
lcur.execute("UPDATE Users SET current = current - ? WHERE Username = ?;", (taprsplit[2], taprsplit[1]))
lconn.commit()
s.send("PRIVMSG %s :%s took from %s %s %ss.\r\n" % (channel, owner, taprsplit[1], taprsplit[2], pntname))
else:
s.send("PRIVMSG %s :%s is not a number. Try again.\r\n" % (channel, taprsplit[2]))
else:
s.send("PRIVMSG %s :Not enough info. Please try again. !take%ss <username> <points> \r\n" % (channel, pntname))
elif i == "!giveall":
if comuser() in modlist():
tregex = re.compile("(!\S*\s\S*)")
tr = tregex.findall(tmp)
trs = tr[0]
trsplit = trs.split()
if len(trsplit) == 2:
trpnt = trsplit[1]
if trpnt.isdigit():
listall = modlistd() + userlist() + tstafflist()
allmax = len(listall)
allcount = allmax - 1
allcur = 0
s.send("PRIVMSG %s :Giving everyone %s %ss. Please wait.\r\n" % (channel, trsplit[1], pntname))
while allcur <= allcount:
listalluser = listall[allcur]
lsql = 'SELECT * FROM Users WHERE username="%s"' % (listalluser)
lcur.execute(lsql)
lcuserd = lcur.fetchone()
if lcuserd == None:
lcur.execute("INSERT INTO Users VALUES(?, ?, ?);", (listalluser, trsplit[1], trsplit[1]))
print "User %s added." % listalluser
allcur = allcur + 1
lconn.commit()
else:
lcur.execute("UPDATE Users SET current = current + ?, total = total + ? WHERE Username = ?;", (trsplit[1], trsplit[1], listalluser))
print "Added points to %s" % listalluser
allcur = allcur + 1
lconn.commit()
s.send("PRIVMSG %s :Everyone was given %s %ss.\r\n" % (channel, trsplit[1], pntname))
else:
s.send("PRIVMSG %s :%s is not a number. Try again.\r\n" % (channel, trsplit[1]))
else:
s.send("PRIVMSG %s :Not enough info. Please try again. !giveall <points> \r\n" % channel)
#fun commands
elif i == "!powers":
powuser = comuser()
powsql = 'SELECT * FROM Users WHERE username="%s"' % (powuser)
lcur.execute(powsql)
powuserd = lcur.fetchone()
powurl = 'http://unitinggamers.com/twitch/php/powers.php'
powresponse = urllib2.urlopen(powurl)
powresp = yaml.load(powresponse)
if powuserd[pntval] >= compntreq:
s.send("PRIVMSG %s :%s has the power of %s.\r\n" % (channel, powuser, powresp))
time.sleep(10)
elif i == "!tarp":
if (time.time() - tarptime) >= tarpwait:
tarpuser = comuser()
tarp = random.randrange(1,100)
tarptime = time.time()
if tarp <= 90:
s.send("PRIVMSG %s :/timeout %s 1\r\n" % (channel, tarpuser))
s.send("PRIVMSG %s :%s has been placed into the bloody sheets of the murder tarp.\r\n" % (channel, tarpuser))
else:
s.send("PRIVMSG %s :%s has escaped the damning folds of the murder tarp... for now.\r\n" % (channel, tarpuser))
elif i == "!slap":
if comuser() in modlist():
slapuser = comuser()
sregex = re.compile("(!\S*\s\S*)")
sr = sregex.findall(tmp)
srs = sr[0]
srsplit = srs.split()
srpnt = srsplit[1]
ranslap = random.randrange(1, 20)
if ranslap <= 10:
s.send("PRIVMSG %s :%s has slapped the hoe %s.\r\n" % (channel, slapuser.capitalize(), srpnt.capitalize()))
elif ranslap >= 11:
s.send("PRIVMSG %s :%s has pimp slapped %s.\r\n" % (channel, slapuser.capitalize(), srpnt.capitalize()))
#notification commands
#elif i == "!notiotest":
# if comuser() == owner:
# print "Notifications: 1 is on, 2 is off."
# print notio
#winph.push_note("Notification Settings.", "Notio is %s" % notio)
# s.send("PRIVMSG %s :%s, the value is %s.\r\n" % (channel, owner, notio))
elif i == "!notdev":
if comuser() == owner:
print "You devices for PushBullet. Count left to right starting at 0."
print "The number is used in the config file to set your device."
print "Config default is 0."
print(pb.devices)
s.send("PRIVMSG %s :Check command window.\r\n" % channel)
elif i == "!notify":
if comuser() == owner:
if notio == 1:
notio = 2
s.send("PRIVMSG %s :Notification commands turned off.\r\n" % channel)
elif notio == 2:
notio = 1
s.send("PRIVMSG %s :Notification commands turned on.\r\n" % channel)
elif i == "!sayhi":
if notio == 1:
if (time.time() - hitime) > (waittime / 2):
hitime = time.time()
hicom = comuser()
winph.push_note("Hey strimmer!", "%s says Hi." % hicom.capitalize())
s.send("PRIVMSG %s :%s has told strimmer hi.\r\n" % (channel, hicom.capitalize()))
elif i == "!saybye":
if notio == 1:
if (time.time() - byetime) > (waittime / 2):
byetime = time.time()
byecom = comuser()
winph.push_note("Strimmer", "%s says Bye." % byecom.capitalize())
s.send("PRIVMSG %s :%s has told strimmer bye.\r\n" % (channel, byecom.capitalize()))
elif i == "!late":
if notio == 1:
if (time.time() - latetime) > waittime:
lapi = 'https://api.twitch.tv/kraken/streams/%s' % owner
lres = urllib2.urlopen(lapi)
strmnfo = yaml.load(lres)
print latsass
#print strmnfo["stream"]
latetime = time.time()
if latsass == 5:
s.send("PRIVMSG %s :/timeout %s 1\r\n" % (channel, comuser()))
latsass = 0
else:
if strmnfo["stream"] == None:
comun = comuser()
winph.push_note("You are late!", "According to %s." % comun.capitalize())
s.send("PRIVMSG %s :Hey %s, Strimmer has been notified.\r\n" % (channel, comun.capitalize()))
latsass = latsass + 1
else:
s.send("PRIVMSG %s :Strimmer is here you pleb.\r\n" % channel)
latsass = latsass + 1
#votekick system
elif i == "!votekick":
if vkact == 1:
vkuser = comuser()
vksql = 'SELECT * FROM Users WHERE username="%s"' % (vkuser)
lcur.execute(vksql)
vkuserd = lcur.fetchone()
vkregex = re.compile("(!\S*\s\S*)")
vkr = vkregex.findall(tmp)
vkrs = vkr[0]
vkrsplit = vkrs.split()
vkee = vkrsplit[1]
vktwapi = 'https://tmi.twitch.tv/group/user/%s/chatters' % owner
vktwresponse = urllib2.urlopen(vktwapi)
vktwapiinfo = json.load(vktwresponse)
vktwmods = vktwapiinfo["chatters"]["moderators"]
vktwviewers = vktwapiinfo["chatters"]["viewers"]
vktwstaff = vktwapiinfo["chatters"]["staff"]
vktwadmins = vktwapiinfo["chatters"]["admins"]
vktwgmods = vktwapiinfo["chatters"]["global_mods"]
vktwfulllist = vktwmods + vktwviewers + vktwstaff + vktwadmins + vktwgmods
vklistlen = len(vktwfulllist)
if vkuserd[pntval] >= 1000:
vkcur.execute("CREATE TABLE VoteKick(username TEXT, vote INT)")
#yes = 1 no = 2
vkconn.commit()
vkuv = 1
vkcur.execute("INSERT INTO VoteKick VALUES(?, ?);",(comuser(), vkuv))
vkconn.commit()
vkact = 2
s.send("PRIVMSG %s :%s has started a votekick on %s. !f1 votes yes. !f2 votes no.\r\n" % (channel, vkuser.capitalize(), vkee))
else:
s.send("PRIVMSG %s :You can't start a votekick yet.\r\n" % channel)
elif i == "!f1":
if vkact == 2:
vkyu = comuser()
vkysql = 'SELECT * FROM VoteKick WHERE username="%s"' % vkyu
vkcur.execute(vkysql)
vkyesusercheck = vkcur.fetchone()
if vkyesusercheck == None:
vkuv = 1
vkcur.execute("INSERT INTO VoteKick VALUES(?, ?);",(comuser(), vkuv))
vkconn.commit()
s.send("PRIVMSG %s :%s has voted yes.\r\n" % (channel, comuser()))
vkonesql = 'SELECT username FROM VoteKick WHERE vote=1'
vkcur.execute(vkonesql)
vkcurd = vkcur.fetchall()
vkyesnum = len(vkcurd)
if vkyesnum >= (vklistlen / 2):
s.send("PRIVMSG %s :/timeout %s 5\r\n" % (channel, vkee))
vkcur.execute("DROP TABLE VoteKick;")
vkconn.commit()
vkact = 1
s.send("PRIVMSG %s :Votekick on %s is successful.\r\n" % (channel, vkee))
else:
s.send("PRIVMSG %s :There is no votekick currently.\r\n" % (channel))
elif i == "!f2":
if vkact == 2:
vknu = comuser()
vknsql = 'SELECT * FROM VoteKick WHERE username="%s"' % vknu
vkcur.execute(vknsql)
vknousercheck = vkcur.fetchone()
if vknousercheck == None:
nvkuv = 2
vkcur.execute("INSERT INTO VoteKick VALUES(?, ?);",(comuser(), nvkuv))
vkconn.commit()
s.send("PRIVMSG %s :%s has voted no.\r\n" % (channel, comuser()))
vktwosql = 'SELECT username FROM VoteKick WHERE vote=2'
vkcur.execute(vktwosql)
vkcurdt = vkcur.fetchall()
vknonum = len(vkcurdt)
if vknonum >= (vklistlen / 2):
s.send("PRIVMSG %s :Votekick on %s is unsuccessful.\r\n" % (channel, vkee))
vkcur.execute("DROP TABLE VoteKick;")
vkconn.commit()
vkact = 1
else:
s.send("PRIVMSG %s :There is no votekick currently.\r\n" % (channel))
elif i == "!f3":
if vkact == 2:
s.send("PRIVMSG %s :Does it look like I said !f3 you scrub?\r\n" % (channel))
elif i == "!f4":
if vkact == 2:
s.send("PRIVMSG %s :Attack! Kippa\r\n" % (channel))
s.send("PRIVMSG %s :/timeout %s 1\r\n" % (channel, comuser()))
elif i == "!f64":
if vkact == 2:
fuser = comuser()
fsql = 'SELECT * FROM Users WHERE username="%s"' % (fuser)
lcur.execute(fsql)
lcuserd = lcur.fetchone()
if lcuserd == None:
s.send("PRIVMSG %s :I am not a Nintendo. And you now owe Nintendo your %ss.\r\n" % (channel, pntname))
else:
fuserp = int(lcuserd[1])
if fuserp <= 64:
fasql = 'UPDATE Users SET current=0, total=total WHERE username="%s"' % (fuser)
lcur.execute(fasql)
lconn.commit()
s.send("PRIVMSG %s :I am not a Nintendo. And now Nintendo has taken your %ss.\r\n" % (channel, pntname))
else:
ftsql = 'UPDATE Users SET current = current - 64, total=total WHERE username="%s"' % (fuser)
lcur.execute(ftsql)
lconn.commit()
s.send("PRIVMSG %s :I am not a Nintendo. And now Nintendo has taken 64 of your %ss.\r\n" % (channel, pntname))
elif i == "!f69":
if vkact == 2:
s.send("PRIVMSG %s :Oh Hell No! Not in my chat! Kippa\r\n" % (channel))
s.send("PRIVMSG %s :/timeout %s 1\r\n" % (channel, comuser()))
elif i == "!vkstop":
if comuser() in modlist():
if vkact == 2:
vkcur.execute("DROP TABLE VoteKick;")
vkconn.commit()
vkact = 1
s.send("PRIVMSG %s :A mod has stopped the votekick.\r\n" % (channel))
#raffle commands
elif i == "!vwrraf":
if comuser() in modlist():
if rafact == 1:
vrtwapi = 'https://tmi.twitch.tv/group/user/%s/chatters' % owner
vrtwresponse = urllib2.urlopen(vrtwapi)
vrtwapilist = yaml.load(vrtwresponse)
vrmodsu = vrtwapilist["chatters"]["moderators"]
vrmodsu.remove(owner)
vrmodsu.remove(nick)
vrstaff = vrtwapilist["chatters"]["staff"]
vradmins = vrtwapilist["chatters"]["admins"]
vrgmods = vrtwapilist["chatters"]["global_mods"]
vrviewers = vrtwapilist["chatters"]["viewers"]
vrusrlst = vrmodsu + vrstaff + vradmins + vrgmods + vrviewers
vrlstlen = len(vrusrlst)
if vrlstlen > 1:
vrranmax = vrlstlen - 1
else:
vrranmax = vrlstlen
vrnum = random.randrange(0,vrranmax)
vrwin = vrusrlst[vrnum]
vrwinapi = 'https://api.twitch.tv/kraken/users/%s/follows/channels/%s' % (vrwin, owner)
vrwinresp = requests.get(vrwinapi)
vrwininfo = vrwinresp.json()
if "message" in vrwininfo:
s.send("PRIVMSG %s :Raffle winner is %s, and they ARE NOT a follower. BibleThump\r\n" % (channel, vrwin))
else:
s.send("PRIVMSG %s :Raffle winner is %s, and they are a follower. deIlluminati\r\n" % (channel, vrwin))
else:
s.send("PRIVMSG %s :Raffle is already active.\r\n" % channel)
#custom commands
elif i == "!addcomd":
if comuser() in modlist():
ncall = tmp.split('|')
if len(ncall) == 4:
nccomm = ncall[1].lstrip()
nccom = nccomm.rstrip()
nclvll = ncall[2].lstrip()
nclvl = nclvll.rstrip()
ncmsgg = ncall[3].lstrip()
ncmsg = ncmsgg.rstrip()
acomsql = 'SELECT * FROM Comsdb WHERE com="%s"' % (nccom)
comcur.execute(acomsql)
actest = comcur.fetchone()
if actest == None:
comcur.execute("INSERT INTO Comsdb VALUES(?, ?, ?);", (nccom, nclvl, ncmsg))
comconn.commit()
s.send("PRIVMSG %s :Command has been added.\r\n" % channel)
else:
s.send("PRIVMSG %s :Not enough info for command. Please try again.\r\n" % channel)
elif i == "!delcomd":
if comuser() in modlist():
dcall = tmp.split('|')
if len(dcall) == 2:
dccn = dcall[1]
dccomm = dccn.lstrip()
dccom = dccomm.rstrip()
dcomsql = 'SELECT * FROM Comsdb WHERE com="%s"' % (dccom)
comcur.execute(dcomsql)
dctest = comcur.fetchone()
if dctest is not None:
delcomsql = 'DELETE FROM Comsdb WHERE com="%s"' % (dccom)
comcur.execute(delcomsql)
comconn.commit()
s.send("PRIVMSG %s :Command has been removed.\r\n" % channel)
else:
s.send("PRIVMSG %s :No command specified. Please try again.\r\n" % channel)
elif i == rdb:
comusernc = comuser()
if comdbf is not None:
comlvl = comdbf[1]
if comlvl == 'ol':
if comusernc == owner:
s.send("PRIVMSG %s :%s\r\n" % (channel, comdbf[2]))
elif comlvl == 'ml':
if comusernc in modlist():
s.send("PRIVMSG %s :%s\r\n" % (channel, comdbf[2]))
elif comlvl == 'vl':
s.send("PRIVMSG %s :%s\r\n" % (channel, comdbf[2]))
|
custom.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import threading
import time
import ast
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse # pylint: disable=import-error
from binascii import hexlify
from os import urandom
import datetime
import json
import ssl
import sys
import uuid
from six.moves.urllib.request import urlopen # pylint: disable=import-error, ungrouped-imports
import OpenSSL.crypto
from fabric import Connection
from knack.prompting import prompt_pass, NoTTYException
from knack.util import CLIError
from knack.log import get_logger
from msrestazure.azure_exceptions import CloudError
from msrestazure.tools import is_valid_resource_id, parse_resource_id
from azure.mgmt.storage import StorageManagementClient
from azure.mgmt.web.models import (Site, SiteConfig, User, AppServicePlan, SiteConfigResource,
SkuDescription, SslState, HostNameBinding, NameValuePair,
BackupRequest, DatabaseBackupSetting, BackupSchedule,
RestoreRequest, FrequencyUnit, Certificate, HostNameSslState,
HybridConnection, RampUpRule, UnauthenticatedClientAction,
ManagedServiceIdentity, DeletedAppRestoreRequest,
DefaultErrorResponseException, SnapshotRestoreRequest,
SnapshotRecoverySource, SwiftVirtualNetwork)
from azure.mgmt.applicationinsights import ApplicationInsightsManagementClient
from azure.mgmt.relay.models import AccessRights
from azure.cli.command_modules.relay._client_factory import hycos_mgmt_client_factory, namespaces_mgmt_client_factory
from azure.storage.blob import BlockBlobService, BlobPermissions
from azure.cli.command_modules.network._client_factory import network_client_factory
from azure.mgmt.network.models import Subnet, Delegation
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.commands import LongRunningOperation
from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, open_page_in_browser, get_json_object, \
ConfiguredDefaultSetter
from azure.cli.core.commands.client_factory import UA_AGENT
from .tunnel import TunnelServer
from .vsts_cd_provider import VstsContinuousDeliveryProvider
from ._params import AUTH_TYPES, MULTI_CONTAINER_TYPES, LINUX_RUNTIMES, WINDOWS_RUNTIMES
from ._client_factory import web_client_factory, ex_handler_factory
from ._appservice_utils import _generic_site_operation
from .utils import _normalize_sku, get_sku_name
from ._create_util import (zip_contents_from_dir, get_runtime_version_details, create_resource_group, get_app_details,
should_create_new_rg, set_location, does_app_already_exist, get_profile_username,
get_plan_to_use, get_lang_from_content, get_rg_to_use, get_sku_to_use,
detect_os_form_src)
from ._constants import (RUNTIME_TO_IMAGE, NODE_VERSION_DEFAULT)
logger = get_logger(__name__)
# pylint:disable=no-member,too-many-lines,too-many-locals
# region "Common routines shared with quick-start extensions."
# Please maintain compatibility in both interfaces and functionalities.
def create_webapp(cmd, resource_group_name, name, plan, runtime=None, startup_file=None, # pylint: disable=too-many-statements,too-many-branches
deployment_container_image_name=None, deployment_source_url=None, deployment_source_branch='master',
deployment_local_git=None, docker_registry_server_password=None, docker_registry_server_user=None,
multicontainer_config_type=None, multicontainer_config_file=None, tags=None,
using_webapp_up=False, language=None):
if deployment_source_url and deployment_local_git:
raise CLIError('usage error: --deployment-source-url <url> | --deployment-local-git')
docker_registry_server_url = parse_docker_image_name(deployment_container_image_name)
client = web_client_factory(cmd.cli_ctx)
if is_valid_resource_id(plan):
parse_result = parse_resource_id(plan)
plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
else:
plan_info = client.app_service_plans.get(resource_group_name, plan)
if not plan_info:
raise CLIError("The plan '{}' doesn't exist".format(plan))
is_linux = plan_info.reserved
node_default_version = NODE_VERSION_DEFAULT
location = plan_info.location
site_config = SiteConfig(app_settings=[])
if isinstance(plan_info.sku, SkuDescription) and plan_info.sku.name.upper() not in ['F1', 'FREE', 'SHARED', 'D1',
'B1', 'B2', 'B3', 'BASIC']:
site_config.always_on = True
webapp_def = Site(location=location, site_config=site_config, server_farm_id=plan_info.id, tags=tags)
helper = _StackRuntimeHelper(client, linux=is_linux)
if is_linux:
if not validate_container_app_create_options(runtime, deployment_container_image_name,
multicontainer_config_type, multicontainer_config_file):
raise CLIError("usage error: --runtime | --deployment-container-image-name |"
" --multicontainer-config-type TYPE --multicontainer-config-file FILE")
if startup_file:
site_config.app_command_line = startup_file
if runtime:
site_config.linux_fx_version = runtime
match = helper.resolve(runtime)
if not match:
raise CLIError("Linux Runtime '{}' is not supported."
"Please invoke 'list-runtimes' to cross check".format(runtime))
elif deployment_container_image_name:
site_config.linux_fx_version = _format_fx_version(deployment_container_image_name)
site_config.app_settings.append(NameValuePair(name="WEBSITES_ENABLE_APP_SERVICE_STORAGE",
value="false"))
elif multicontainer_config_type and multicontainer_config_file:
encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file)
site_config.linux_fx_version = _format_fx_version(encoded_config_file, multicontainer_config_type)
elif plan_info.is_xenon: # windows container webapp
site_config.windows_fx_version = _format_fx_version(deployment_container_image_name)
elif runtime: # windows webapp with runtime specified
if any([startup_file, deployment_container_image_name, multicontainer_config_file, multicontainer_config_type]):
raise CLIError("usage error: --startup-file or --deployment-container-image-name or "
"--multicontainer-config-type and --multicontainer-config-file is "
"only appliable on linux webapp")
match = helper.resolve(runtime)
if not match:
raise CLIError("Runtime '{}' is not supported. Please invoke 'list-runtimes' to cross check".format(runtime)) # pylint: disable=line-too-long
match['setter'](match, site_config)
        # Be consistent with portal: any windows webapp should have this even if it doesn't have node in the stack
if not match['displayName'].startswith('node'):
site_config.app_settings.append(NameValuePair(name="WEBSITE_NODE_DEFAULT_VERSION",
value=node_default_version))
else: # windows webapp without runtime specified
site_config.app_settings.append(NameValuePair(name="WEBSITE_NODE_DEFAULT_VERSION",
value=node_default_version))
if site_config.app_settings:
for setting in site_config.app_settings:
logger.info('Will set appsetting %s', setting)
if using_webapp_up: # when the routine is invoked as a help method for webapp up
logger.info("will set appsetting for enabling build")
site_config.app_settings.append(NameValuePair(name="SCM_DO_BUILD_DURING_DEPLOYMENT", value=True))
if language is not None and language.lower() == 'dotnetcore':
site_config.app_settings.append(NameValuePair(name='ANCM_ADDITIONAL_ERROR_PAGE_LINK',
value='https://{}.scm.azurewebsites.net/detectors'.format(name)))
poller = client.web_apps.create_or_update(resource_group_name, name, webapp_def)
webapp = LongRunningOperation(cmd.cli_ctx)(poller)
# Ensure SCC operations follow right after the 'create', no precedent appsetting update commands
_set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url,
deployment_source_branch, deployment_local_git)
_fill_ftp_publishing_url(cmd, webapp, resource_group_name, name)
if deployment_container_image_name:
update_container_settings(cmd, resource_group_name, name, docker_registry_server_url,
deployment_container_image_name, docker_registry_server_user,
docker_registry_server_password=docker_registry_server_password)
return webapp
def validate_container_app_create_options(runtime=None, deployment_container_image_name=None,
multicontainer_config_type=None, multicontainer_config_file=None):
if bool(multicontainer_config_type) != bool(multicontainer_config_file):
return False
opts = [runtime, deployment_container_image_name, multicontainer_config_type]
    return len([x for x in opts if x]) == 1  # you can only specify one of the combinations
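# Illustrative usage of validate_container_app_create_options (not part of the original module);
# exactly one of runtime, the container image, or the multicontainer pair may be supplied.
# The runtime/image strings below are placeholder values:
#   validate_container_app_create_options(runtime='python|3.7') -> True
#   validate_container_app_create_options(runtime='python|3.7', deployment_container_image_name='nginx') -> False
#   validate_container_app_create_options(multicontainer_config_type='COMPOSE') -> False (config file missing)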
def parse_docker_image_name(deployment_container_image_name):
if not deployment_container_image_name:
return None
slash_ix = deployment_container_image_name.rfind('/')
docker_registry_server_url = deployment_container_image_name[0:slash_ix]
if slash_ix == -1 or ("." not in docker_registry_server_url and ":" not in docker_registry_server_url):
return None
return docker_registry_server_url
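# Illustrative behaviour of parse_docker_image_name (comment only; the registry name is a placeholder):
#   parse_docker_image_name('myregistry.azurecr.io/nginx:latest') -> 'myregistry.azurecr.io'
#   parse_docker_image_name('nginx:latest') -> None, because there is no registry host before the last '/'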
def update_app_settings(cmd, resource_group_name, name, settings=None, slot=None, slot_settings=None):
if not settings and not slot_settings:
raise CLIError('Usage Error: --settings |--slot-settings')
settings = settings or []
slot_settings = slot_settings or []
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_application_settings', slot)
result, slot_result = {}, {}
# pylint: disable=too-many-nested-blocks
for src, dest in [(settings, result), (slot_settings, slot_result)]:
for s in src:
try:
temp = shell_safe_json_parse(s)
                if isinstance(temp, list):  # a bit messy, but we'd like to accept the output of the "list" command
for t in temp:
if t.get('slotSetting', True):
slot_result[t['name']] = t['value']
else:
result[t['name']] = t['value']
else:
dest.update(temp)
except CLIError:
setting_name, value = s.split('=', 1)
dest[setting_name] = value
result.update(slot_result)
for setting_name, value in result.items():
app_settings.properties[setting_name] = value
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_application_settings',
app_settings.properties, slot, client)
app_settings_slot_cfg_names = []
if slot_result:
new_slot_setting_names = slot_result.keys()
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.app_setting_names = slot_cfg_names.app_setting_names or []
slot_cfg_names.app_setting_names += new_slot_setting_names
app_settings_slot_cfg_names = slot_cfg_names.app_setting_names
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return _build_app_settings_output(result.properties, app_settings_slot_cfg_names)
def add_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type, account_name,
share_name, access_key, mount_path=None, slot=None, slot_setting=False):
from azure.mgmt.web.models import AzureStorageInfoValue
azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
if custom_id in azure_storage_accounts.properties:
raise CLIError("Site already configured with an Azure storage account with the id '{}'. "
"Use 'az webapp config storage-account update' to update an existing "
"Azure storage account configuration.".format(custom_id))
azure_storage_accounts.properties[custom_id] = AzureStorageInfoValue(type=storage_type, account_name=account_name,
share_name=share_name, access_key=access_key,
mount_path=mount_path)
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_azure_storage_accounts', azure_storage_accounts.properties,
slot, client)
if slot_setting:
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.azure_storage_config_names = slot_cfg_names.azure_storage_config_names or []
if custom_id not in slot_cfg_names.azure_storage_config_names:
slot_cfg_names.azure_storage_config_names.append(custom_id)
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return result.properties
def update_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type=None, account_name=None,
share_name=None, access_key=None, mount_path=None, slot=None, slot_setting=False):
from azure.mgmt.web.models import AzureStorageInfoValue
azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
existing_account_config = azure_storage_accounts.properties.pop(custom_id, None)
if not existing_account_config:
raise CLIError("No Azure storage account configuration found with the id '{}'. "
"Use 'az webapp config storage-account add' to add a new "
"Azure storage account configuration.".format(custom_id))
new_account_config = AzureStorageInfoValue(
type=storage_type or existing_account_config.type,
account_name=account_name or existing_account_config.account_name,
share_name=share_name or existing_account_config.share_name,
access_key=access_key or existing_account_config.access_key,
mount_path=mount_path or existing_account_config.mount_path
)
azure_storage_accounts.properties[custom_id] = new_account_config
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_azure_storage_accounts', azure_storage_accounts.properties,
slot, client)
if slot_setting:
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.azure_storage_config_names = slot_cfg_names.azure_storage_config_names or []
if custom_id not in slot_cfg_names.azure_storage_config_names:
slot_cfg_names.azure_storage_config_names.append(custom_id)
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return result.properties
def enable_zip_deploy_functionapp(cmd, resource_group_name, name, src, build_remote=False, timeout=None, slot=None):
client = web_client_factory(cmd.cli_ctx)
app = client.web_apps.get(resource_group_name, name)
parse_plan_id = parse_resource_id(app.server_farm_id)
plan_info = None
retry_delay = 10 # seconds
    # We need to retry getting the plan because, when the plan is created as part of the
    # function app, it can take a few attempts before the plan becomes available.
for _ in range(5):
plan_info = client.app_service_plans.get(parse_plan_id['resource_group'],
parse_plan_id['name'])
if plan_info is not None:
break
time.sleep(retry_delay)
if build_remote and not app.reserved:
raise CLIError('Remote build is only available on Linux function apps')
is_consumption = is_plan_consumption(plan_info)
if (not build_remote) and is_consumption and app.reserved:
return upload_zip_to_storage(cmd, resource_group_name, name, src, slot)
return enable_zip_deploy(cmd, resource_group_name, name, src, build_remote,
timeout, slot)
def enable_zip_deploy_webapp(cmd, resource_group_name, name, src, timeout=None, slot=None):
return enable_zip_deploy(cmd, resource_group_name, name, src,
is_remote_build=False,
timeout=timeout,
slot=slot)
def enable_zip_deploy(cmd, resource_group_name, name, src, is_remote_build=False,
timeout=None, slot=None):
logger.warning("Getting scm site credentials for zip deployment")
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
try:
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
except ValueError:
raise CLIError('Failed to fetch scm url for function app')
zip_url = scm_url + '/api/zipdeploy?isAsync=true'
deployment_status_url = scm_url + '/api/deployments/latest'
import urllib3
authorization = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
headers = authorization
headers['content-type'] = 'application/octet-stream'
headers['User-Agent'] = UA_AGENT
if is_remote_build:
add_remote_build_app_settings(cmd, resource_group_name, name, slot)
else:
remove_remote_build_app_settings(cmd, resource_group_name, name, slot)
import requests
import os
from azure.cli.core.util import should_disable_connection_verify
# Read file content
with open(os.path.realpath(os.path.expanduser(src)), 'rb') as fs:
zip_content = fs.read()
logger.warning("Starting zip deployment. This operation can take a while to complete ...")
res = requests.post(zip_url, data=zip_content, headers=headers, verify=not should_disable_connection_verify())
logger.warning("Deployment endpoint responses with status code %d", res.status_code)
# check if there's an ongoing process
if res.status_code == 409:
raise CLIError("There may be an ongoing deployment or your app setting has WEBSITE_RUN_FROM_PACKAGE. "
"Please track your deployment in {} and ensure the WEBSITE_RUN_FROM_PACKAGE app setting "
"is removed.".format(deployment_status_url))
# check the status of async deployment
response = _check_zip_deployment_status(cmd, resource_group_name, name, deployment_status_url,
authorization, timeout)
return response
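# Flow summary for enable_zip_deploy (comment added for clarity, derived from the code above):
# 1. fetch the site's publishing credentials and its SCM (Kudu) URL
# 2. POST the zip bytes to <scm_url>/api/zipdeploy?isAsync=true with basic-auth headers
# 3. a 409 response means another deployment, or WEBSITE_RUN_FROM_PACKAGE, is blocking the push
# 4. otherwise hand deployment_status_url (<scm_url>/api/deployments/latest) to
#    _check_zip_deployment_status, which polls until the async deployment completes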
def add_remote_build_app_settings(cmd, resource_group_name, name, slot):
settings = get_app_settings(cmd, resource_group_name, name, slot)
enable_oryx_build = None
scm_do_build_during_deployment = None
website_run_from_package = None
for keyval in settings:
value = keyval['value'].lower()
if keyval['name'] == 'ENABLE_ORYX_BUILD':
enable_oryx_build = value in ('true', '1')
if keyval['name'] == 'SCM_DO_BUILD_DURING_DEPLOYMENT':
scm_do_build_during_deployment = value in ('true', '1')
if keyval['name'] == 'WEBSITE_RUN_FROM_PACKAGE':
website_run_from_package = value
if not ((enable_oryx_build is True) and (scm_do_build_during_deployment is True)):
logger.warning("Setting ENABLE_ORYX_BUILD to true")
logger.warning("Setting SCM_DO_BUILD_DURING_DEPLOYMENT to true")
update_app_settings(cmd, resource_group_name, name, [
"ENABLE_ORYX_BUILD=true",
"SCM_DO_BUILD_DURING_DEPLOYMENT=true"
], slot)
time.sleep(5)
if website_run_from_package is not None:
logger.warning("Removing WEBSITE_RUN_FROM_PACKAGE app setting")
delete_app_settings(cmd, resource_group_name, name, [
"WEBSITE_RUN_FROM_PACKAGE"
], slot)
time.sleep(5)
def remove_remote_build_app_settings(cmd, resource_group_name, name, slot):
settings = get_app_settings(cmd, resource_group_name, name, slot)
enable_oryx_build = None
scm_do_build_during_deployment = None
for keyval in settings:
value = keyval['value'].lower()
if keyval['name'] == 'ENABLE_ORYX_BUILD':
enable_oryx_build = value in ('true', '1')
if keyval['name'] == 'SCM_DO_BUILD_DURING_DEPLOYMENT':
scm_do_build_during_deployment = value in ('true', '1')
if not ((enable_oryx_build is False) and (scm_do_build_during_deployment is False)):
logger.warning("Setting ENABLE_ORYX_BUILD to false")
logger.warning("Setting SCM_DO_BUILD_DURING_DEPLOYMENT to false")
update_app_settings(cmd, resource_group_name, name, [
"ENABLE_ORYX_BUILD=false",
"SCM_DO_BUILD_DURING_DEPLOYMENT=false"
], slot)
time.sleep(5)
def upload_zip_to_storage(cmd, resource_group_name, name, src, slot=None):
settings = get_app_settings(cmd, resource_group_name, name, slot)
storage_connection = None
for keyval in settings:
if keyval['name'] == 'AzureWebJobsStorage':
storage_connection = str(keyval['value'])
if storage_connection is None:
        raise CLIError('Could not find an \'AzureWebJobsStorage\' application setting')
container_name = "function-releases"
blob_name = "{}-{}.zip".format(datetime.datetime.today().strftime('%Y%m%d%H%M%S'), str(uuid.uuid4()))
block_blob_service = BlockBlobService(connection_string=storage_connection)
if not block_blob_service.exists(container_name):
block_blob_service.create_container(container_name)
# https://gist.github.com/vladignatyev/06860ec2040cb497f0f3
def progress_callback(current, total):
total_length = 30
        filled_length = int(round(total_length * current / float(total)))
percents = round(100.0 * current / float(total), 1)
progress_bar = '=' * filled_length + '-' * (total_length - filled_length)
progress_message = 'Uploading {} {}%'.format(progress_bar, percents)
cmd.cli_ctx.get_progress_controller().add(message=progress_message)
block_blob_service.create_blob_from_path(container_name, blob_name, src, validate_content=True,
progress_callback=progress_callback)
now = datetime.datetime.now()
blob_start = now - datetime.timedelta(minutes=10)
blob_end = now + datetime.timedelta(weeks=520)
blob_token = block_blob_service.generate_blob_shared_access_signature(container_name,
blob_name,
permission=BlobPermissions(read=True),
expiry=blob_end,
start=blob_start)
blob_uri = block_blob_service.make_blob_url(container_name, blob_name, sas_token=blob_token)
website_run_from_setting = "WEBSITE_RUN_FROM_PACKAGE={}".format(blob_uri)
update_app_settings(cmd, resource_group_name, name, settings=[website_run_from_setting])
client = web_client_factory(cmd.cli_ctx)
try:
logger.info('\nSyncing Triggers...')
if slot is not None:
client.web_apps.sync_function_triggers_slot(resource_group_name, name, slot)
else:
client.web_apps.sync_function_triggers(resource_group_name, name)
except CloudError as ce:
        # The SDK can raise an error even when the status code is 200, so only re-raise other failures
if ce.status_code != 200:
raise ce
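# Summary (comment added for clarity): upload_zip_to_storage pushes the package to the
# 'function-releases' container of the AzureWebJobsStorage account, generates a read-only SAS
# valid for roughly ten years (520 weeks), points WEBSITE_RUN_FROM_PACKAGE at that SAS URL,
# and finally syncs the function triggers so the new package is picked up.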
def _generic_settings_operation(cli_ctx, resource_group_name, name, operation_name,
setting_properties, slot=None, client=None):
client = client or web_client_factory(cli_ctx)
operation = getattr(client.web_apps, operation_name if slot is None else operation_name + '_slot')
if slot is None:
return operation(resource_group_name, name, str, setting_properties)
return operation(resource_group_name, name, slot, str, setting_properties)
def show_webapp(cmd, resource_group_name, name, slot=None, app_instance=None):
webapp = app_instance
if not app_instance: # when the routine is invoked as a help method, not through commands
webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(name))
_rename_server_farm_props(webapp)
_fill_ftp_publishing_url(cmd, webapp, resource_group_name, name, slot)
return webapp
# for generic updater
def get_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def set_webapp(cmd, resource_group_name, name, slot=None, skip_dns_registration=None,
skip_custom_domain_verification=None, force_dns_registration=None, ttl_in_seconds=None, **kwargs):
instance = kwargs['parameters']
client = web_client_factory(cmd.cli_ctx)
updater = client.web_apps.create_or_update_slot if slot else client.web_apps.create_or_update
kwargs = dict(resource_group_name=resource_group_name, name=name, site_envelope=instance,
skip_dns_registration=skip_dns_registration,
skip_custom_domain_verification=skip_custom_domain_verification,
force_dns_registration=force_dns_registration,
ttl_in_seconds=ttl_in_seconds)
if slot:
kwargs['slot'] = slot
return updater(**kwargs)
def update_webapp(instance, client_affinity_enabled=None, https_only=None):
if 'function' in instance.kind:
raise CLIError("please use 'az functionapp update' to update this function app")
if client_affinity_enabled is not None:
instance.client_affinity_enabled = client_affinity_enabled == 'true'
if https_only is not None:
instance.https_only = https_only == 'true'
return instance
def update_functionapp(cmd, instance, plan=None):
client = web_client_factory(cmd.cli_ctx)
if plan is not None:
if is_valid_resource_id(plan):
dest_parse_result = parse_resource_id(plan)
dest_plan_info = client.app_service_plans.get(dest_parse_result['resource_group'],
dest_parse_result['name'])
else:
dest_plan_info = client.app_service_plans.get(instance.resource_group, plan)
if dest_plan_info is None:
raise CLIError("The plan '{}' doesn't exist".format(plan))
validate_plan_switch_compatibility(client, instance, dest_plan_info)
instance.server_farm_id = dest_plan_info.id
return instance
def validate_plan_switch_compatibility(client, src_functionapp_instance, dest_plan_instance):
general_switch_msg = 'Currently the switch is only allowed between a Consumption or an Elastic Premium plan.'
src_parse_result = parse_resource_id(src_functionapp_instance.server_farm_id)
src_plan_info = client.app_service_plans.get(src_parse_result['resource_group'],
src_parse_result['name'])
if src_plan_info is None:
raise CLIError('Could not determine the current plan of the functionapp')
if not (is_plan_consumption(src_plan_info) or is_plan_elastic_premium(src_plan_info)):
raise CLIError('Your functionapp is not using a Consumption or an Elastic Premium plan. ' + general_switch_msg)
if not (is_plan_consumption(dest_plan_instance) or is_plan_elastic_premium(dest_plan_instance)):
raise CLIError('You are trying to move to a plan that is not a Consumption or an Elastic Premium plan. ' +
general_switch_msg)
def set_functionapp(cmd, resource_group_name, name, **kwargs):
instance = kwargs['parameters']
if 'function' not in instance.kind:
raise CLIError('Not a function app to update')
client = web_client_factory(cmd.cli_ctx)
return client.web_apps.create_or_update(resource_group_name, name, site_envelope=instance)
def list_webapp(cmd, resource_group_name=None):
result = _list_app(cmd.cli_ctx, resource_group_name)
return [r for r in result if 'function' not in r.kind]
def list_deleted_webapp(cmd, resource_group_name=None, name=None, slot=None):
result = _list_deleted_app(cmd.cli_ctx, resource_group_name, name, slot)
return sorted(result, key=lambda site: site.deleted_site_id)
def restore_deleted_webapp(cmd, deleted_id, resource_group_name, name, slot=None, restore_content_only=None):
request = DeletedAppRestoreRequest(deleted_site_id=deleted_id, recover_configuration=not restore_content_only)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'restore_from_deleted_app', slot, request)
def list_function_app(cmd, resource_group_name=None):
result = _list_app(cmd.cli_ctx, resource_group_name)
return [r for r in result if 'function' in r.kind]
def _list_app(cli_ctx, resource_group_name=None):
client = web_client_factory(cli_ctx)
if resource_group_name:
result = list(client.web_apps.list_by_resource_group(resource_group_name))
else:
result = list(client.web_apps.list())
for webapp in result:
_rename_server_farm_props(webapp)
return result
def _list_deleted_app(cli_ctx, resource_group_name=None, name=None, slot=None):
client = web_client_factory(cli_ctx)
locations = _get_deleted_apps_locations(cli_ctx)
result = list()
for location in locations:
result = result + list(client.deleted_web_apps.list_by_location(location))
if resource_group_name:
result = [r for r in result if r.resource_group == resource_group_name]
if name:
result = [r for r in result if r.deleted_site_name.lower() == name.lower()]
if slot:
result = [r for r in result if r.slot.lower() == slot.lower()]
return result
def assign_identity(cmd, resource_group_name, name, role='Contributor', slot=None, scope=None):
def getter():
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def setter(webapp):
webapp.identity = ManagedServiceIdentity(type='SystemAssigned')
poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'create_or_update', slot, webapp)
return LongRunningOperation(cmd.cli_ctx)(poller)
from azure.cli.core.commands.arm import assign_identity as _assign_identity
webapp = _assign_identity(cmd.cli_ctx, getter, setter, role, scope)
return webapp.identity
def show_identity(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot).identity
def remove_identity(cmd, resource_group_name, name, slot=None):
def getter():
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def setter(webapp):
webapp.identity = ManagedServiceIdentity(type='None')
poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'create_or_update', slot, webapp)
return LongRunningOperation(cmd.cli_ctx)(poller)
from azure.cli.core.commands.arm import assign_identity as _assign_identity
webapp = _assign_identity(cmd.cli_ctx, getter, setter)
return webapp.identity
def get_auth_settings(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_auth_settings', slot)
def update_auth_settings(cmd, resource_group_name, name, enabled=None, action=None, # pylint: disable=unused-argument
client_id=None, token_store_enabled=None, # pylint: disable=unused-argument
token_refresh_extension_hours=None, # pylint: disable=unused-argument
allowed_external_redirect_urls=None, client_secret=None, # pylint: disable=unused-argument
allowed_audiences=None, issuer=None, facebook_app_id=None, # pylint: disable=unused-argument
facebook_app_secret=None, facebook_oauth_scopes=None, # pylint: disable=unused-argument
twitter_consumer_key=None, twitter_consumer_secret=None, # pylint: disable=unused-argument
google_client_id=None, google_client_secret=None, # pylint: disable=unused-argument
google_oauth_scopes=None, microsoft_account_client_id=None, # pylint: disable=unused-argument
microsoft_account_client_secret=None, # pylint: disable=unused-argument
microsoft_account_oauth_scopes=None, slot=None): # pylint: disable=unused-argument
auth_settings = get_auth_settings(cmd, resource_group_name, name, slot)
if action == 'AllowAnonymous':
auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.allow_anonymous
elif action:
auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.redirect_to_login_page
auth_settings.default_provider = AUTH_TYPES[action]
import inspect
frame = inspect.currentframe()
bool_flags = ['enabled', 'token_store_enabled']
    # note: getargvalues is already used in azure.cli.core.commands,
    # and there is no simple functional replacement for this deprecated method on Python 3.5
args, _, _, values = inspect.getargvalues(frame) # pylint: disable=deprecated-method
for arg in args[2:]:
if values.get(arg, None):
setattr(auth_settings, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_auth_settings', slot, auth_settings)
def list_runtimes(cmd, linux=False):
client = web_client_factory(cmd.cli_ctx)
runtime_helper = _StackRuntimeHelper(client, linux)
return [s['displayName'] for s in runtime_helper.stacks]
def _rename_server_farm_props(webapp):
# Should be renamed in SDK in a future release
setattr(webapp, 'app_service_plan_id', webapp.server_farm_id)
del webapp.server_farm_id
return webapp
def delete_function_app(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'delete', slot)
def delete_webapp(cmd, resource_group_name, name, keep_metrics=None, keep_empty_plan=None,
keep_dns_registration=None, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.delete_slot(resource_group_name, name, slot,
delete_metrics=False if keep_metrics else None,
delete_empty_server_farm=False if keep_empty_plan else None,
skip_dns_registration=False if keep_dns_registration else None)
else:
client.web_apps.delete(resource_group_name, name,
delete_metrics=False if keep_metrics else None,
delete_empty_server_farm=False if keep_empty_plan else None,
skip_dns_registration=False if keep_dns_registration else None)
def stop_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'stop', slot)
def start_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'start', slot)
def restart_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'restart', slot)
def get_site_configs(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_configuration', slot)
def get_app_settings(cmd, resource_group_name, name, slot=None):
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_application_settings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_app_setting_names = client.web_apps.list_slot_configuration_names(resource_group_name, name).app_setting_names
return _build_app_settings_output(result.properties, slot_app_setting_names)
def get_connection_strings(cmd, resource_group_name, name, slot=None):
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_connection_strings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_constr_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) \
.connection_string_names or []
result = [{'name': p,
'value': result.properties[p],
'slotSetting': p in slot_constr_names} for p in result.properties]
return result
def get_azure_storage_accounts(cmd, resource_group_name, name, slot=None):
client = web_client_factory(cmd.cli_ctx)
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
slot_azure_storage_config_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) \
.azure_storage_config_names or []
return [{'name': p,
'value': result.properties[p],
'slotSetting': p in slot_azure_storage_config_names} for p in result.properties]
def _fill_ftp_publishing_url(cmd, webapp, resource_group_name, name, slot=None):
profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
url = next(p['publishUrl'] for p in profiles if p['publishMethod'] == 'FTP')
setattr(webapp, 'ftpPublishingUrl', url)
return webapp
def _format_fx_version(custom_image_name, container_config_type=None):
fx_version = custom_image_name.strip()
fx_version_lower = fx_version.lower()
# handles case of only spaces
if fx_version:
if container_config_type:
fx_version = '{}|{}'.format(container_config_type, custom_image_name)
elif not fx_version_lower.startswith('docker|'):
fx_version = '{}|{}'.format('DOCKER', custom_image_name)
else:
fx_version = ' '
return fx_version
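# Illustrative traces of _format_fx_version (derived from the logic above; the image
# names below are made-up examples, not values used elsewhere in this module):
#   _format_fx_version('nginx:latest')                -> 'DOCKER|nginx:latest'
#   _format_fx_version('DOCKER|nginx:latest')         -> 'DOCKER|nginx:latest' (existing prefix preserved)
#   _format_fx_version('<base64-config>', 'COMPOSE')  -> 'COMPOSE|<base64-config>'
#   _format_fx_version('   ')                         -> ' ' (used to clear linux_fx_version)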
def _add_fx_version(cmd, resource_group_name, name, custom_image_name, slot=None):
fx_version = _format_fx_version(custom_image_name)
web_app = get_webapp(cmd, resource_group_name, name, slot)
linux_fx = fx_version if web_app.reserved else None
windows_fx = fx_version if web_app.is_xenon else None
return update_site_configs(cmd, resource_group_name, name,
linux_fx_version=linux_fx, windows_fx_version=windows_fx, slot=slot)
def _delete_linux_fx_version(cmd, resource_group_name, name, slot=None):
return update_site_configs(cmd, resource_group_name, name, linux_fx_version=' ', slot=slot)
def _get_fx_version(cmd, resource_group_name, name, slot=None):
site_config = get_site_configs(cmd, resource_group_name, name, slot)
return site_config.linux_fx_version or site_config.windows_fx_version or ''
def url_validator(url):
try:
result = urlparse(url)
return all([result.scheme, result.netloc, result.path])
except ValueError:
return False
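# Note: url_validator requires a scheme, a network location, and a non-empty path, so a
# bare domain such as 'https://example.com' evaluates to False, while
# 'https://example.com/docker-compose.yml' evaluates to True (example URLs are
# illustrative only).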
def _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot=None):
from base64 import b64decode
linux_fx_version = _get_fx_version(cmd, resource_group_name, name, slot)
if not any([linux_fx_version.startswith(s) for s in MULTI_CONTAINER_TYPES]):
raise CLIError("Cannot decode config that is not one of the"
" following types: {}".format(','.join(MULTI_CONTAINER_TYPES)))
return b64decode(linux_fx_version.split('|')[1].encode('utf-8'))
def _get_linux_multicontainer_encoded_config_from_file(file_name):
from base64 import b64encode
config_file_bytes = None
if url_validator(file_name):
response = urlopen(file_name, context=_ssl_context())
config_file_bytes = response.read()
else:
with open(file_name, 'rb') as f:
config_file_bytes = f.read()
# Decode base64 encoded byte array into string
return b64encode(config_file_bytes).decode('utf-8')
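# A sketch of how the two multicontainer helpers above compose (the file name and the
# 'COMPOSE' config type are hypothetical examples): reading a local compose file yields
# its base64-encoded contents, which _format_fx_version then wraps before the value is
# written to linux_fx_version via update_site_configs (see update_container_settings below).
#   encoded = _get_linux_multicontainer_encoded_config_from_file('docker-compose.yml')
#   linux_fx_version = _format_fx_version(encoded, 'COMPOSE')  # -> 'COMPOSE|<base64>'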
# For any modifications to the non-optional parameters, adjust the reflection logic
# in the method below accordingly.
def update_site_configs(cmd, resource_group_name, name, slot=None,
linux_fx_version=None, windows_fx_version=None, reserved_instance_count=None, php_version=None, # pylint: disable=unused-argument
python_version=None, net_framework_version=None, # pylint: disable=unused-argument
java_version=None, java_container=None, java_container_version=None, # pylint: disable=unused-argument
remote_debugging_enabled=None, web_sockets_enabled=None, # pylint: disable=unused-argument
always_on=None, auto_heal_enabled=None, # pylint: disable=unused-argument
use32_bit_worker_process=None, # pylint: disable=unused-argument
min_tls_version=None, # pylint: disable=unused-argument
http20_enabled=None, # pylint: disable=unused-argument
app_command_line=None, # pylint: disable=unused-argument
ftps_state=None, # pylint: disable=unused-argument
generic_configurations=None):
configs = get_site_configs(cmd, resource_group_name, name, slot)
if linux_fx_version:
if linux_fx_version.strip().lower().startswith('docker|'):
update_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE=false"])
else:
delete_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE"])
if reserved_instance_count is not None:
reserved_instance_count = validate_range_of_int_flag('--prewarmed-instance-count', reserved_instance_count,
min_val=0, max_val=20)
import inspect
frame = inspect.currentframe()
bool_flags = ['remote_debugging_enabled', 'web_sockets_enabled', 'always_on',
'auto_heal_enabled', 'use32_bit_worker_process', 'http20_enabled']
int_flags = ['reserved_instance_count']
    # note: getargvalues is already used in azure.cli.core.commands,
    # and there is no simple functional replacement for this deprecated method on Python 3.5
args, _, _, values = inspect.getargvalues(frame) # pylint: disable=deprecated-method
for arg in args[3:]:
if arg in int_flags and values[arg] is not None:
values[arg] = validate_and_convert_to_int(arg, values[arg])
if arg != 'generic_configurations' and values.get(arg, None):
setattr(configs, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')
generic_configurations = generic_configurations or []
result = {}
for s in generic_configurations:
try:
result.update(get_json_object(s))
except CLIError:
config_name, value = s.split('=', 1)
result[config_name] = value
for config_name, value in result.items():
setattr(configs, config_name, value)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
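# Illustrative parsing of generic_configurations (hypothetical input): each entry is
# either a JSON object string or a plain 'key=value' pair; all resulting key/value pairs
# are merged and then applied to the site configuration object with setattr.
#   ['{"minTlsVersion": "1.2"}', 'ftpsState=Disabled']
#       -> {'minTlsVersion': '1.2', 'ftpsState': 'Disabled'}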
def delete_app_settings(cmd, resource_group_name, name, setting_names, slot=None):
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_application_settings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
is_slot_settings = False
for setting_name in setting_names:
app_settings.properties.pop(setting_name, None)
if slot_cfg_names.app_setting_names and setting_name in slot_cfg_names.app_setting_names:
slot_cfg_names.app_setting_names.remove(setting_name)
is_slot_settings = True
if is_slot_settings:
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_application_settings',
app_settings.properties, slot, client)
return _build_app_settings_output(result.properties, slot_cfg_names.app_setting_names)
def delete_azure_storage_accounts(cmd, resource_group_name, name, custom_id, slot=None):
azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
client = web_client_factory(cmd.cli_ctx)
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
is_slot_settings = False
azure_storage_accounts.properties.pop(custom_id, None)
if slot_cfg_names.azure_storage_config_names and custom_id in slot_cfg_names.azure_storage_config_names:
slot_cfg_names.azure_storage_config_names.remove(custom_id)
is_slot_settings = True
if is_slot_settings:
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_azure_storage_accounts', azure_storage_accounts.properties,
slot, client)
return result.properties
def _ssl_context():
    if sys.version_info < (3, 4) or (in_cloud_console() and sys.platform == 'win32'):
try:
return ssl.SSLContext(ssl.PROTOCOL_TLS) # added in python 2.7.13 and 3.6
except AttributeError:
return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
return ssl.create_default_context()
def _build_app_settings_output(app_settings, slot_cfg_names):
slot_cfg_names = slot_cfg_names or []
return [{'name': p,
'value': app_settings[p],
'slotSetting': p in slot_cfg_names} for p in _mask_creds_related_appsettings(app_settings)]
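# Example output shape (hypothetical settings): given app settings
# {'FOO': 'bar', 'DOCKER_REGISTRY_SERVER_PASSWORD': 'secret'} and slot config names
# ['FOO'], the helper returns entries such as
#   {'name': 'FOO', 'value': 'bar', 'slotSetting': True}
#   {'name': 'DOCKER_REGISTRY_SERVER_PASSWORD', 'value': None, 'slotSetting': False}
# because credential-related settings are masked by _mask_creds_related_appsettings.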
def update_connection_strings(cmd, resource_group_name, name, connection_string_type,
settings=None, slot=None, slot_settings=None):
from azure.mgmt.web.models import ConnStringValueTypePair
if not settings and not slot_settings:
        raise CLIError('Usage Error: --settings | --slot-settings')
settings = settings or []
slot_settings = slot_settings or []
conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_connection_strings', slot)
for name_value in settings + slot_settings:
# split at the first '=', connection string should not have '=' in the name
conn_string_name, value = name_value.split('=', 1)
        if value[0] in ["'", '"']:  # strip away the quotes used as separators
value = value[1:-1]
conn_strings.properties[conn_string_name] = ConnStringValueTypePair(value=value,
type=connection_string_type)
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_connection_strings',
conn_strings.properties, slot, client)
if slot_settings:
new_slot_setting_names = [n.split('=', 1)[0] for n in slot_settings]
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.connection_string_names = slot_cfg_names.connection_string_names or []
slot_cfg_names.connection_string_names += new_slot_setting_names
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return result.properties
def delete_connection_strings(cmd, resource_group_name, name, setting_names, slot=None):
conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_connection_strings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
is_slot_settings = False
for setting_name in setting_names:
conn_strings.properties.pop(setting_name, None)
if slot_cfg_names.connection_string_names and setting_name in slot_cfg_names.connection_string_names:
slot_cfg_names.connection_string_names.remove(setting_name)
is_slot_settings = True
if is_slot_settings:
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_connection_strings',
conn_strings.properties, slot, client)
CONTAINER_APPSETTING_NAMES = ['DOCKER_REGISTRY_SERVER_URL', 'DOCKER_REGISTRY_SERVER_USERNAME',
'DOCKER_REGISTRY_SERVER_PASSWORD', "WEBSITES_ENABLE_APP_SERVICE_STORAGE"]
APPSETTINGS_TO_MASK = ['DOCKER_REGISTRY_SERVER_PASSWORD']
def update_container_settings(cmd, resource_group_name, name, docker_registry_server_url=None,
docker_custom_image_name=None, docker_registry_server_user=None,
websites_enable_app_service_storage=None, docker_registry_server_password=None,
multicontainer_config_type=None, multicontainer_config_file=None, slot=None):
settings = []
if docker_registry_server_url is not None:
settings.append('DOCKER_REGISTRY_SERVER_URL=' + docker_registry_server_url)
if (not docker_registry_server_user and not docker_registry_server_password and
docker_registry_server_url and '.azurecr.io' in docker_registry_server_url):
logger.warning('No credential was provided to access Azure Container Registry. Trying to look up...')
parsed = urlparse(docker_registry_server_url)
registry_name = (parsed.netloc if parsed.scheme else parsed.path).split('.')[0]
try:
docker_registry_server_user, docker_registry_server_password = _get_acr_cred(cmd.cli_ctx, registry_name)
except Exception as ex: # pylint: disable=broad-except
logger.warning("Retrieving credentials failed with an exception:'%s'", ex) # consider throw if needed
if docker_registry_server_user is not None:
settings.append('DOCKER_REGISTRY_SERVER_USERNAME=' + docker_registry_server_user)
if docker_registry_server_password is not None:
settings.append('DOCKER_REGISTRY_SERVER_PASSWORD=' + docker_registry_server_password)
if docker_custom_image_name is not None:
_add_fx_version(cmd, resource_group_name, name, docker_custom_image_name, slot)
if websites_enable_app_service_storage:
settings.append('WEBSITES_ENABLE_APP_SERVICE_STORAGE=' + websites_enable_app_service_storage)
if docker_registry_server_user or docker_registry_server_password or docker_registry_server_url or websites_enable_app_service_storage: # pylint: disable=line-too-long
update_app_settings(cmd, resource_group_name, name, settings, slot)
settings = get_app_settings(cmd, resource_group_name, name, slot)
if multicontainer_config_file and multicontainer_config_type:
encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file)
linux_fx_version = _format_fx_version(encoded_config_file, multicontainer_config_type)
update_site_configs(cmd, resource_group_name, name, linux_fx_version=linux_fx_version, slot=slot)
elif multicontainer_config_file or multicontainer_config_type:
        logger.warning('Both --multicontainer-config-file FILE and --multicontainer-config-type TYPE must be specified')
return _mask_creds_related_appsettings(_filter_for_container_settings(cmd, resource_group_name, name, settings,
slot=slot))
def update_container_settings_functionapp(cmd, resource_group_name, name, docker_registry_server_url=None,
docker_custom_image_name=None, docker_registry_server_user=None,
docker_registry_server_password=None, slot=None):
return update_container_settings(cmd, resource_group_name, name, docker_registry_server_url,
docker_custom_image_name, docker_registry_server_user, None,
docker_registry_server_password, multicontainer_config_type=None,
multicontainer_config_file=None, slot=slot)
def _get_acr_cred(cli_ctx, registry_name):
from azure.mgmt.containerregistry import ContainerRegistryManagementClient
from azure.cli.core.commands.parameters import get_resources_in_subscription
client = get_mgmt_service_client(cli_ctx, ContainerRegistryManagementClient).registries
result = get_resources_in_subscription(cli_ctx, 'Microsoft.ContainerRegistry/registries')
result = [item for item in result if item.name.lower() == registry_name]
    if not result or len(result) > 1:
        raise CLIError("No resource, or more than one, was found with the name '{}'.".format(registry_name))
resource_group_name = parse_resource_id(result[0].id)['resource_group']
registry = client.get(resource_group_name, registry_name)
if registry.admin_user_enabled: # pylint: disable=no-member
cred = client.list_credentials(resource_group_name, registry_name)
return cred.username, cred.passwords[0].value
raise CLIError("Failed to retrieve container registry credentials. Please either provide the "
"credentials or run 'az acr update -n {} --admin-enabled true' to enable "
"admin first.".format(registry_name))
def delete_container_settings(cmd, resource_group_name, name, slot=None):
_delete_linux_fx_version(cmd, resource_group_name, name, slot)
delete_app_settings(cmd, resource_group_name, name, CONTAINER_APPSETTING_NAMES, slot)
def show_container_settings(cmd, resource_group_name, name, show_multicontainer_config=None, slot=None):
settings = get_app_settings(cmd, resource_group_name, name, slot)
return _mask_creds_related_appsettings(_filter_for_container_settings(cmd, resource_group_name, name, settings,
show_multicontainer_config, slot))
def show_container_settings_functionapp(cmd, resource_group_name, name, slot=None):
return show_container_settings(cmd, resource_group_name, name, show_multicontainer_config=None, slot=slot)
def _filter_for_container_settings(cmd, resource_group_name, name, settings,
show_multicontainer_config=None, slot=None):
result = [x for x in settings if x['name'] in CONTAINER_APPSETTING_NAMES]
fx_version = _get_fx_version(cmd, resource_group_name, name, slot).strip()
if fx_version:
added_image_name = {'name': 'DOCKER_CUSTOM_IMAGE_NAME',
'value': fx_version}
result.append(added_image_name)
if show_multicontainer_config:
decoded_value = _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot)
decoded_image_name = {'name': 'DOCKER_CUSTOM_IMAGE_NAME_DECODED',
'value': decoded_value}
result.append(decoded_image_name)
return result
# TODO: remove this when #3660(service tracking issue) is resolved
def _mask_creds_related_appsettings(settings):
for x in [x1 for x1 in settings if x1 in APPSETTINGS_TO_MASK]:
settings[x] = None
return settings
def add_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None):
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, webapp_name)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(webapp_name))
binding = HostNameBinding(location=webapp.location, site_name=webapp.name)
if slot is None:
return client.web_apps.create_or_update_host_name_binding(resource_group_name, webapp.name, hostname, binding)
return client.web_apps.create_or_update_host_name_binding_slot(resource_group_name, webapp.name, hostname, binding,
slot)
def delete_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot is None:
return client.web_apps.delete_host_name_binding(resource_group_name, webapp_name, hostname)
return client.web_apps.delete_host_name_binding_slot(resource_group_name, webapp_name, slot, hostname)
def list_hostnames(cmd, resource_group_name, webapp_name, slot=None):
result = list(_generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
'list_host_name_bindings', slot))
for r in result:
r.name = r.name.split('/')[-1]
return result
def get_external_ip(cmd, resource_group_name, webapp_name):
    # the logic here is ported from the portal
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, webapp_name)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(webapp_name))
if webapp.hosting_environment_profile:
address = client.app_service_environments.list_vips(
resource_group_name, webapp.hosting_environment_profile.name)
if address.internal_ip_address:
ip_address = address.internal_ip_address
else:
vip = next((s for s in webapp.host_name_ssl_states if s.ssl_state == SslState.ip_based_enabled), None)
ip_address = vip.virtual_ip if vip else address.service_ip_address
else:
ip_address = _resolve_hostname_through_dns(webapp.default_host_name)
return {'ip': ip_address}
def _resolve_hostname_through_dns(hostname):
import socket
return socket.gethostbyname(hostname)
def create_webapp_slot(cmd, resource_group_name, webapp, slot, configuration_source=None):
client = web_client_factory(cmd.cli_ctx)
site = client.web_apps.get(resource_group_name, webapp)
if not site:
raise CLIError("'{}' app doesn't exist".format(webapp))
if 'functionapp' in site.kind:
raise CLIError("'{}' is a function app. Please use `az functionapp deployment slot create`.".format(webapp))
location = site.location
slot_def = Site(server_farm_id=site.server_farm_id, location=location)
slot_def.site_config = SiteConfig()
poller = client.web_apps.create_or_update_slot(resource_group_name, webapp, slot_def, slot)
result = LongRunningOperation(cmd.cli_ctx)(poller)
if configuration_source:
update_slot_configuration_from_source(cmd, client, resource_group_name, webapp, slot, configuration_source)
result.name = result.name.split('/')[-1]
return result
def create_functionapp_slot(cmd, resource_group_name, name, slot, configuration_source=None):
client = web_client_factory(cmd.cli_ctx)
site = client.web_apps.get(resource_group_name, name)
if not site:
raise CLIError("'{}' function app doesn't exist".format(name))
location = site.location
slot_def = Site(server_farm_id=site.server_farm_id, location=location)
poller = client.web_apps.create_or_update_slot(resource_group_name, name, slot_def, slot)
result = LongRunningOperation(cmd.cli_ctx)(poller)
if configuration_source:
update_slot_configuration_from_source(cmd, client, resource_group_name, name, slot, configuration_source)
result.name = result.name.split('/')[-1]
return result
def update_slot_configuration_from_source(cmd, client, resource_group_name, webapp, slot, configuration_source=None):
clone_from_prod = configuration_source.lower() == webapp.lower()
site_config = get_site_configs(cmd, resource_group_name, webapp,
None if clone_from_prod else configuration_source)
_generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'update_configuration', slot, site_config)
# slot create doesn't clone over the app-settings and connection-strings, so we do it here
# also make sure slot settings don't get propagated.
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, webapp)
src_slot = None if clone_from_prod else configuration_source
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'list_application_settings',
src_slot)
for a in slot_cfg_names.app_setting_names or []:
app_settings.properties.pop(a, None)
connection_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'list_connection_strings',
src_slot)
for a in slot_cfg_names.connection_string_names or []:
connection_strings.properties.pop(a, None)
_generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp,
'update_application_settings',
app_settings.properties, slot, client)
_generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp,
'update_connection_strings',
connection_strings.properties, slot, client)
def config_source_control(cmd, resource_group_name, name, repo_url, repository_type='git', branch=None, # pylint: disable=too-many-locals
manual_integration=None, git_token=None, slot=None, cd_app_type=None,
app_working_dir=None, nodejs_task_runner=None, python_framework=None,
python_version=None, cd_account_create=None, cd_project_url=None, test=None,
slot_swap=None, private_repo_username=None, private_repo_password=None):
client = web_client_factory(cmd.cli_ctx)
location = _get_location_from_webapp(client, resource_group_name, name)
if cd_project_url:
# Add default values
cd_app_type = 'AspNet' if cd_app_type is None else cd_app_type
python_framework = 'Django' if python_framework is None else python_framework
python_version = 'Python 3.5.3 x86' if python_version is None else python_version
        webapp_list = None if test is None else list_webapp(cmd, resource_group_name)
vsts_provider = VstsContinuousDeliveryProvider()
cd_app_type_details = {
'cd_app_type': cd_app_type,
'app_working_dir': app_working_dir,
'nodejs_task_runner': nodejs_task_runner,
'python_framework': python_framework,
'python_version': python_version
}
try:
status = vsts_provider.setup_continuous_delivery(cmd.cli_ctx, resource_group_name, name, repo_url,
branch, git_token, slot_swap, cd_app_type_details,
cd_project_url, cd_account_create, location, test,
private_repo_username, private_repo_password, webapp_list)
except RuntimeError as ex:
raise CLIError(ex)
logger.warning(status.status_message)
return status
non_vsts_params = [cd_app_type, app_working_dir, nodejs_task_runner, python_framework,
python_version, cd_account_create, test, slot_swap]
if any(non_vsts_params):
        raise CLIError('The following parameters have no effect when cd_project_url is not provided: ' +
                       'cd_app_type, app_working_dir, nodejs_task_runner, python_framework, ' +
                       'python_version, cd_account_create, test, slot_swap')
from azure.mgmt.web.models import SiteSourceControl, SourceControl
if git_token:
sc = SourceControl(location=location, source_control_name='GitHub', token=git_token)
client.update_source_control('GitHub', sc)
source_control = SiteSourceControl(location=location, repo_url=repo_url, branch=branch,
is_manual_integration=manual_integration,
is_mercurial=(repository_type != 'git'))
# SCC config can fail if previous commands caused SCMSite shutdown, so retry here.
for i in range(5):
try:
poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'create_or_update_source_control',
slot, source_control)
return LongRunningOperation(cmd.cli_ctx)(poller)
except Exception as ex: # pylint: disable=broad-except
import re
ex = ex_handler_factory(no_throw=True)(ex)
            # for non-server errors (not 50x), raise immediately; otherwise retry up to 4 more times
if i == 4 or not re.findall(r'\(50\d\)', str(ex)):
raise
logger.warning('retrying %s/4', i + 1)
time.sleep(5) # retry in a moment
def update_git_token(cmd, git_token=None):
'''
    Update the source control token cached in Azure App Service. If no token is provided,
    the command will clear the existing token.
'''
client = web_client_factory(cmd.cli_ctx)
from azure.mgmt.web.models import SourceControl
sc = SourceControl(name='not-really-needed', source_control_name='GitHub', token=git_token or '')
return client.update_source_control('GitHub', sc)
def show_source_control(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_source_control', slot)
def delete_source_control(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'delete_source_control', slot)
def enable_local_git(cmd, resource_group_name, name, slot=None):
client = web_client_factory(cmd.cli_ctx)
location = _get_location_from_webapp(client, resource_group_name, name)
site_config = SiteConfigResource(location=location)
site_config.scm_type = 'LocalGit'
if slot is None:
client.web_apps.create_or_update_configuration(resource_group_name, name, site_config)
else:
client.web_apps.create_or_update_configuration_slot(resource_group_name, name,
site_config, slot)
return {'url': _get_local_git_url(cmd.cli_ctx, client, resource_group_name, name, slot)}
def sync_site_repo(cmd, resource_group_name, name, slot=None):
try:
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'sync_repository', slot)
except CloudError as ex: # Because of bad spec, sdk throws on 200. We capture it here
if ex.status_code not in [200, 204]:
raise ex
def list_app_service_plans(cmd, resource_group_name=None):
client = web_client_factory(cmd.cli_ctx)
if resource_group_name is None:
plans = list(client.app_service_plans.list(detailed=True)) # enables querying "numberOfSites"
else:
plans = list(client.app_service_plans.list_by_resource_group(resource_group_name))
for plan in plans:
# prune a few useless fields
del plan.geo_region
del plan.subscription
return plans
def create_app_service_plan(cmd, resource_group_name, name, is_linux, hyper_v, sku='B1', number_of_workers=None,
location=None, tags=None):
if is_linux and hyper_v:
raise CLIError('usage error: --is-linux | --hyper-v')
client = web_client_factory(cmd.cli_ctx)
sku = _normalize_sku(sku)
if location is None:
location = _get_location_from_resource_group(cmd.cli_ctx, resource_group_name)
# the api is odd on parameter naming, have to live with it for now
sku_def = SkuDescription(tier=get_sku_name(sku), name=sku, capacity=number_of_workers)
plan_def = AppServicePlan(location=location, tags=tags, sku=sku_def,
reserved=(is_linux or None), hyper_v=(hyper_v or None), name=name)
return client.app_service_plans.create_or_update(resource_group_name, name, plan_def)
def update_app_service_plan(instance, sku=None, number_of_workers=None):
sku_def = instance.sku
if sku is not None:
sku = _normalize_sku(sku)
sku_def.tier = get_sku_name(sku)
sku_def.name = sku
if number_of_workers is not None:
sku_def.capacity = number_of_workers
instance.sku = sku_def
return instance
def update_functionapp_app_service_plan(instance, sku=None, number_of_workers=None, max_burst=None):
instance = update_app_service_plan(instance, sku, number_of_workers)
if max_burst is not None:
if not is_plan_elastic_premium(instance):
raise CLIError("Usage error: --max-burst is only supported for Elastic Premium (EP) plans")
max_burst = validate_range_of_int_flag('--max-burst', max_burst, min_val=0, max_val=20)
instance.maximum_elastic_worker_count = max_burst
if number_of_workers is not None:
number_of_workers = validate_range_of_int_flag('--number-of-workers / --min-instances',
number_of_workers, min_val=0, max_val=20)
return update_app_service_plan(instance, sku, number_of_workers)
def show_backup_configuration(cmd, resource_group_name, webapp_name, slot=None):
try:
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
'get_backup_configuration', slot)
except Exception: # pylint: disable=broad-except
raise CLIError('Backup configuration not found')
def list_backups(cmd, resource_group_name, webapp_name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'list_backups',
slot)
def create_backup(cmd, resource_group_name, webapp_name, storage_account_url,
db_name=None, db_type=None,
db_connection_string=None, backup_name=None, slot=None):
client = web_client_factory(cmd.cli_ctx)
if backup_name and backup_name.lower().endswith('.zip'):
backup_name = backup_name[:-4]
db_setting = _create_db_setting(db_name, db_type, db_connection_string)
backup_request = BackupRequest(backup_request_name=backup_name,
storage_account_url=storage_account_url, databases=db_setting)
if slot:
return client.web_apps.backup_slot(resource_group_name, webapp_name, backup_request, slot)
return client.web_apps.backup(resource_group_name, webapp_name, backup_request)
def update_backup_schedule(cmd, resource_group_name, webapp_name, storage_account_url=None,
frequency=None, keep_at_least_one_backup=None,
retention_period_in_days=None, db_name=None,
db_connection_string=None, db_type=None, backup_name=None, slot=None):
configuration = None
if backup_name and backup_name.lower().endswith('.zip'):
backup_name = backup_name[:-4]
if not backup_name:
backup_name = '{0}_{1}'.format(webapp_name, datetime.datetime.utcnow().strftime('%Y%m%d%H%M'))
try:
configuration = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
'get_backup_configuration', slot)
except DefaultErrorResponseException:
# No configuration set yet
if not all([storage_account_url, frequency, retention_period_in_days,
keep_at_least_one_backup]):
raise CLIError('No backup configuration found. A configuration must be created. ' +
'Usage: --container-url URL --frequency TIME --retention DAYS ' +
'--retain-one TRUE/FALSE')
# If arguments were not specified, use the values in the current backup schedule
if storage_account_url is None:
storage_account_url = configuration.storage_account_url
if retention_period_in_days is None:
retention_period_in_days = configuration.backup_schedule.retention_period_in_days
if keep_at_least_one_backup is None:
keep_at_least_one_backup = configuration.backup_schedule.keep_at_least_one_backup
else:
keep_at_least_one_backup = keep_at_least_one_backup.lower() == 'true'
if frequency:
# Parse schedule frequency
frequency_num, frequency_unit = _parse_frequency(frequency)
else:
frequency_num = configuration.backup_schedule.frequency_interval
frequency_unit = configuration.backup_schedule.frequency_unit
if configuration and configuration.databases:
db = configuration.databases[0]
db_type = db_type or db.database_type
db_name = db_name or db.name
db_connection_string = db_connection_string or db.connection_string
db_setting = _create_db_setting(db_name, db_type, db_connection_string)
backup_schedule = BackupSchedule(frequency_interval=frequency_num, frequency_unit=frequency_unit.name,
keep_at_least_one_backup=keep_at_least_one_backup,
retention_period_in_days=retention_period_in_days)
backup_request = BackupRequest(backup_request_name=backup_name, backup_schedule=backup_schedule,
enabled=True, storage_account_url=storage_account_url,
databases=db_setting)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'update_backup_configuration',
slot, backup_request)
def restore_backup(cmd, resource_group_name, webapp_name, storage_account_url, backup_name,
db_name=None, db_type=None, db_connection_string=None,
target_name=None, overwrite=None, ignore_hostname_conflict=None, slot=None):
client = web_client_factory(cmd.cli_ctx)
storage_blob_name = backup_name
if not storage_blob_name.lower().endswith('.zip'):
storage_blob_name += '.zip'
db_setting = _create_db_setting(db_name, db_type, db_connection_string)
restore_request = RestoreRequest(storage_account_url=storage_account_url,
blob_name=storage_blob_name, overwrite=overwrite,
site_name=target_name, databases=db_setting,
ignore_conflicting_host_names=ignore_hostname_conflict)
if slot:
return client.web_apps.restore_slot(resource_group_name, webapp_name, 0, restore_request, slot)
return client.web_apps.restore(resource_group_name, webapp_name, 0, restore_request)
def list_snapshots(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_snapshots',
slot)
def restore_snapshot(cmd, resource_group_name, name, time, slot=None, restore_content_only=False, # pylint: disable=redefined-outer-name
source_resource_group=None, source_name=None, source_slot=None):
from azure.cli.core.commands.client_factory import get_subscription_id
client = web_client_factory(cmd.cli_ctx)
recover_config = not restore_content_only
if all([source_resource_group, source_name]):
# Restore from source app to target app
sub_id = get_subscription_id(cmd.cli_ctx)
source_id = "/subscriptions/" + sub_id + "/resourceGroups/" + source_resource_group + \
"/providers/Microsoft.Web/sites/" + source_name
if source_slot:
source_id = source_id + "/slots/" + source_slot
source = SnapshotRecoverySource(id=source_id)
request = SnapshotRestoreRequest(overwrite=False, snapshot_time=time, recovery_source=source,
recover_configuration=recover_config)
if slot:
return client.web_apps.restore_snapshot_slot(resource_group_name, name, request, slot)
return client.web_apps.restore_snapshot(resource_group_name, name, request)
if any([source_resource_group, source_name]):
raise CLIError('usage error: --source-resource-group and --source-name must both be specified if one is used')
# Overwrite app with its own snapshot
request = SnapshotRestoreRequest(overwrite=True, snapshot_time=time, recover_configuration=recover_config)
if slot:
return client.web_apps.restore_snapshot_slot(resource_group_name, name, request, slot)
return client.web_apps.restore_snapshot(resource_group_name, name, request)
# pylint: disable=inconsistent-return-statements
def _create_db_setting(db_name, db_type, db_connection_string):
if all([db_name, db_type, db_connection_string]):
return [DatabaseBackupSetting(database_type=db_type, name=db_name, connection_string=db_connection_string)]
if any([db_name, db_type, db_connection_string]):
raise CLIError('usage error: --db-name NAME --db-type TYPE --db-connection-string STRING')
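# Behavior sketch for _create_db_setting (hypothetical arguments): supplying all three
# values produces a one-element list, a partial set raises the usage error, and all-None
# falls through and returns None.
#   _create_db_setting('mydb', 'SqlAzure', 'Server=myserver;')  -> [DatabaseBackupSetting(...)]
#   _create_db_setting('mydb', None, None)                      -> CLIError
#   _create_db_setting(None, None, None)                        -> None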
def _parse_frequency(frequency):
unit_part = frequency.lower()[-1]
if unit_part == 'd':
frequency_unit = FrequencyUnit.day
elif unit_part == 'h':
frequency_unit = FrequencyUnit.hour
else:
raise CLIError('Frequency must end with d or h for "day" or "hour"')
try:
frequency_num = int(frequency[:-1])
except ValueError:
raise CLIError('Frequency must start with a number')
    if frequency_num <= 0:
        raise CLIError('Frequency must be positive')
return frequency_num, frequency_unit
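# Example traces (hypothetical inputs): '7d' -> (7, FrequencyUnit.day), '12h' -> (12, FrequencyUnit.hour);
# anything not ending in 'd'/'h', or not starting with an integer, raises CLIError.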
def _get_location_from_resource_group(cli_ctx, resource_group_name):
from azure.cli.core.profiles import ResourceType
client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
group = client.resource_groups.get(resource_group_name)
return group.location
def _get_location_from_webapp(client, resource_group_name, webapp):
webapp = client.web_apps.get(resource_group_name, webapp)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(webapp))
return webapp.location
def _get_deleted_apps_locations(cli_ctx):
from azure.cli.core.profiles import ResourceType
client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
web_provider = client.providers.get('Microsoft.Web')
del_sites_resource = next((x for x in web_provider.resource_types if x.resource_type == 'deletedSites'), None)
if del_sites_resource:
return del_sites_resource.locations
return []
def _get_local_git_url(cli_ctx, client, resource_group_name, name, slot=None):
user = client.get_publishing_user()
result = _generic_site_operation(cli_ctx, resource_group_name, name, 'get_source_control', slot)
parsed = urlparse(result.repo_url)
return '{}://{}@{}/{}.git'.format(parsed.scheme, user.publishing_user_name,
parsed.netloc, name)
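# Resulting URL shape (names are illustrative): with a publishing user 'deployer' and a
# source-control repo_url of 'https://my-app.scm.azurewebsites.net', the helper returns
# 'https://deployer@my-app.scm.azurewebsites.net/my-app.git'.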
def _get_scm_url(cmd, resource_group_name, name, slot=None):
from azure.mgmt.web.models import HostType
webapp = show_webapp(cmd, resource_group_name, name, slot=slot)
for host in webapp.host_name_ssl_states or []:
if host.host_type == HostType.repository:
return "https://{}".format(host.name)
# this should not happen, but throw anyway
raise ValueError('Failed to retrieve Scm Uri')
def set_deployment_user(cmd, user_name, password=None):
'''
    Update deployment credentials. (Note: all webapps in your subscription will be impacted.)
'''
client = web_client_factory(cmd.cli_ctx)
user = User(publishing_user_name=user_name)
if password is None:
try:
password = prompt_pass(msg='Password: ', confirm=True)
except NoTTYException:
raise CLIError('Please specify both username and password in non-interactive mode.')
user.publishing_password = password
return client.update_publishing_user(user)
def list_publishing_credentials(cmd, resource_group_name, name, slot=None):
content = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_publishing_credentials', slot)
return content.result()
def list_publish_profiles(cmd, resource_group_name, name, slot=None):
import xmltodict
content = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_publishing_profile_xml_with_secrets', slot)
full_xml = ''
for f in content:
full_xml += f.decode()
profiles = xmltodict.parse(full_xml, xml_attribs=True)['publishData']['publishProfile']
converted = []
for profile in profiles:
new = {}
for key in profile:
# strip the leading '@' xmltodict put in for attributes
new[key.lstrip('@')] = profile[key]
converted.append(new)
return converted
def enable_cd(cmd, resource_group_name, name, enable, slot=None):
settings = []
settings.append("DOCKER_ENABLE_CI=" + enable)
update_app_settings(cmd, resource_group_name, name, settings, slot)
return show_container_cd_url(cmd, resource_group_name, name, slot)
def show_container_cd_url(cmd, resource_group_name, name, slot=None):
settings = get_app_settings(cmd, resource_group_name, name, slot)
docker_enabled = False
for setting in settings:
if setting['name'] == 'DOCKER_ENABLE_CI' and setting['value'] == 'true':
docker_enabled = True
break
cd_settings = {}
cd_settings['DOCKER_ENABLE_CI'] = docker_enabled
if docker_enabled:
credentials = list_publishing_credentials(cmd, resource_group_name, name, slot)
if credentials:
cd_url = credentials.scm_uri + '/docker/hook'
cd_settings['CI_CD_URL'] = cd_url
else:
cd_settings['CI_CD_URL'] = ''
return cd_settings
def view_in_browser(cmd, resource_group_name, name, slot=None, logs=False):
url = _get_url(cmd, resource_group_name, name, slot)
open_page_in_browser(url)
if logs:
get_streaming_log(cmd, resource_group_name, name, provider=None, slot=slot)
def _get_url(cmd, resource_group_name, name, slot=None):
site = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not site:
raise CLIError("'{}' app doesn't exist".format(name))
    url = site.enabled_host_names[0]  # picks the custom domain URL in case a domain is assigned
ssl_host = next((h for h in site.host_name_ssl_states
if h.ssl_state != SslState.disabled), None)
return ('https' if ssl_host else 'http') + '://' + url
# TODO: expose new blob support
def config_diagnostics(cmd, resource_group_name, name, level=None,
application_logging=None, web_server_logging=None,
docker_container_logging=None, detailed_error_messages=None,
failed_request_tracing=None, slot=None):
from azure.mgmt.web.models import (FileSystemApplicationLogsConfig, ApplicationLogsConfig,
SiteLogsConfig, HttpLogsConfig, FileSystemHttpLogsConfig,
EnabledConfig)
client = web_client_factory(cmd.cli_ctx)
# TODO: ensure we call get_site only once
site = client.web_apps.get(resource_group_name, name)
if not site:
raise CLIError("'{}' app doesn't exist".format(name))
location = site.location
application_logs = None
if application_logging is not None:
if not application_logging:
level = 'Off'
elif level is None:
level = 'Error'
fs_log = FileSystemApplicationLogsConfig(level=level)
application_logs = ApplicationLogsConfig(file_system=fs_log)
http_logs = None
server_logging_option = web_server_logging or docker_container_logging
if server_logging_option:
        # TODO: az blob storage log config is currently not in use; it will be implemented later.
        # Tracked as issue #4764 on GitHub
filesystem_log_config = None
turned_on = server_logging_option != 'off'
if server_logging_option in ['filesystem', 'off']:
            # 100 MB max log size, 3-day retention. Hard-coded here, matching the portal's defaults.
filesystem_log_config = FileSystemHttpLogsConfig(retention_in_mb=100, retention_in_days=3,
enabled=turned_on)
http_logs = HttpLogsConfig(file_system=filesystem_log_config, azure_blob_storage=None)
detailed_error_messages_logs = (None if detailed_error_messages is None
else EnabledConfig(enabled=detailed_error_messages))
failed_request_tracing_logs = (None if failed_request_tracing is None
else EnabledConfig(enabled=failed_request_tracing))
site_log_config = SiteLogsConfig(location=location,
application_logs=application_logs,
http_logs=http_logs,
failed_requests_tracing=failed_request_tracing_logs,
detailed_error_messages=detailed_error_messages_logs)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_diagnostic_logs_config',
slot, site_log_config)
def show_diagnostic_settings(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_diagnostic_logs_configuration', slot)
def config_slot_auto_swap(cmd, resource_group_name, webapp, slot, auto_swap_slot=None, disable=None):
client = web_client_factory(cmd.cli_ctx)
site_config = client.web_apps.get_configuration_slot(resource_group_name, webapp, slot)
site_config.auto_swap_slot_name = '' if disable else (auto_swap_slot or 'production')
return client.web_apps.update_configuration_slot(resource_group_name, webapp, site_config, slot)
def list_slots(cmd, resource_group_name, webapp):
client = web_client_factory(cmd.cli_ctx)
slots = list(client.web_apps.list_slots(resource_group_name, webapp))
for slot in slots:
slot.name = slot.name.split('/')[-1]
setattr(slot, 'app_service_plan', parse_resource_id(slot.server_farm_id)['name'])
del slot.server_farm_id
return slots
def swap_slot(cmd, resource_group_name, webapp, slot, target_slot=None, action='swap'):
client = web_client_factory(cmd.cli_ctx)
if action == 'swap':
poller = client.web_apps.swap_slot_slot(resource_group_name, webapp,
slot, (target_slot or 'production'), True)
return poller
if action == 'preview':
if target_slot is None:
result = client.web_apps.apply_slot_config_to_production(resource_group_name,
webapp, slot, True)
else:
result = client.web_apps.apply_slot_configuration_slot(resource_group_name, webapp,
slot, target_slot, True)
return result
# we will reset both source slot and target slot
if target_slot is None:
client.web_apps.reset_production_slot_config(resource_group_name, webapp)
else:
client.web_apps.reset_slot_configuration_slot(resource_group_name, webapp, target_slot)
return None
def delete_slot(cmd, resource_group_name, webapp, slot):
client = web_client_factory(cmd.cli_ctx)
# TODO: once swagger finalized, expose other parameters like: delete_all_slots, etc...
client.web_apps.delete_slot(resource_group_name, webapp, slot)
def set_traffic_routing(cmd, resource_group_name, name, distribution):
client = web_client_factory(cmd.cli_ctx)
site = client.web_apps.get(resource_group_name, name)
if not site:
raise CLIError("'{}' app doesn't exist".format(name))
configs = get_site_configs(cmd, resource_group_name, name)
host_name_split = site.default_host_name.split('.', 1)
host_name_suffix = '.' + host_name_split[1]
host_name_val = host_name_split[0]
configs.experiments.ramp_up_rules = []
for r in distribution:
slot, percentage = r.split('=')
action_host_name_slot = host_name_val + "-" + slot
configs.experiments.ramp_up_rules.append(RampUpRule(action_host_name=action_host_name_slot + host_name_suffix,
reroute_percentage=float(percentage),
name=slot))
_generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', None, configs)
return configs.experiments.ramp_up_rules
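# Example distribution entry (hypothetical app): for an app whose default host name is
# 'my-app.azurewebsites.net', a distribution of ['staging=25'] produces a single
# RampUpRule with action_host_name 'my-app-staging.azurewebsites.net',
# reroute_percentage 25.0 and name 'staging'.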
def show_traffic_routing(cmd, resource_group_name, name):
configs = get_site_configs(cmd, resource_group_name, name)
return configs.experiments.ramp_up_rules
def clear_traffic_routing(cmd, resource_group_name, name):
set_traffic_routing(cmd, resource_group_name, name, [])
def add_cors(cmd, resource_group_name, name, allowed_origins, slot=None):
from azure.mgmt.web.models import CorsSettings
configs = get_site_configs(cmd, resource_group_name, name, slot)
if not configs.cors:
configs.cors = CorsSettings()
configs.cors.allowed_origins = (configs.cors.allowed_origins or []) + allowed_origins
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
return result.cors
def remove_cors(cmd, resource_group_name, name, allowed_origins, slot=None):
configs = get_site_configs(cmd, resource_group_name, name, slot)
if configs.cors:
if allowed_origins:
configs.cors.allowed_origins = [x for x in (configs.cors.allowed_origins or []) if x not in allowed_origins]
else:
configs.cors.allowed_origins = []
configs = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
return configs.cors
def show_cors(cmd, resource_group_name, name, slot=None):
configs = get_site_configs(cmd, resource_group_name, name, slot)
return configs.cors
def get_streaming_log(cmd, resource_group_name, name, provider=None, slot=None):
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
streaming_url = scm_url + '/logstream'
if provider:
streaming_url += ('/' + provider.lstrip('/'))
user, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
t = threading.Thread(target=_get_log, args=(streaming_url, user, password))
t.daemon = True
t.start()
while True:
time.sleep(100) # so that ctrl+c can stop the command
def download_historical_logs(cmd, resource_group_name, name, log_file=None, slot=None):
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
url = scm_url.rstrip('/') + '/dump'
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
_get_log(url, user_name, password, log_file)
logger.warning('Downloaded logs to %s', log_file)
def _get_site_credential(cli_ctx, resource_group_name, name, slot=None):
creds = _generic_site_operation(cli_ctx, resource_group_name, name, 'list_publishing_credentials', slot)
creds = creds.result()
return (creds.publishing_user_name, creds.publishing_password)
def _get_log(url, user_name, password, log_file=None):
import certifi
import urllib3
try:
import urllib3.contrib.pyopenssl
urllib3.contrib.pyopenssl.inject_into_urllib3()
except ImportError:
pass
http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
headers = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
r = http.request(
'GET',
url,
headers=headers,
preload_content=False
)
if r.status != 200:
raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
url, r.status, r.reason))
if log_file: # download logs
with open(log_file, 'wb') as f:
while True:
data = r.read(1024)
if not data:
break
f.write(data)
else: # streaming
std_encoding = sys.stdout.encoding
for chunk in r.stream():
if chunk:
                # Extra encode()/decode() for stdouts that do not support 'utf-8'
print(chunk.decode(encoding='utf-8', errors='replace')
.encode(std_encoding, errors='replace')
.decode(std_encoding, errors='replace'), end='') # each line of log has CRLF.
r.release_conn()
def upload_ssl_cert(cmd, resource_group_name, name, certificate_password, certificate_file):
client = web_client_factory(cmd.cli_ctx)
webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get')
    with open(certificate_file, 'rb') as cert_file:
        cert_contents = cert_file.read()
hosting_environment_profile_param = (webapp.hosting_environment_profile.name
if webapp.hosting_environment_profile else '')
thumb_print = _get_cert(certificate_password, certificate_file)
cert_name = _generate_cert_name(thumb_print, hosting_environment_profile_param,
webapp.location, resource_group_name)
cert = Certificate(password=certificate_password, pfx_blob=cert_contents,
location=webapp.location, server_farm_id=webapp.server_farm_id)
return client.certificates.create_or_update(resource_group_name, cert_name, cert)
def _generate_cert_name(thumb_print, hosting_environment, location, resource_group_name):
return "%s_%s_%s_%s" % (thumb_print, hosting_environment, location, resource_group_name)
def _get_cert(certificate_password, certificate_file):
''' Decrypts the .pfx file '''
    with open(certificate_file, 'rb') as cert_file:
        p12 = OpenSSL.crypto.load_pkcs12(cert_file.read(), certificate_password)
cert = p12.get_certificate()
digest_algorithm = 'sha1'
thumbprint = cert.digest(digest_algorithm).decode("utf-8").replace(':', '')
return thumbprint
def list_ssl_certs(cmd, resource_group_name):
client = web_client_factory(cmd.cli_ctx)
return client.certificates.list_by_resource_group(resource_group_name)
def delete_ssl_cert(cmd, resource_group_name, certificate_thumbprint):
client = web_client_factory(cmd.cli_ctx)
webapp_certs = client.certificates.list_by_resource_group(resource_group_name)
for webapp_cert in webapp_certs:
if webapp_cert.thumbprint == certificate_thumbprint:
return client.certificates.delete(resource_group_name, webapp_cert.name)
raise CLIError("Certificate for thumbprint '{}' not found".format(certificate_thumbprint))
def _update_host_name_ssl_state(cli_ctx, resource_group_name, webapp_name, location,
host_name, ssl_state, thumbprint, slot=None):
updated_webapp = Site(host_name_ssl_states=[HostNameSslState(name=host_name,
ssl_state=ssl_state,
thumbprint=thumbprint,
to_update=True)],
location=location)
return _generic_site_operation(cli_ctx, resource_group_name, webapp_name, 'create_or_update',
slot, updated_webapp)
def _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, name)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(name))
cert_resource_group_name = parse_resource_id(webapp.server_farm_id)['resource_group']
webapp_certs = client.certificates.list_by_resource_group(cert_resource_group_name)
for webapp_cert in webapp_certs:
if webapp_cert.thumbprint == certificate_thumbprint:
if len(webapp_cert.host_names) == 1 and not webapp_cert.host_names[0].startswith('*'):
return _update_host_name_ssl_state(cmd.cli_ctx, resource_group_name, name, webapp.location,
webapp_cert.host_names[0], ssl_type,
certificate_thumbprint, slot)
query_result = list_hostnames(cmd, resource_group_name, name, slot)
hostnames_in_webapp = [x.name.split('/')[-1] for x in query_result]
to_update = _match_host_names_from_cert(webapp_cert.host_names, hostnames_in_webapp)
for h in to_update:
_update_host_name_ssl_state(cmd.cli_ctx, resource_group_name, name, webapp.location,
h, ssl_type, certificate_thumbprint, slot)
return show_webapp(cmd, resource_group_name, name, slot)
raise CLIError("Certificate for thumbprint '{}' not found.".format(certificate_thumbprint))
def bind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
return _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint,
SslState.sni_enabled if ssl_type == 'SNI' else SslState.ip_based_enabled, slot)
def unbind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, slot=None):
return _update_ssl_binding(cmd, resource_group_name, name,
certificate_thumbprint, SslState.disabled, slot)
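# Editor's note: bind_ssl_cert maps the user-facing ssl_type choice onto the SDK enum,
# 'SNI' -> SslState.sni_enabled and anything else -> SslState.ip_based_enabled, while
# unbind_ssl_cert always resets the host name binding to SslState.disabled.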
def _match_host_names_from_cert(hostnames_from_cert, hostnames_in_webapp):
# the goal is to match '*.foo.com' with host name like 'admin.foo.com', 'logs.foo.com', etc
matched = set()
for hostname in hostnames_from_cert:
if hostname.startswith('*'):
for h in hostnames_in_webapp:
if hostname[hostname.find('.'):] == h[h.find('.'):]:
matched.add(h)
elif hostname in hostnames_in_webapp:
matched.add(hostname)
return matched
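# Illustrative example (editor's note, not in the original module): the wildcard branch above
# matches on everything after the first dot, so
# _match_host_names_from_cert(['*.contoso.com'], ['www.contoso.com', 'api.contoso.com'])
# would return {'www.contoso.com', 'api.contoso.com'}, while a non-wildcard entry only matches
# an exact hostname already present in the webapp.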
# helper class that handles runtime stacks in formats like 'node|6.1', 'php|5.5'
class _StackRuntimeHelper(object):
def __init__(self, client, linux=False):
self._client = client
self._linux = linux
self._stacks = []
def resolve(self, display_name):
self._load_stacks()
return next((s for s in self._stacks if s['displayName'].lower() == display_name.lower()),
None)
@property
def stacks(self):
self._load_stacks()
return self._stacks
@staticmethod
def update_site_config(stack, site_config):
for k, v in stack['configs'].items():
setattr(site_config, k, v)
return site_config
@staticmethod
def update_site_appsettings(stack, site_config):
if site_config.app_settings is None:
site_config.app_settings = []
site_config.app_settings += [NameValuePair(name=k, value=v) for k, v in stack['configs'].items()]
return site_config
def _load_stacks(self):
if self._stacks:
return
os_type = ('Linux' if self._linux else 'Windows')
raw_stacks = self._client.provider.get_available_stacks(os_type_selected=os_type, raw=True)
bytes_value = raw_stacks._get_next().content # pylint: disable=protected-access
json_value = bytes_value.decode('utf8')
json_stacks = json.loads(json_value)
stacks = json_stacks['value']
result = []
if self._linux:
for properties in [(s['properties']) for s in stacks]:
for major in properties['majorVersions']:
default_minor = next((m for m in (major['minorVersions'] or []) if m['isDefault']),
None)
result.append({
'displayName': (default_minor['runtimeVersion']
if default_minor else major['runtimeVersion'])
})
else: # Windows stacks
config_mappings = {
'node': 'WEBSITE_NODE_DEFAULT_VERSION',
'python': 'python_version',
'php': 'php_version',
'aspnet': 'net_framework_version'
}
# get all stack versions except 'java'
for stack in stacks:
if stack['name'] not in config_mappings:
continue
name, properties = stack['name'], stack['properties']
for major in properties['majorVersions']:
default_minor = next((m for m in (major['minorVersions'] or []) if m['isDefault']),
None)
result.append({
'displayName': name + '|' + major['displayVersion'],
'configs': {
config_mappings[name]: (default_minor['runtimeVersion']
if default_minor else major['runtimeVersion'])
}
})
# deal with java, which pairs with java container version
java_stack = next((s for s in stacks if s['name'] == 'java'))
java_container_stack = next((s for s in stacks if s['name'] == 'javaContainers'))
for java_version in java_stack['properties']['majorVersions']:
for fx in java_container_stack['properties']['frameworks']:
for fx_version in fx['majorVersions']:
result.append({
'displayName': 'java|{}|{}|{}'.format(java_version['displayVersion'],
fx['display'],
fx_version['displayVersion']),
'configs': {
'java_version': java_version['runtimeVersion'],
'java_container': fx['name'],
'java_container_version': fx_version['runtimeVersion']
}
})
for r in result:
r['setter'] = (_StackRuntimeHelper.update_site_appsettings if 'node' in
r['displayName'] else _StackRuntimeHelper.update_site_config)
self._stacks = result
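# Usage sketch (editor's note; 'python|3.7' is a hypothetical display name, real values come
# from the available-stacks provider API queried in _load_stacks):
# helper = _StackRuntimeHelper(client, linux=False)
# match = helper.resolve('python|3.7')      # None when the runtime is not offered
# if match:
#     match['setter'](match, site_config)   # applies the stack's config / app settings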
def get_app_insights_key(cli_ctx, resource_group, name):
appinsights_client = get_mgmt_service_client(cli_ctx, ApplicationInsightsManagementClient)
appinsights = appinsights_client.components.get(resource_group, name)
if appinsights is None or appinsights.instrumentation_key is None:
raise CLIError("App Insights {} under resource group {} was not found.".format(name, resource_group))
return appinsights.instrumentation_key
def create_functionapp_app_service_plan(cmd, resource_group_name, name, is_linux, sku,
number_of_workers=None, max_burst=None, location=None, tags=None):
sku = _normalize_sku(sku)
tier = get_sku_name(sku)
if max_burst is not None:
if tier.lower() != "elasticpremium":
raise CLIError("Usage error: --max-burst is only supported for Elastic Premium (EP) plans")
max_burst = validate_range_of_int_flag('--max-burst', max_burst, min_val=0, max_val=20)
if number_of_workers is not None:
number_of_workers = validate_range_of_int_flag('--number-of-workers / --min-elastic-worker-count',
number_of_workers, min_val=0, max_val=20)
client = web_client_factory(cmd.cli_ctx)
if location is None:
location = _get_location_from_resource_group(cmd.cli_ctx, resource_group_name)
sku_def = SkuDescription(tier=tier, name=sku, capacity=number_of_workers)
plan_def = AppServicePlan(location=location, tags=tags, sku=sku_def,
reserved=(is_linux or None), maximum_elastic_worker_count=max_burst,
hyper_v=None, name=name)
return client.app_service_plans.create_or_update(resource_group_name, name, plan_def)
def is_plan_consumption(plan_info):
if isinstance(plan_info, AppServicePlan):
if isinstance(plan_info.sku, SkuDescription):
return plan_info.sku.tier.lower() == 'dynamic'
return False
def is_plan_elastic_premium(plan_info):
if isinstance(plan_info, AppServicePlan):
if isinstance(plan_info.sku, SkuDescription):
return plan_info.sku.tier == 'ElasticPremium'
return False
def validate_and_convert_to_int(flag, val):
try:
return int(val)
except ValueError:
raise CLIError("Usage error: {} is expected to have an int value.".format(flag))
def validate_range_of_int_flag(flag_name, value, min_val, max_val):
value = validate_and_convert_to_int(flag_name, value)
if min_val > value or value > max_val:
raise CLIError("Usage error: {} is expected to be between {} and {} (inclusive)".format(flag_name, min_val,
max_val))
return value
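# Example (editor's note): validate_range_of_int_flag('--max-burst', '10', min_val=0, max_val=20)
# returns 10, while a value of '25' raises a CLIError describing the allowed range.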
def create_function(cmd, resource_group_name, name, storage_account, plan=None,
os_type=None, runtime=None, consumption_plan_location=None,
app_insights=None, app_insights_key=None, disable_app_insights=None, deployment_source_url=None,
deployment_source_branch='master', deployment_local_git=None,
docker_registry_server_password=None, docker_registry_server_user=None,
deployment_container_image_name=None, tags=None):
# pylint: disable=too-many-statements, too-many-branches
if deployment_source_url and deployment_local_git:
raise CLIError('usage error: --deployment-source-url <url> | --deployment-local-git')
if bool(plan) == bool(consumption_plan_location):
raise CLIError("usage error: --plan NAME_OR_ID | --consumption-plan-location LOCATION")
docker_registry_server_url = parse_docker_image_name(deployment_container_image_name)
site_config = SiteConfig(app_settings=[])
functionapp_def = Site(location=None, site_config=site_config, tags=tags)
client = web_client_factory(cmd.cli_ctx)
plan_info = None
if consumption_plan_location:
locations = list_consumption_locations(cmd)
location = next((l for l in locations if l['name'].lower() == consumption_plan_location.lower()), None)
if location is None:
raise CLIError("Location is invalid. Use: az functionapp list-consumption-locations")
functionapp_def.location = consumption_plan_location
functionapp_def.kind = 'functionapp'
# if os_type is None, the OS type defaults to Windows
is_linux = os_type and os_type.lower() == 'linux'
else: # apps with SKU based plan
if is_valid_resource_id(plan):
parse_result = parse_resource_id(plan)
plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
else:
plan_info = client.app_service_plans.get(resource_group_name, plan)
if not plan_info:
raise CLIError("The plan '{}' doesn't exist".format(plan))
location = plan_info.location
is_linux = plan_info.reserved
functionapp_def.server_farm_id = plan
functionapp_def.location = location
if is_linux and not runtime and (consumption_plan_location or not deployment_container_image_name):
raise CLIError(
"usage error: --runtime RUNTIME is required for Linux function apps without a custom image.")
if runtime:
if is_linux and runtime not in LINUX_RUNTIMES:
raise CLIError("usage error: Currently supported runtimes (--runtime) in linux function apps are: {}."
.format(', '.join(LINUX_RUNTIMES)))
if not is_linux and runtime not in WINDOWS_RUNTIMES:
raise CLIError("usage error: Currently supported runtimes (--runtime) in windows function apps are: {}."
.format(', '.join(WINDOWS_RUNTIMES)))
site_config.app_settings.append(NameValuePair(name='FUNCTIONS_WORKER_RUNTIME', value=runtime))
con_string = _validate_and_get_connection_string(cmd.cli_ctx, resource_group_name, storage_account)
if is_linux:
functionapp_def.kind = 'functionapp,linux'
functionapp_def.reserved = True
if consumption_plan_location:
site_config.app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION', value='~2'))
else:
site_config.app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION', value='~2'))
site_config.app_settings.append(NameValuePair(name='MACHINEKEY_DecryptionKey',
value=str(hexlify(urandom(32)).decode()).upper()))
if deployment_container_image_name:
functionapp_def.kind = 'functionapp,linux,container'
site_config.app_settings.append(NameValuePair(name='DOCKER_CUSTOM_IMAGE_NAME',
value=deployment_container_image_name))
site_config.app_settings.append(NameValuePair(name='FUNCTION_APP_EDIT_MODE', value='readOnly'))
site_config.app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE',
value='false'))
site_config.linux_fx_version = _format_fx_version(deployment_container_image_name)
else:
site_config.app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE',
value='true'))
if runtime.lower() not in RUNTIME_TO_IMAGE:
raise CLIError("An appropriate linux image for runtime:'{}' was not found".format(runtime))
site_config.linux_fx_version = _format_fx_version(RUNTIME_TO_IMAGE[runtime.lower()])
else:
functionapp_def.kind = 'functionapp'
site_config.app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION', value='~2'))
# adding app settings to the site to make it a function app
site_config.app_settings.append(NameValuePair(name='AzureWebJobsStorage', value=con_string))
site_config.app_settings.append(NameValuePair(name='AzureWebJobsDashboard', value=con_string))
site_config.app_settings.append(NameValuePair(name='WEBSITE_NODE_DEFAULT_VERSION', value='10.14.1'))
# If plan is not consumption or elastic premium, we need to set always on
if consumption_plan_location is None and not is_plan_elastic_premium(plan_info):
site_config.always_on = True
# If plan is elastic premium or windows consumption, we need these app settings
is_windows_consumption = consumption_plan_location is not None and not is_linux
if is_plan_elastic_premium(plan_info) or is_windows_consumption:
site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTAZUREFILECONNECTIONSTRING',
value=con_string))
site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTSHARE', value=name.lower()))
create_app_insights = False
if app_insights_key is not None:
site_config.app_settings.append(NameValuePair(name='APPINSIGHTS_INSTRUMENTATIONKEY',
value=app_insights_key))
elif app_insights is not None:
instrumentation_key = get_app_insights_key(cmd.cli_ctx, resource_group_name, app_insights)
site_config.app_settings.append(NameValuePair(name='APPINSIGHTS_INSTRUMENTATIONKEY',
value=instrumentation_key))
elif not disable_app_insights:
create_app_insights = True
poller = client.web_apps.create_or_update(resource_group_name, name, functionapp_def)
functionapp = LongRunningOperation(cmd.cli_ctx)(poller)
if consumption_plan_location and is_linux:
logger.warning("Your Linux function app '%s', which uses a consumption plan, has been successfully "
"created but is not active until content is published using "
"the Azure Portal or the Functions Core Tools.", name)
else:
_set_remote_or_local_git(cmd, functionapp, resource_group_name, name, deployment_source_url,
deployment_source_branch, deployment_local_git)
if create_app_insights:
try:
try_create_application_insights(cmd, functionapp)
except Exception: # pylint: disable=broad-except
logger.warning('Error while trying to create and configure an Application Insights for the Function App. '
'Please use the Azure Portal to create and configure the Application Insights, if needed.')
if deployment_container_image_name:
update_container_settings_functionapp(cmd, resource_group_name, name, docker_registry_server_url,
deployment_container_image_name, docker_registry_server_user,
docker_registry_server_password)
return functionapp
def try_create_application_insights(cmd, functionapp):
creation_failed_warn = 'Unable to create the Application Insights for the Function App. ' \
'Please use the Azure Portal to manually create and configure the Application Insights, ' \
'if needed.'
ai_resource_group_name = functionapp.resource_group
ai_name = functionapp.name
ai_location = functionapp.location
app_insights_client = get_mgmt_service_client(cmd.cli_ctx, ApplicationInsightsManagementClient)
ai_properties = {
"name": ai_name,
"location": ai_location,
"kind": "web",
"properties": {
"Application_Type": "web"
}
}
appinsights = app_insights_client.components.create_or_update(ai_resource_group_name, ai_name, ai_properties)
if appinsights is None or appinsights.instrumentation_key is None:
logger.warning(creation_failed_warn)
return
# We emit this success message as a warning so it does not interfere with regular JSON output on stdout
logger.warning('Application Insights \"%s\" was created for this Function App. '
'You can visit https://portal.azure.com/#resource%s/overview to view your '
'Application Insights component', appinsights.name, appinsights.id)
update_app_settings(cmd, functionapp.resource_group, functionapp.name,
['APPINSIGHTS_INSTRUMENTATIONKEY={}'.format(appinsights.instrumentation_key)])
def _set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url=None,
deployment_source_branch='master', deployment_local_git=None):
if deployment_source_url:
logger.warning("Linking to git repository '%s'", deployment_source_url)
try:
config_source_control(cmd, resource_group_name, name, deployment_source_url, 'git',
deployment_source_branch, manual_integration=True)
except Exception as ex: # pylint: disable=broad-except
ex = ex_handler_factory(no_throw=True)(ex)
logger.warning("Link to git repository failed due to error '%s'", ex)
if deployment_local_git:
local_git_info = enable_local_git(cmd, resource_group_name, name)
logger.warning("Local git is configured with url of '%s'", local_git_info['url'])
setattr(webapp, 'deploymentLocalGitUrl', local_git_info['url'])
def _validate_and_get_connection_string(cli_ctx, resource_group_name, storage_account):
sa_resource_group = resource_group_name
if is_valid_resource_id(storage_account):
sa_resource_group = parse_resource_id(storage_account)['resource_group']
storage_account = parse_resource_id(storage_account)['name']
storage_client = get_mgmt_service_client(cli_ctx, StorageManagementClient)
storage_properties = storage_client.storage_accounts.get_properties(sa_resource_group,
storage_account)
error_message = ''
endpoints = storage_properties.primary_endpoints
sku = storage_properties.sku.name
allowed_storage_types = ['Standard_GRS', 'Standard_RAGRS', 'Standard_LRS', 'Standard_ZRS', 'Premium_LRS']
for e in ['blob', 'queue', 'table']:
if not getattr(endpoints, e, None):
error_message = "Storage account '{}' has no '{}' endpoint. It must have table, queue, and blob endpoints all enabled".format(storage_account, e) # pylint: disable=line-too-long
if sku not in allowed_storage_types:
error_message += 'Storage type {} is not allowed'.format(sku)
if error_message:
raise CLIError(error_message)
obj = storage_client.storage_accounts.list_keys(sa_resource_group, storage_account) # pylint: disable=no-member
try:
keys = [obj.keys[0].value, obj.keys[1].value] # pylint: disable=no-member
except AttributeError:
# Older API versions have a slightly different structure
keys = [obj.key1, obj.key2] # pylint: disable=no-member
endpoint_suffix = cli_ctx.cloud.suffixes.storage_endpoint
connection_string = 'DefaultEndpointsProtocol={};EndpointSuffix={};AccountName={};AccountKey={}'.format(
"https",
endpoint_suffix,
storage_account,
keys[0]) # pylint: disable=no-member
return connection_string
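# Editor's note: the result has the usual storage connection-string shape, e.g.
# 'DefaultEndpointsProtocol=https;EndpointSuffix=core.windows.net;AccountName=mystorage;AccountKey=<key>'
# (the suffix comes from the active cloud, so it differs for sovereign clouds).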
def list_consumption_locations(cmd):
client = web_client_factory(cmd.cli_ctx)
regions = client.list_geo_regions(sku='Dynamic')
return [{'name': x.name.lower().replace(' ', '')} for x in regions]
def list_locations(cmd, sku, linux_workers_enabled=None):
client = web_client_factory(cmd.cli_ctx)
full_sku = get_sku_name(sku)
return client.list_geo_regions(full_sku, linux_workers_enabled)
def _check_zip_deployment_status(cmd, rg_name, name, deployment_status_url, authorization, timeout=None):
import requests
from azure.cli.core.util import should_disable_connection_verify
total_trials = (int(timeout) // 2) if timeout else 450
num_trials = 0
while num_trials < total_trials:
time.sleep(2)
response = requests.get(deployment_status_url, headers=authorization,
verify=not should_disable_connection_verify())
time.sleep(2)
try:
res_dict = response.json()
except json.decoder.JSONDecodeError:
logger.warning("Deployment status endpoint %s returns malformed data. Retrying...", deployment_status_url)
res_dict = {}
finally:
num_trials = num_trials + 1
if res_dict.get('status', 0) == 3:
_configure_default_logging(cmd, rg_name, name)
raise CLIError("""Zip deployment failed. {}. Please run the command az webapp log tail
-n {} -g {}""".format(res_dict, name, rg_name))
if res_dict.get('status', 0) == 4:
break
if 'progress' in res_dict:
logger.info(res_dict['progress']) # show only in debug mode, customers seem to find this confusing
# if the deployment is taking longer than expected
if res_dict.get('status', 0) != 4:
_configure_default_logging(cmd, rg_name, name)
raise CLIError("""Timeout reached by the command, however, the deployment operation
is still on-going. Navigate to your scm site to check the deployment status""")
return res_dict
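# Editor's note on the status codes polled above: the Kudu deployment endpoint reports 3 for a
# failed deployment and 4 for success, which is why the loop raises on 3 and breaks on 4.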
def list_continuous_webjobs(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_continuous_web_jobs', slot)
def start_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.start_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
client.web_apps.start_continuous_web_job(resource_group_name, name, webjob_name)
return client.web_apps.get_continuous_web_job(resource_group_name, name, webjob_name)
def stop_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.stop_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
client.web_apps.stop_continuous_web_job(resource_group_name, name, webjob_name)
return client.web_apps.get_continuous_web_job(resource_group_name, name, webjob_name)
def remove_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.delete_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.delete_continuous_web_job(resource_group_name, name, webjob_name)
def list_triggered_webjobs(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_triggered_web_jobs', slot)
def run_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.run_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.get_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
client.web_apps.run_triggered_web_job(resource_group_name, name, webjob_name)
return client.web_apps.get_triggered_web_job(resource_group_name, name, webjob_name)
def remove_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.delete_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.delete_triggered_web_job(resource_group_name, name, webjob_name)
def list_hc(cmd, name, resource_group_name, slot=None):
linux_webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = linux_webapp.reserved
if is_linux:
return logger.warning("hybrid connections not supported on a linux app.")
client = web_client_factory(cmd.cli_ctx)
if slot is None:
listed_vals = client.web_apps.list_hybrid_connections(resource_group_name, name)
else:
listed_vals = client.web_apps.list_hybrid_connections_slot(resource_group_name, name, slot)
# reformats hybrid connection, to prune unnecessary fields
mod_list = []
for x in listed_vals.additional_properties["value"]:
properties = x["properties"]
resourceGroup = x["id"].split("/")
mod_hc = {
"id": x["id"],
"location": x["location"],
"name": x["name"],
"properties": {
"hostname": properties["hostname"],
"port": properties["port"],
"relayArmUri": properties["relayArmUri"],
"relayName": properties["relayName"],
"serviceBusNamespace": properties["serviceBusNamespace"],
"serviceBusSuffix": properties["serviceBusSuffix"]
},
"resourceGroup": resourceGroup[4],
"type": x["type"]
}
mod_list.append(mod_hc)
return mod_list
def add_hc(cmd, name, resource_group_name, namespace, hybrid_connection, slot=None):
linux_webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = linux_webapp.reserved
if is_linux:
return logger.warning("hybrid connections not supported on a linux app.")
web_client = web_client_factory(cmd.cli_ctx)
hy_co_client = hycos_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
namespace_client = namespaces_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
hy_co_id = ''
for n in namespace_client.list():
if n.name == namespace:
hy_co_id = n.id
i = 0
hy_co_resource_group = ''
hy_co_split = hy_co_id.split("/")
for z in hy_co_split:
if z == "resourceGroups":
hy_co_resource_group = hy_co_split[i + 1]
i = i + 1
# calling the relay API to get information about the hybrid connection
hy_co = hy_co_client.get(hy_co_resource_group, namespace, hybrid_connection)
# if the hybrid connection does not have a default sender authorization
# rule, create it
hy_co_rules = hy_co_client.list_authorization_rules(hy_co_resource_group, namespace, hybrid_connection)
has_default_sender_key = False
for r in hy_co_rules:
if r.name == "defaultSender":
for z in r.rights:
if z == z.send:
has_default_sender_key = True
if not has_default_sender_key:
rights = [AccessRights.send]
hy_co_client.create_or_update_authorization_rule(hy_co_resource_group, namespace, hybrid_connection,
"defaultSender", rights)
hy_co_keys = hy_co_client.list_keys(hy_co_resource_group, namespace, hybrid_connection, "defaultSender")
hy_co_info = hy_co.id
hy_co_metadata = ast.literal_eval(hy_co.user_metadata)
hy_co_hostname = ''
for x in hy_co_metadata:
if x["key"] == "endpoint":
hy_co_hostname = x["value"]
hostname_parts = hy_co_hostname.split(":")
hostname = hostname_parts[0]
port = hostname_parts[1]
id_parameters = hy_co_info.split("/")
# populate object with information from the hybrid connection, and set it
# on webapp
hc = HybridConnection(service_bus_namespace=id_parameters[8],
relay_name=hybrid_connection,
relay_arm_uri=hy_co_info,
hostname=hostname,
port=port,
send_key_name="defaultSender",
send_key_value=hy_co_keys.primary_key,
service_bus_suffix=".servicebus.windows.net")
if slot is None:
return_hc = web_client.web_apps.create_or_update_hybrid_connection(resource_group_name, name, namespace,
hybrid_connection, hc)
else:
return_hc = web_client.web_apps.create_or_update_hybrid_connection_slot(resource_group_name, name, namespace,
hybrid_connection, hc, slot)
# reformats hybrid connection, to prune unnecessary fields
resourceGroup = return_hc.id.split("/")
mod_hc = {
"hostname": return_hc.hostname,
"id": return_hc.id,
"location": return_hc.additional_properties["location"],
"name": return_hc.name,
"port": return_hc.port,
"relayArmUri": return_hc.relay_arm_uri,
"resourceGroup": resourceGroup[4],
"serviceBusNamespace": return_hc.service_bus_namespace,
"serviceBusSuffix": return_hc.service_bus_suffix
}
return mod_hc
# set the key the apps use to connect with the hybrid connection
def set_hc_key(cmd, plan, resource_group_name, namespace, hybrid_connection, key_type):
web_client = web_client_factory(cmd.cli_ctx)
# extract the hybrid connection resource group
asp_hy_co = web_client.app_service_plans.get_hybrid_connection(resource_group_name, plan,
namespace, hybrid_connection)
arm_uri = asp_hy_co.relay_arm_uri
split_uri = arm_uri.split("resourceGroups/")
resource_group_strings = split_uri[1].split('/')
relay_resource_group = resource_group_strings[0]
hy_co_client = hycos_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
# calling the relay function to obtain information about the hc in question
hy_co = hy_co_client.get(relay_resource_group, namespace, hybrid_connection)
# if the hybrid connection does not have a default sender authorization
# rule, create it
hy_co_rules = hy_co_client.list_authorization_rules(relay_resource_group, namespace, hybrid_connection)
has_default_sender_key = False
for r in hy_co_rules:
if r.name == "defaultSender":
for z in r.rights:
if z == z.send:
has_default_sender_key = True
if not has_default_sender_key:
rights = [AccessRights.send]
hy_co_client.create_or_update_authorization_rule(relay_resource_group, namespace, hybrid_connection,
"defaultSender", rights)
hy_co_keys = hy_co_client.list_keys(relay_resource_group, namespace, hybrid_connection, "defaultSender")
hy_co_metadata = ast.literal_eval(hy_co.user_metadata)
hy_co_hostname = 0
for x in hy_co_metadata:
if x["key"] == "endpoint":
hy_co_hostname = x["value"]
hostname_parts = hy_co_hostname.split(":")
hostname = hostname_parts[0]
port = hostname_parts[1]
key = "empty"
if key_type.lower() == "primary":
key = hy_co_keys.primary_key
elif key_type.lower() == "secondary":
key = hy_co_keys.secondary_key
# ensures the input is correct
if key == "empty":
logger.warning("Key type is invalid - must be primary or secondary")
return
apps = web_client.app_service_plans.list_web_apps_by_hybrid_connection(resource_group_name, plan, namespace,
hybrid_connection)
# changes the key for every app that uses that hybrid connection
for x in apps:
app_info = ast.literal_eval(x)
app_name = app_info["name"]
app_id = app_info["id"]
id_split = app_id.split("/")
app_resource_group = id_split[4]
hc = HybridConnection(service_bus_namespace=namespace, relay_name=hybrid_connection,
relay_arm_uri=arm_uri, hostname=hostname, port=port, send_key_name="defaultSender",
send_key_value=key)
web_client.web_apps.update_hybrid_connection(app_resource_group, app_name, namespace,
hybrid_connection, hc)
return web_client.app_service_plans.list_web_apps_by_hybrid_connection(resource_group_name, plan,
namespace, hybrid_connection)
def appservice_list_vnet(cmd, resource_group_name, plan):
web_client = web_client_factory(cmd.cli_ctx)
return web_client.app_service_plans.list_vnets(resource_group_name, plan)
def remove_hc(cmd, resource_group_name, name, namespace, hybrid_connection, slot=None):
linux_webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = linux_webapp.reserved
if is_linux:
return logger.warning("hybrid connections not supported on a linux app.")
client = web_client_factory(cmd.cli_ctx)
if slot is None:
return_hc = client.web_apps.delete_hybrid_connection(resource_group_name, name, namespace, hybrid_connection)
else:
return_hc = client.web_apps.delete_hybrid_connection_slot(resource_group_name, name, namespace,
hybrid_connection, slot)
return return_hc
def list_vnet_integration(cmd, name, resource_group_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot is None:
result = list(client.web_apps.list_vnet_connections(resource_group_name, name))
else:
result = list(client.web_apps.list_vnet_connections_slot(resource_group_name, name, slot))
mod_list = []
# reformats the vnet entry, removing unnecessary information
for x in result:
# removes GUIDs from name and id
longName = x.name
if '_' in longName:
usIndex = longName.index('_')
shortName = longName[usIndex + 1:]
else:
shortName = longName
v_id = x.id
lastSlash = v_id.rindex('/')
shortId = v_id[:lastSlash] + '/' + shortName
# extracts desired fields
certThumbprint = x.cert_thumbprint
location = x.additional_properties["location"]
v_type = x.type
vnet_resource_id = x.vnet_resource_id
id_strings = v_id.split('/')
resourceGroup = id_strings[4]
routes = x.routes
vnet_mod = {"certThumbprint": certThumbprint,
"id": shortId,
"location": location,
"name": shortName,
"resourceGroup": resourceGroup,
"routes": routes,
"type": v_type,
"vnetResourceId": vnet_resource_id}
mod_list.append(vnet_mod)
return mod_list
def add_vnet_integration(cmd, name, resource_group_name, vnet, subnet, slot=None):
client = web_client_factory(cmd.cli_ctx)
vnet_client = network_client_factory(cmd.cli_ctx)
list_all_vnets = vnet_client.virtual_networks.list_all()
vnet_id = ''
for v in list_all_vnets:
if v.name == vnet:
vnet_id = v.id
# parsing the arm uri in order to extract vnet_name and vnet_resource_group
vnet_id_strings = vnet_id.split('/')
vnet_resource_group = ''
i = 0
for z in vnet_id_strings:
if z == "resourceGroups":
vnet_resource_group = vnet_id_strings[i + 1]
i = i + 1
if slot is None:
swift_connection_info = client.web_apps.get_swift_virtual_network_connection(resource_group_name, name)
else:
swift_connection_info = client.web_apps.get_swift_virtual_network_connection_slot(resource_group_name,
name, slot)
# check to see if the connection would be supported
if swift_connection_info.swift_supported is not True:
return logger.warning("""Your app must be in an Azure App Service deployment that is
capable of scaling up to Premium v2\nLearn more:
https://go.microsoft.com/fwlink/?linkid=2060115&clcid=0x409""")
subnetObj = vnet_client.subnets.get(vnet_resource_group, vnet, subnet)
delegations = subnetObj.delegations
delegated = False
for d in delegations:
if d.service_name == "Microsoft.Web/serverFarms":
delegated = True
if not delegated:
vnet_client.subnets.create_or_update(vnet_resource_group, vnet, subnet,
subnet_parameters=Subnet(name="subnet",
address_prefix=subnetObj.address_prefix,
delegations=[Delegation(name="delegation",
service_name="Microsoft" +
".Web/serverFarms")]))
id_subnet = vnet_client.subnets.get(vnet_resource_group, vnet, subnet)
subnet_resource_id = id_subnet.id
swiftVnet = SwiftVirtualNetwork(subnet_resource_id=subnet_resource_id,
swift_supported=True)
if slot is None:
return_vnet = client.web_apps.create_or_update_swift_virtual_network_connection(resource_group_name, name,
swiftVnet)
else:
return_vnet = client.web_apps.create_or_update_swift_virtual_network_connection_slot(resource_group_name, name,
swiftVnet, slot)
# reformats the vnet entry, removing unnecessary information
id_strings = return_vnet.id.split('/')
resourceGroup = id_strings[4]
mod_vnet = {
"id": return_vnet.id,
"location": return_vnet.additional_properties["location"],
"name": return_vnet.name,
"resourceGroup": resourceGroup,
"subnetResourceId": return_vnet.subnet_resource_id
}
return mod_vnet
def remove_vnet_integration(cmd, name, resource_group_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot is None:
return_vnet = client.web_apps.delete_swift_virtual_network(resource_group_name, name)
else:
return_vnet = client.web_apps.delete_swift_virtual_network_slot(resource_group_name, name, slot)
return return_vnet
def get_history_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.list_triggered_web_job_history_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.list_triggered_web_job_history(resource_group_name, name, webjob_name)
def webapp_up(cmd, name, resource_group_name=None, plan=None, location=None, sku=None, dryrun=False, logs=False, # pylint: disable=too-many-statements,
launch_browser=False):
import os
src_dir = os.getcwd()
_src_path_escaped = "{}".format(src_dir.replace(os.sep, os.sep + os.sep))
client = web_client_factory(cmd.cli_ctx)
user = get_profile_username()
_create_new_rg = False
_create_new_app = does_app_already_exist(cmd, name)
os_name = detect_os_form_src(src_dir)
lang_details = get_lang_from_content(src_dir)
language = lang_details.get('language')
# detect the version
data = get_runtime_version_details(lang_details.get('file_loc'), language)
version_used_create = data.get('to_create')
detected_version = data.get('detected')
runtime_version = "{}|{}".format(language, version_used_create) if \
version_used_create != "-" else version_used_create
site_config = None
if not _create_new_app: # App exists
# Get the ASP & RG info; if the ASP & RG parameters are provided, use those, otherwise look them up
logger.warning("Webapp %s already exists. The command will deploy contents to the existing app.", name)
app_details = get_app_details(cmd, name)
current_rg = app_details.resource_group
if resource_group_name is not None and (resource_group_name.lower() != current_rg.lower()):
raise CLIError("The webapp {} exists in ResourceGroup {} and does not match the value entered {}. Please "
"re-run the command with the correct parameters.".format(name, current_rg, resource_group_name))
rg_name = resource_group_name or current_rg
if location is None:
loc = app_details.location.replace(" ", "").lower()
else:
loc = location.replace(" ", "").lower()
plan_details = parse_resource_id(app_details.server_farm_id)
current_plan = plan_details['name']
if plan is not None and current_plan.lower() != plan.lower():
raise CLIError("The plan name entered {} does not match the plan name that the webapp is hosted in {}. "
"Please check if you have configured defaults for the plan name and re-run the command."
.format(plan, current_plan))
plan = plan or plan_details['name']
plan_info = client.app_service_plans.get(rg_name, plan)
sku = plan_info.sku.name if isinstance(plan_info, AppServicePlan) else 'Free'
current_os = 'Linux' if plan_info.reserved else 'Windows'
# Raise an error if the app's current OS differs from the OS detected from the source
if current_os.lower() != os_name.lower():
raise CLIError("The webapp {} is a {} app. The code detected at '{}' will default to '{}'. "
"Please create a new app to continue this operation.".format(name, current_os, src_dir, os_name))
_is_linux = plan_info.reserved
# for an existing app check if the runtime version needs to be updated
# Get site config to check the runtime version
site_config = client.web_apps.get_configuration(rg_name, name)
else: # need to create new app, check if we need to use default RG or use user entered values
logger.warning("webapp %s doesn't exist", name)
sku = get_sku_to_use(src_dir, sku)
loc = set_location(cmd, sku, location)
rg_name = get_rg_to_use(cmd, user, loc, os_name, resource_group_name)
_is_linux = os_name.lower() == 'linux'
_create_new_rg = should_create_new_rg(cmd, rg_name, _is_linux)
plan = get_plan_to_use(cmd, user, os_name, loc, sku, rg_name, _create_new_rg, plan)
dry_run_str = r""" {
"name" : "%s",
"appserviceplan" : "%s",
"resourcegroup" : "%s",
"sku": "%s",
"os": "%s",
"location" : "%s",
"src_path" : "%s",
"version_detected": "%s",
"runtime_version": "%s"
}
""" % (name, plan, rg_name, get_sku_name(sku), os_name, loc, _src_path_escaped, detected_version,
runtime_version)
create_json = json.loads(dry_run_str)
if dryrun:
logger.warning("Web app will be created with the below configuration, re-run the command "
"without the --dryrun flag to create & deploy a new app")
return create_json
if _create_new_rg:
logger.warning("Creating Resource group '%s' ...", rg_name)
create_resource_group(cmd, rg_name, location)
logger.warning("Resource group creation complete")
# create ASP
logger.warning("Creating AppServicePlan '%s' ...", plan)
# we always call the ASP create-or-update API so that, on re-deployment, any updated SKU or plan
# settings are applied
create_app_service_plan(cmd, rg_name, plan, _is_linux, False, sku, 1 if _is_linux else None, location)
if _create_new_app:
logger.warning("Creating webapp '%s' ...", name)
create_webapp(cmd, rg_name, name, plan, runtime_version if _is_linux else None, tags={"cli": 'webapp_up'},
using_webapp_up=True, language=language)
_configure_default_logging(cmd, rg_name, name)
else: # for existing app if we might need to update the stack runtime settings
if os_name.lower() == 'linux' and site_config.linux_fx_version != runtime_version:
logger.warning('Updating runtime version from %s to %s',
site_config.linux_fx_version, runtime_version)
update_site_configs(cmd, rg_name, name, linux_fx_version=runtime_version)
elif os_name.lower() == 'windows' and site_config.windows_fx_version != runtime_version:
logger.warning('Updating runtime version from %s to %s',
site_config.windows_fx_version, runtime_version)
update_site_configs(cmd, rg_name, name, windows_fx_version=runtime_version)
create_json['runtime_version'] = runtime_version
# Zip contents & Deploy
logger.warning("Creating zip with contents of dir %s ...", src_dir)
# zip contents & deploy
zip_file_path = zip_contents_from_dir(src_dir, language)
enable_zip_deploy(cmd, rg_name, name, zip_file_path)
# Remove the file after deployment, handling exception if user removed the file manually
try:
os.remove(zip_file_path)
except OSError:
pass
if launch_browser:
logger.warning("Launching app using default browser")
view_in_browser(cmd, rg_name, name, None, logs)
else:
_url = _get_url(cmd, rg_name, name)
logger.warning("You can launch the app at %s", _url)
create_json.update({'URL': _url})
if logs:
_configure_default_logging(cmd, rg_name, name)
return get_streaming_log(cmd, rg_name, name)
with ConfiguredDefaultSetter(cmd.cli_ctx.config, True):
cmd.cli_ctx.config.set_value('defaults', 'group', rg_name)
cmd.cli_ctx.config.set_value('defaults', 'sku', sku)
cmd.cli_ctx.config.set_value('defaults', 'appserviceplan', plan)
cmd.cli_ctx.config.set_value('defaults', 'location', loc)
cmd.cli_ctx.config.set_value('defaults', 'web', name)
return create_json
def _ping_scm_site(cmd, resource_group, name):
from azure.cli.core.util import should_disable_connection_verify
# wake up kudu, by making an SCM call
import requests
# workaround until the timeout-limit issue for Linux is investigated & fixed
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group, name)
scm_url = _get_scm_url(cmd, resource_group, name)
import urllib3
authorization = urllib3.util.make_headers(basic_auth='{}:{}'.format(user_name, password))
requests.get(scm_url + '/api/settings', headers=authorization, verify=not should_disable_connection_verify())
def is_webapp_up(tunnel_server):
return tunnel_server.is_webapp_up()
def get_tunnel(cmd, resource_group_name, name, port=None, slot=None):
webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = webapp.reserved
if not is_linux:
raise CLIError("Only Linux App Service Plans supported, Found a Windows App Service Plan")
profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
profile_user_name = next(p['userName'] for p in profiles)
profile_user_password = next(p['userPWD'] for p in profiles)
if port is None:
port = 0 # Will auto-select a free port from 1024-65535
logger.info('No port defined, creating on random free port')
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
tunnel_server = TunnelServer('', port, scm_url, profile_user_name, profile_user_password)
_ping_scm_site(cmd, resource_group_name, name)
_wait_for_webapp(tunnel_server)
return tunnel_server
def create_tunnel(cmd, resource_group_name, name, port=None, slot=None, timeout=None):
tunnel_server = get_tunnel(cmd, resource_group_name, name, port, slot)
t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
t.daemon = True
t.start()
logger.warning('Opening tunnel on port: %s', tunnel_server.local_port)
config = get_site_configs(cmd, resource_group_name, name, slot)
if config.remote_debugging_enabled:
logger.warning('Tunnel is ready, connect on port %s', tunnel_server.local_port)
else:
ssh_user_name = 'root'
ssh_user_password = 'Docker!'
logger.warning('SSH is available { username: %s, password: %s }', ssh_user_name, ssh_user_password)
logger.warning('Ctrl + C to close')
if timeout:
time.sleep(int(timeout))
else:
while t.is_alive():
time.sleep(5)
def create_tunnel_and_session(cmd, resource_group_name, name, port=None, slot=None, timeout=None):
tunnel_server = get_tunnel(cmd, resource_group_name, name, port, slot)
t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
t.daemon = True
t.start()
ssh_user_name = 'root'
ssh_user_password = 'Docker!'
s = threading.Thread(target=_start_ssh_session,
args=('localhost', tunnel_server.get_port(), ssh_user_name, ssh_user_password))
s.daemon = True
s.start()
if timeout:
time.sleep(int(timeout))
else:
while s.is_alive() and t.is_alive():
time.sleep(5)
def _wait_for_webapp(tunnel_server):
tries = 0
while True:
if is_webapp_up(tunnel_server):
break
if tries == 0:
logger.warning('Connection is not ready yet, please wait')
if tries == 60:
raise CLIError("Timeout Error, Unable to establish a connection")
tries = tries + 1
logger.warning('.')
time.sleep(1)
def _start_tunnel(tunnel_server):
tunnel_server.start_server()
def _start_ssh_session(hostname, port, username, password):
tries = 0
while True:
try:
c = Connection(host=hostname,
port=port,
user=username,
# connect_timeout=60*10,
connect_kwargs={"password": password})
break
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
if tries == 0:
logger.warning('Connection is not ready yet, please wait')
if tries == 60:
raise CLIError("Timeout Error, Unable to establish a connection")
tries = tries + 1
logger.warning('.')
time.sleep(1)
try:
c.run('cat /etc/motd', pty=True)
c.run('source /etc/profile; exec $SHELL -l', pty=True)
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
finally:
c.close()
def ssh_webapp(cmd, resource_group_name, name, port=None, slot=None, timeout=None): # pylint: disable=too-many-statements
config = get_site_configs(cmd, resource_group_name, name, slot)
if config.remote_debugging_enabled:
raise CLIError('remote debugging is enabled, please disable')
import platform
if platform.system() == "Windows":
raise CLIError('webapp ssh is only supported on linux and mac')
create_tunnel_and_session(cmd, resource_group_name, name, port=port, slot=slot, timeout=timeout)
def create_devops_pipeline(
cmd,
functionapp_name=None,
organization_name=None,
project_name=None,
repository_name=None,
overwrite_yaml=None,
allow_force_push=None,
github_pat=None,
github_repository=None
):
from .azure_devops_build_interactive import AzureDevopsBuildInteractive
azure_devops_build_interactive = AzureDevopsBuildInteractive(cmd, logger, functionapp_name,
organization_name, project_name, repository_name,
overwrite_yaml, allow_force_push,
github_pat, github_repository)
return azure_devops_build_interactive.interactive_azure_devops_build()
def _configure_default_logging(cmd, rg_name, name):
logger.warning("Configuring default logging for the app, if not already enabled")
return config_diagnostics(cmd, rg_name, name,
application_logging=True, web_server_logging='filesystem',
docker_container_logging='true')
|
unlogger.py
|
#!/usr/bin/env python3
import argparse
import os
import sys
import zmq
import time
import signal
import multiprocessing
from uuid import uuid4
from collections import namedtuple
from collections import deque
from datetime import datetime
from cereal import log as capnp_log
from cereal.services import service_list
from cereal.messaging import pub_sock, MultiplePublishersError
from cereal.visionipc.visionipc_pyx import VisionIpcServer, VisionStreamType # pylint: disable=no-name-in-module, import-error
from common import realtime
from common.transformations.camera import eon_f_frame_size, tici_f_frame_size
from tools.lib.kbhit import KBHit
from tools.lib.logreader import MultiLogIterator
from tools.lib.route import Route
from tools.lib.route_framereader import RouteFrameReader
# Commands.
SetRoute = namedtuple("SetRoute", ("name", "start_time", "data_dir"))
SeekAbsoluteTime = namedtuple("SeekAbsoluteTime", ("secs",))
SeekRelativeTime = namedtuple("SeekRelativeTime", ("secs",))
TogglePause = namedtuple("TogglePause", ())
StopAndQuit = namedtuple("StopAndQuit", ())
VIPC_RGB = "rgb"
VIPC_YUV = "yuv"
class UnloggerWorker(object):
def __init__(self):
self._frame_reader = None
self._cookie = None
self._readahead = deque()
def run(self, commands_address, data_address, pub_types):
zmq.Context._instance = None
commands_socket = zmq.Context.instance().socket(zmq.PULL)
commands_socket.connect(commands_address)
data_socket = zmq.Context.instance().socket(zmq.PUSH)
data_socket.connect(data_address)
poller = zmq.Poller()
poller.register(commands_socket, zmq.POLLIN)
# We can't publish frames without roadEncodeIdx, so add when it's missing.
if "roadCameraState" in pub_types:
pub_types["roadEncodeIdx"] = None
# gc.set_debug(gc.DEBUG_LEAK | gc.DEBUG_OBJECTS | gc.DEBUG_STATS | gc.DEBUG_SAVEALL |
# gc.DEBUG_UNCOLLECTABLE)
# TODO: WARNING: pycapnp leaks memory all over the place after unlogger runs for a while; gc
# pauses become huge because there are so many tracked objects. The solution will be to switch
# to the new cython capnp
try:
route = None
while True:
while poller.poll(0.) or route is None:
cookie, cmd = commands_socket.recv_pyobj()
route = self._process_commands(cmd, route, pub_types)
# **** get message ****
self._read_logs(cookie, pub_types)
self._send_logs(data_socket)
finally:
if self._frame_reader is not None:
self._frame_reader.close()
data_socket.close()
commands_socket.close()
def _read_logs(self, cookie, pub_types):
fullHEVC = capnp_log.EncodeIndex.Type.fullHEVC
lr = self._lr
while len(self._readahead) < 1000:
route_time = lr.tell()
msg = next(lr)
typ = msg.which()
if typ not in pub_types:
continue
# **** special case certain message types ****
if typ == "roadEncodeIdx" and msg.roadEncodeIdx.type == fullHEVC:
# this assumes the roadEncodeIdx always comes before the frame
self._frame_id_lookup[
msg.roadEncodeIdx.frameId] = msg.roadEncodeIdx.segmentNum, msg.roadEncodeIdx.segmentId
# print("encode", msg.roadEncodeIdx.frameId, len(self._readahead), route_time)
self._readahead.appendleft((typ, msg, route_time, cookie))
def _send_logs(self, data_socket):
while len(self._readahead) > 500:
typ, msg, route_time, cookie = self._readahead.pop()
smsg = msg.as_builder()
if typ == "roadCameraState":
frame_id = msg.roadCameraState.frameId
# Frame exists, make sure we have a framereader.
# load the frame readers as needed
s1 = time.time()
try:
img = self._frame_reader.get(frame_id, pix_fmt="rgb24")
except Exception:
img = None
fr_time = time.time() - s1
if fr_time > 0.05:
print("FRAME(%d) LAG -- %.2f ms" % (frame_id, fr_time*1000.0))
if img is not None:
img = img[:, :, ::-1] # Convert RGB to BGR, which is what the camera outputs
img = img.flatten()
bts = img.tobytes()
smsg.roadCameraState.image = bts
extra = (smsg.roadCameraState.frameId, smsg.roadCameraState.timestampSof, smsg.roadCameraState.timestampEof)
data_socket.send_pyobj((cookie, VIPC_RGB, msg.logMonoTime, route_time, extra), flags=zmq.SNDMORE)
data_socket.send(bts, copy=False)
img_yuv = self._frame_reader.get(frame_id, pix_fmt="yuv420p")
if img_yuv is not None:
data_socket.send_pyobj((cookie, VIPC_YUV, msg.logMonoTime, route_time, extra), flags=zmq.SNDMORE)
data_socket.send(img_yuv.flatten().tobytes(), copy=False)
data_socket.send_pyobj((cookie, typ, msg.logMonoTime, route_time), flags=zmq.SNDMORE)
data_socket.send(smsg.to_bytes(), copy=False)
def _process_commands(self, cmd, route, pub_types):
seek_to = None
if route is None or (isinstance(cmd, SetRoute) and route.name != cmd.name):
seek_to = cmd.start_time
route = Route(cmd.name, cmd.data_dir)
self._lr = MultiLogIterator(route.log_paths(), wraparound=True)
if self._frame_reader is not None:
self._frame_reader.close()
if "roadCameraState" in pub_types or "roadEncodeIdx" in pub_types:
# reset frames for a route
self._frame_id_lookup = {}
self._frame_reader = RouteFrameReader(
route.camera_paths(), None, self._frame_id_lookup, readahead=True)
# always reset this on a seek
if isinstance(cmd, SeekRelativeTime):
seek_to = self._lr.tell() + cmd.secs
elif isinstance(cmd, SeekAbsoluteTime):
seek_to = cmd.secs
elif isinstance(cmd, StopAndQuit):
exit()
if seek_to is not None:
print("seeking", seek_to)
if not self._lr.seek(seek_to):
print("Can't seek: time out of bounds")
else:
next(self._lr) # ignore one
return route
def _get_address_send_func(address):
sock = pub_sock(address)
return sock.send
def _get_vipc_server(length):
w, h = {3 * w * h: (w, h) for (w, h) in [tici_f_frame_size, eon_f_frame_size]}[length]
vipc_server = VisionIpcServer("camerad")
vipc_server.create_buffers(VisionStreamType.VISION_STREAM_RGB_BACK, 4, True, w, h)
vipc_server.create_buffers(VisionStreamType.VISION_STREAM_YUV_BACK, 40, False, w, h)
vipc_server.start_listener()
return vipc_server
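# Editor's note: the buffer length (3 * width * height bytes of RGB data) is used to infer which
# camera produced the frame, so the VisionIpc buffers are created with the matching resolution
# before any frame is sent.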
def unlogger_thread(command_address, forward_commands_address, data_address, run_realtime,
address_mapping, publish_time_length, bind_early, no_loop, no_visionipc):
# Clear context to avoid problems with multiprocessing.
zmq.Context._instance = None
context = zmq.Context.instance()
command_sock = context.socket(zmq.PULL)
command_sock.bind(command_address)
forward_commands_socket = context.socket(zmq.PUSH)
forward_commands_socket.bind(forward_commands_address)
data_socket = context.socket(zmq.PULL)
data_socket.bind(data_address)
# Set readahead to a reasonable number.
data_socket.setsockopt(zmq.RCVHWM, 10000)
poller = zmq.Poller()
poller.register(command_sock, zmq.POLLIN)
poller.register(data_socket, zmq.POLLIN)
if bind_early:
send_funcs = {
typ: _get_address_send_func(address)
for typ, address in address_mapping.items()
}
# Give subscribers a chance to connect.
time.sleep(0.1)
else:
send_funcs = {}
start_time = float("inf")
printed_at = 0
generation = 0
paused = False
reset_time = True
prev_msg_time = None
vipc_server = None
while True:
evts = dict(poller.poll())
if command_sock in evts:
cmd = command_sock.recv_pyobj()
if isinstance(cmd, TogglePause):
paused = not paused
if paused:
poller.modify(data_socket, 0)
else:
poller.modify(data_socket, zmq.POLLIN)
else:
# Forward the command to the log data thread.
# TODO: Remove everything on data_socket.
generation += 1
forward_commands_socket.send_pyobj((generation, cmd))
if isinstance(cmd, StopAndQuit):
return
reset_time = True
elif data_socket in evts:
msg_generation, typ, msg_time, route_time, *extra = data_socket.recv_pyobj(flags=zmq.RCVMORE)
msg_bytes = data_socket.recv()
if msg_generation < generation:
# Skip packets.
continue
if no_loop and prev_msg_time is not None and prev_msg_time > msg_time + 1e9:
generation += 1
forward_commands_socket.send_pyobj((generation, StopAndQuit()))
return
prev_msg_time = msg_time
msg_time_seconds = msg_time * 1e-9
if reset_time:
msg_start_time = msg_time_seconds
real_start_time = realtime.sec_since_boot()
start_time = min(start_time, msg_start_time)
reset_time = False
if publish_time_length and msg_time_seconds - start_time > publish_time_length:
generation += 1
forward_commands_socket.send_pyobj((generation, StopAndQuit()))
return
# Print time.
if abs(printed_at - route_time) > 5.:
print("at", route_time)
printed_at = route_time
if typ not in send_funcs and typ not in [VIPC_RGB, VIPC_YUV]:
if typ in address_mapping:
# Remove so we don't keep printing warnings.
address = address_mapping.pop(typ)
try:
print("binding", typ)
send_funcs[typ] = _get_address_send_func(address)
except Exception as e:
print("couldn't replay {}: {}".format(typ, e))
continue
else:
# Skip messages that we are not registered to publish.
continue
# Sleep as needed for real time playback.
if run_realtime:
msg_time_offset = msg_time_seconds - msg_start_time
real_time_offset = realtime.sec_since_boot() - real_start_time
lag = msg_time_offset - real_time_offset
if lag > 0 and lag < 30: # a large jump is OK, likely due to an out of order segment
if lag > 1:
print("sleeping for", lag)
time.sleep(lag)
elif lag < -1:
# Relax the real time schedule when we slip far behind.
reset_time = True
# Send message.
try:
if typ in [VIPC_RGB, VIPC_YUV]:
if not no_visionipc:
if vipc_server is None:
vipc_server = _get_vipc_server(len(msg_bytes))
i, sof, eof = extra[0]
stream = VisionStreamType.VISION_STREAM_RGB_BACK if typ == VIPC_RGB else VisionStreamType.VISION_STREAM_YUV_BACK
vipc_server.send(stream, msg_bytes, i, sof, eof)
else:
send_funcs[typ](msg_bytes)
except MultiplePublishersError:
del send_funcs[typ]
def timestamp_to_s(tss):
return time.mktime(datetime.strptime(tss, '%Y-%m-%d--%H-%M-%S').timetuple())
def absolute_time_str(s, start_time):
try:
# first try if it's a float
return float(s)
except ValueError:
# now see if it's a timestamp
return timestamp_to_s(s) - start_time
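# Example (editor's note): with a route that started at '2019-05-01--12-30-00' (a made-up
# timestamp), absolute_time_str('2019-05-01--12-31-00', start_time) returns 60.0, while a plain
# number such as '120.5' is returned unchanged as a float.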
def _get_address_mapping(args):
if args.min is not None:
services_to_mock = [
'deviceState', 'can', 'pandaState', 'sensorEvents', 'gpsNMEA', 'roadCameraState', 'roadEncodeIdx',
'modelV2', 'liveLocation',
]
elif args.enabled is not None:
services_to_mock = args.enabled
else:
services_to_mock = service_list.keys()
address_mapping = {service_name: service_name for service_name in services_to_mock}
address_mapping.update(dict(args.address_mapping))
for k in args.disabled:
address_mapping.pop(k, None)
non_services = set(address_mapping) - set(service_list)
if non_services:
print("WARNING: Unknown services {}".format(list(non_services)))
return address_mapping
def keyboard_controller_thread(q, route_start_time):
print("keyboard waiting for input")
kb = KBHit()
while 1:
c = kb.getch()
if c == 'm': # Move forward by 1 minute
q.send_pyobj(SeekRelativeTime(60))
elif c == 'M': # Move backward by 1 minute
q.send_pyobj(SeekRelativeTime(-60))
elif c == 's': # Move forward by 10s
q.send_pyobj(SeekRelativeTime(10))
elif c == 'S': # Move backward by 10s
q.send_pyobj(SeekRelativeTime(-10))
elif c == 'G': # Seek back to the start of the route
q.send_pyobj(SeekAbsoluteTime(0.))
elif c == "\x20": # Space bar.
q.send_pyobj(TogglePause())
elif c == "\n":
try:
seek_time_input = input('time: ')
seek_time = absolute_time_str(seek_time_input, route_start_time)
# If less than 60, assume segment number
if seek_time < 60:
seek_time *= 60
q.send_pyobj(SeekAbsoluteTime(seek_time))
except Exception as e:
print("Time not understood: {}".format(e))
def get_arg_parser():
parser = argparse.ArgumentParser(
description="Mock openpilot components by publishing logged messages.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("route_name", type=(lambda x: x.replace("#", "|")), nargs="?",
help="The route whose messages will be published.")
parser.add_argument("data_dir", nargs='?', default=os.getenv('UNLOGGER_DATA_DIR'),
help="Path to directory in which log and camera files are located.")
parser.add_argument("--no-loop", action="store_true", help="Stop at the end of the replay.")
def key_value_pair(x):
return x.split("=")
parser.add_argument("address_mapping", nargs="*", type=key_value_pair,
help="Pairs <service>=<zmq_addr> to publish <service> on <zmq_addr>.")
def comma_list(x):
return x.split(",")
to_mock_group = parser.add_mutually_exclusive_group()
to_mock_group.add_argument("--min", action="store_true", default=os.getenv("MIN"))
to_mock_group.add_argument("--enabled", default=os.getenv("ENABLED"), type=comma_list)
parser.add_argument("--disabled", type=comma_list, default=os.getenv("DISABLED") or ())
parser.add_argument(
"--tl", dest="publish_time_length", type=float, default=None,
help="Length of interval in event time for which messages should be published.")
parser.add_argument(
"--no-realtime", dest="realtime", action="store_false", default=True,
help="Publish messages as quickly as possible instead of realtime.")
parser.add_argument(
"--no-interactive", dest="interactive", action="store_false", default=True,
help="Disable interactivity.")
parser.add_argument(
"--bind-early", action="store_true", default=False,
help="Bind early to avoid dropping messages.")
parser.add_argument(
"--no-visionipc", action="store_true", default=False,
help="Do not output video over visionipc")
parser.add_argument(
"--start-time", type=float, default=0.,
help="Seek to this absolute time (in seconds) upon starting playback.")
return parser
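# Usage sketch (hypothetical route name and addresses; the script file name is assumed here,
# it is not shown in this excerpt):
#   python unlogger.py "b0c9d2329ad1606b#2018-01-01--12-00-00" /data/routes \
#       can=tcp://127.0.0.1:9001 --enabled can,modelV2
# This would publish only 'can' and 'modelV2', remapping 'can' onto the given ZMQ address
# via the positional <service>=<zmq_addr> pairs.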
def main(argv):
args = get_arg_parser().parse_args(argv)
command_address = "ipc:///tmp/{}".format(uuid4())
forward_commands_address = "ipc:///tmp/{}".format(uuid4())
data_address = "ipc:///tmp/{}".format(uuid4())
address_mapping = _get_address_mapping(args)
command_sock = zmq.Context.instance().socket(zmq.PUSH)
command_sock.connect(command_address)
if args.route_name is not None:
route_name_split = args.route_name.split("|")
if len(route_name_split) > 1:
route_start_time = timestamp_to_s(route_name_split[1])
else:
route_start_time = 0
command_sock.send_pyobj(
SetRoute(args.route_name, args.start_time, args.data_dir))
else:
print("waiting for external command...")
route_start_time = 0
subprocesses = {}
try:
subprocesses["data"] = multiprocessing.Process(
target=UnloggerWorker().run,
args=(forward_commands_address, data_address, address_mapping.copy()))
subprocesses["control"] = multiprocessing.Process(
target=unlogger_thread,
args=(command_address, forward_commands_address, data_address, args.realtime,
_get_address_mapping(args), args.publish_time_length, args.bind_early, args.no_loop, args.no_visionipc))
subprocesses["data"].start()
subprocesses["control"].start()
# Exit if any of the children die.
def exit_if_children_dead(*_):
for _, p in subprocesses.items():
if not p.is_alive():
[p.terminate() for p in subprocesses.values()]
exit()
signal.signal(signal.SIGCHLD, signal.SIG_IGN)
signal.signal(signal.SIGCHLD, exit_if_children_dead)
if args.interactive:
keyboard_controller_thread(command_sock, route_start_time)
else:
# Wait forever for children.
while True:
time.sleep(10000.)
finally:
for p in subprocesses.values():
if p.is_alive():
# Process.join() does not raise on timeout; check is_alive() instead.
p.join(3.)
if p.is_alive():
p.terminate()
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
|
job.py
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Fix a known cloudpickle compatibility problem.
import compatible_trick
import os
os.environ['CUDA_VISIBLE_DEVICES'] = ''
os.environ['XPARL'] = 'True'
import argparse
import cloudpickle
import pickle
import psutil
import re
import sys
import tempfile
import threading
import time
import traceback
import zmq
from multiprocessing import Process, Pipe
from parl.utils import to_str, to_byte, get_ip_address, logger
from parl.utils.communication import loads_argument, loads_return,\
dumps_argument, dumps_return
from parl.remote import remote_constants
from parl.utils.exceptions import SerializeError, DeserializeError
from parl.remote.message import InitializedJob
from parl.remote.utils import load_remote_class, redirect_stdout_to_file
class Job(object):
"""Base class for the job.
After establishing connection with the remote object, the job will
create a remote class instance locally and enter an infinite loop
in a separate process, waiting for commands from the remote object.
"""
def __init__(self, worker_address, log_server_address):
"""
Args:
worker_address(str): worker address to which job information (e.g., pid) is sent.
log_server_address(str): address of the log web server on the worker.
Attributes:
pid (int): Job process ID.
max_memory (float): Maximum memory (MB) that can be used by each remote instance.
"""
self.max_memory = None
self.job_address_receiver, job_address_sender = Pipe()
self.job_id_receiver, job_id_sender = Pipe()
self.worker_address = worker_address
self.log_server_address = log_server_address
self.job_ip = get_ip_address()
self.pid = os.getpid()
self.run_job_process = Process(
target=self.run, args=(job_address_sender, job_id_sender))
self.run_job_process.start()
"""
NOTE:
In Windows, it will raise errors when creating threading.Lock before starting multiprocess.Process.
"""
self.lock = threading.Lock()
self._create_sockets()
process = psutil.Process(self.pid)
self.init_memory = float(process.memory_info()[0]) / (1024**2)
self.run_job_process.join()
with self.lock:
self.kill_job_socket.send_multipart(
[remote_constants.KILLJOB_TAG,
to_byte(self.job_address)])
try:
_ = self.kill_job_socket.recv_multipart()
except zmq.error.Again as e:
pass
os._exit(0)
def _create_sockets(self):
"""Create five sockets for each job in main process.
(1) job_socket(functional socket): sends job_address and heartbeat_address to worker.
(2) ping_heartbeat_socket: replies ping message of client.
(3) worker_heartbeat_socket: replies heartbeat message of worker.
(4) client_heartbeat_socket: replies heartbeat message of client.
(5) kill_job_socket: sends a command to the corresponding worker to kill the job.
"""
# wait for another process to create reply socket
self.job_address = self.job_address_receiver.recv()
self.job_id = self.job_id_receiver.recv()
self.ctx = zmq.Context()
# create the job_socket
self.job_socket = self.ctx.socket(zmq.REQ)
self.job_socket.connect("tcp://{}".format(self.worker_address))
# a thread that replies to ping signals from the client
ping_heartbeat_socket, ping_heartbeat_address = self._create_heartbeat_server(
timeout=False)
ping_thread = threading.Thread(
target=self._reply_ping, args=(ping_heartbeat_socket, ))
ping_thread.setDaemon(True)
ping_thread.start()
# a thread that replies to heartbeat signals from the worker
worker_heartbeat_socket, worker_heartbeat_address = self._create_heartbeat_server(
)
worker_thread = threading.Thread(
target=self._reply_worker_heartbeat,
args=(worker_heartbeat_socket, ))
worker_thread.setDaemon(True)
# a thread that replies to heartbeat signals from the client
client_heartbeat_socket, client_heartbeat_address = self._create_heartbeat_server(
)
self.client_thread = threading.Thread(
target=self._reply_client_heartbeat,
args=(client_heartbeat_socket, ))
self.client_thread.setDaemon(True)
# sends job information to the worker
initialized_job = InitializedJob(
self.job_address, worker_heartbeat_address,
client_heartbeat_address, ping_heartbeat_address, None, self.pid,
self.job_id, self.log_server_address)
self.job_socket.send_multipart(
[remote_constants.NORMAL_TAG,
cloudpickle.dumps(initialized_job)])
message = self.job_socket.recv_multipart()
worker_thread.start()
tag = message[0]
assert tag == remote_constants.NORMAL_TAG
# create the kill_job_socket
kill_job_address = to_str(message[1])
self.kill_job_socket = self.ctx.socket(zmq.REQ)
self.kill_job_socket.setsockopt(
zmq.RCVTIMEO, remote_constants.HEARTBEAT_TIMEOUT_S * 1000)
self.kill_job_socket.connect("tcp://{}".format(kill_job_address))
def _check_used_memory(self):
"""Check if the memory used by this job exceeds self.max_memory."""
stop_job = False
if self.max_memory is not None:
process = psutil.Process(self.pid)
used_memory = float(process.memory_info()[0]) / (1024**2)
if used_memory > self.max_memory + self.init_memory:
stop_job = True
return stop_job
def _reply_ping(self, socket):
"""Create a socket server that reply the ping signal from client.
This signal is used to make sure that the job is still alive.
"""
message = socket.recv_multipart()
max_memory = to_str(message[1])
if max_memory != 'None':
self.max_memory = float(max_memory)
socket.send_multipart([remote_constants.HEARTBEAT_TAG])
self.client_thread.start()
socket.close(0)
def _create_heartbeat_server(self, timeout=True):
"""Create a socket server that will raises timeout exception.
"""
heartbeat_socket = self.ctx.socket(zmq.REP)
if timeout:
heartbeat_socket.setsockopt(
zmq.RCVTIMEO, remote_constants.HEARTBEAT_RCVTIMEO_S * 1000)
heartbeat_socket.linger = 0
heartbeat_port = heartbeat_socket.bind_to_random_port(addr="tcp://*")
heartbeat_address = "{}:{}".format(self.job_ip, heartbeat_port)
return heartbeat_socket, heartbeat_address
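# Minimal usage sketch (assumes it is called from within Job, as the surrounding methods do):
#   sock, addr = self._create_heartbeat_server(timeout=True)
#   # 'sock' is a zmq REP socket bound to a random port; 'addr' is "<job_ip>:<port>".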
def _reply_client_heartbeat(self, socket):
"""Create a socket that replies heartbeat signals from the client.
If the job losts connection with the client, it will exit too.
"""
while True:
try:
message = socket.recv_multipart()
stop_job = self._check_used_memory()
socket.send_multipart([
remote_constants.HEARTBEAT_TAG,
to_byte(str(stop_job)),
to_byte(self.job_address)
])
if stop_job:
logger.error(
"Memory used by this job exceeds {}. This job will exit."
.format(self.max_memory))
time.sleep(5)
socket.close(0)
os._exit(1)
except zmq.error.Again as e:
logger.warning(
"[Job] Cannot connect to the client. This job will exit and inform the worker."
)
break
socket.close(0)
with self.lock:
self.kill_job_socket.send_multipart(
[remote_constants.KILLJOB_TAG,
to_byte(self.job_address)])
try:
_ = self.kill_job_socket.recv_multipart()
except zmq.error.Again as e:
pass
logger.warning("[Job]lost connection with the client, will exit")
os._exit(1)
def _reply_worker_heartbeat(self, socket):
"""create a socket that replies heartbeat signals from the worker.
If the worker has exited, the job will exit automatically.
"""
while True:
try:
message = socket.recv_multipart()
socket.send_multipart([remote_constants.HEARTBEAT_TAG])
except zmq.error.Again as e:
logger.warning("[Job] Cannot connect to the worker{}. ".format(
self.worker_address) + "Job will quit.")
break
socket.close(0)
os._exit(1)
def wait_for_files(self, reply_socket, job_address):
"""Wait for python files from remote object.
When a remote object receives the allocated job address, it will send
the python files to the job. Later, the job will save these files to a
temporary directory and add the temporary diretory to Python's working
directory.
Args:
reply_socket (socket): main socket that accepts commands from the remote object.
job_address (String): address of reply_socket.
Returns:
A temporary directory containing the python files.
"""
message = reply_socket.recv_multipart()
tag = message[0]
if tag == remote_constants.SEND_FILE_TAG:
pyfiles = pickle.loads(message[1])
# save python files to temporary directory
envdir = tempfile.mkdtemp()
for file, code in pyfiles['python_files'].items():
file = os.path.join(envdir, file)
with open(file, 'wb') as code_file:
code_file.write(code)
# save other files to current directory
for file, content in pyfiles['other_files'].items():
# create directory (i.e. ./rom_files/)
if '/' in file:
try:
sep = os.sep
recursive_dirs = os.path.join(*(file.split(sep)[:-1]))
recursive_dirs = os.path.join(envdir, recursive_dirs)
os.makedirs(recursive_dirs)
except OSError as e:
pass
file = os.path.join(envdir, file)
with open(file, 'wb') as f:
f.write(content)
reply_socket.send_multipart([remote_constants.NORMAL_TAG])
return envdir
else:
logger.error("NotImplementedError:{}, received tag:{}".format(
job_address, tag))
raise NotImplementedError
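# Shape of the payload handled above (inferred from this method; file names are illustrative):
#   pyfiles = {
#       'python_files': {'model.py': b'<file bytes>'},   # saved into a temporary directory
#       'other_files': {'rom_files/pong.bin': b'...'},   # saved with their relative paths
#   }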
def wait_for_connection(self, reply_socket):
"""Wait for connection from the remote object.
The remote object will send its class information and initialization
arguments to the job, these parameters are then used to create a
local instance in the job process.
Args:
reply_socket (socket): main socket that accepts commands from the remote object.
Returns:
A local instance of the remote class object.
"""
message = reply_socket.recv_multipart()
tag = message[0]
obj = None
if tag == remote_constants.INIT_OBJECT_TAG:
try:
file_name, class_name, end_of_file = cloudpickle.loads(
message[1])
#/home/nlp-ol/Firework/baidu/nlp/evokit/python_api/es_agent -> es_agent
file_name = file_name.split(os.sep)[-1]
cls = load_remote_class(file_name, class_name, end_of_file)
args, kwargs = cloudpickle.loads(message[2])
logfile_path = os.path.join(self.log_dir, 'stdout.log')
with redirect_stdout_to_file(logfile_path):
obj = cls(*args, **kwargs)
except Exception as e:
traceback_str = str(traceback.format_exc())
error_str = str(e)
logger.error("traceback:\n{}".format(traceback_str))
reply_socket.send_multipart([
remote_constants.EXCEPTION_TAG,
to_byte(error_str + "\ntraceback:\n" + traceback_str)
])
return None
reply_socket.send_multipart([remote_constants.NORMAL_TAG])
else:
logger.error("Message from job {}".format(message))
reply_socket.send_multipart([
remote_constants.EXCEPTION_TAG,
b"[job]Unkonwn tag when tried to receive the class definition"
])
raise NotImplementedError
return obj
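# Wire format expected above (as read by this method; a descriptive sketch only):
#   message[0]  remote_constants.INIT_OBJECT_TAG
#   message[1]  cloudpickle.dumps((file_name, class_name, end_of_file))
#   message[2]  cloudpickle.dumps((args, kwargs))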
def run(self, job_address_sender, job_id_sender):
"""An infinite loop waiting for a new task.
Args:
job_address_sender (sending end of multiprocessing.Pipe): sends the job address of reply_socket to the main process.
job_id_sender (sending end of multiprocessing.Pipe): sends the job id to the main process.
"""
ctx = zmq.Context()
# create the reply_socket
reply_socket = ctx.socket(zmq.REP)
job_port = reply_socket.bind_to_random_port(addr="tcp://*")
reply_socket.linger = 0
job_ip = get_ip_address()
job_address = "{}:{}".format(job_ip, job_port)
job_id = job_address.replace(':', '_') + '_' + str(int(time.time()))
self.log_dir = os.path.expanduser('~/.parl_data/job/{}'.format(job_id))
logger.set_dir(self.log_dir)
logger.info(
"[Job] Job {} initialized. Reply heartbeat socket Address: {}.".
format(job_id, job_address))
job_address_sender.send(job_address)
job_id_sender.send(job_id)
try:
# receive source code from the actor and append them to the environment variables.
envdir = self.wait_for_files(reply_socket, job_address)
sys.path.insert(0, envdir)
os.chdir(envdir)
obj = self.wait_for_connection(reply_socket)
assert obj is not None
self.single_task(obj, reply_socket, job_address)
except Exception as e:
logger.error(
"Error occurs when running a single task. We will reset this job. \nReason:{}"
.format(e))
traceback_str = str(traceback.format_exc())
logger.error("traceback:\n{}".format(traceback_str))
def single_task(self, obj, reply_socket, job_address):
"""An infinite loop waiting for commands from the remote object.
Each job receives two kinds of messages from the remote object:
1. When the remote object calls a function, the job runs the
function on the local instance and returns the result to the
remote object.
2. When the remote object is deleted, the job quits and releases
the related computation resources.
Args:
reply_socket (socket): main socket that accepts commands from the remote object.
job_address (String): address of reply_socket.
"""
while True:
message = reply_socket.recv_multipart()
tag = message[0]
if tag == remote_constants.CALL_TAG:
try:
function_name = to_str(message[1])
data = message[2]
args, kwargs = loads_argument(data)
# Redirect stdout to stdout.log temporarily
logfile_path = os.path.join(self.log_dir, 'stdout.log')
with redirect_stdout_to_file(logfile_path):
ret = getattr(obj, function_name)(*args, **kwargs)
ret = dumps_return(ret)
reply_socket.send_multipart(
[remote_constants.NORMAL_TAG, ret])
except Exception as e:
# reset the job
error_str = str(e)
logger.error(error_str)
if type(e) == AttributeError:
reply_socket.send_multipart([
remote_constants.ATTRIBUTE_EXCEPTION_TAG,
to_byte(error_str)
])
raise AttributeError
elif type(e) == SerializeError:
reply_socket.send_multipart([
remote_constants.SERIALIZE_EXCEPTION_TAG,
to_byte(error_str)
])
raise SerializeError
elif type(e) == DeserializeError:
reply_socket.send_multipart([
remote_constants.DESERIALIZE_EXCEPTION_TAG,
to_byte(error_str)
])
raise DeserializeError
else:
traceback_str = str(traceback.format_exc())
logger.error("traceback:\n{}".format(traceback_str))
reply_socket.send_multipart([
remote_constants.EXCEPTION_TAG,
to_byte(error_str + "\ntraceback:\n" +
traceback_str)
])
break
# received KILLJOB_TAG: the actor has exited, so stop replying and exit this job
elif tag == remote_constants.KILLJOB_TAG:
reply_socket.send_multipart([remote_constants.NORMAL_TAG])
logger.warning("An actor exits and this job {} will exit.".
format(job_address))
break
else:
logger.error(
"The job receives an unknown message: {}".format(message))
raise NotImplementedError
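# For reference, the CALL_TAG request handled above carries (per the code in single_task):
#   message[0]  remote_constants.CALL_TAG
#   message[1]  the function name (bytes)
#   message[2]  dumps_argument((args, kwargs))
# and the reply on success is [NORMAL_TAG, dumps_return(result)].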
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--worker_address", required=True, type=str, help="worker_address")
parser.add_argument(
"--log_server_address",
required=True,
type=str,
help="log_server_address, address of the log web server on worker")
args = parser.parse_args()
job = Job(args.worker_address, args.log_server_address)
|
h2o_cloud.py
|
import time, os, stat, json, signal, tempfile, shutil, datetime, inspect, threading, getpass
import requests, argparse, sys, unittest, glob
import urlparse, logging, random
import psutil
import h2o_sandbox
# used in shutil.rmtree permission hack for windows
import errno
# For checking ports in use, using netstat thru a subprocess.
from subprocess import Popen, PIPE
def verboseprint(*args, **kwargs):
if verbose:
for x in args: # so you don't have to create a single string
print x,
for x in kwargs: # so you don't have to create a single string
print x,
print
# so we can see problems when hung?
sys.stdout.flush()
# The cloud is uniquely named per user (only)
# Fine to uniquely identify the flatfile by name only also?
# Both are the user that runs the test. The config might have a different username on the
# remote machine (0xdiag, say, or hduser)
def flatfile_name():
return ('pytest_flatfile-%s' % getpass.getuser())
# only usable after you've built a cloud (junit, watch out)
def cloud_name():
return nodes[0].cloud_name
def __drain(src, dst):
for l in src:
if type(dst) == type(0):
os.write(dst, l)
else:
dst.write(l)
dst.flush()
src.close()
if type(dst) == type(0):
os.close(dst)
def drain(src, dst):
t = threading.Thread(target=__drain, args=(src, dst))
t.daemon = True
t.start()
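# Usage sketch (illustrative; 'proc' is a hypothetical psutil.Popen handle): pump a
# subprocess's stdout into a log file descriptor without blocking the caller.
#   outfd, outpath = tmp_file('node.stdout.', '.log')
#   drain(proc.stdout, outfd)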
# Hackery: find the ip address that gets you to Google's DNS
# Trickiness because you might have multiple IP addresses (Virtualbox), or Windows.
# we used to not like giving ip 127.0.0.1 to h2o?
def get_ip_address():
if ipaddr_from_cmd_line:
verboseprint("get_ip case 1:", ipaddr_from_cmd_line)
return ipaddr_from_cmd_line
import socket
ip = '127.0.0.1'
socket.setdefaulttimeout(0.5)
hostname = socket.gethostname()
# this method doesn't work if vpn is enabled..it gets the vpn ip
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('8.8.8.8', 0))
ip = s.getsockname()[0]
verboseprint("get_ip case 2:", ip)
except:
pass
if ip.startswith('127'):
# drills down into family
ip = socket.getaddrinfo(hostname, None)[0][4][0]
verboseprint("get_ip case 3:", ip)
ipa = None
try:
# Translate a host name to IPv4 address format, extended interface.
# Return a triple (hostname, aliaslist, ipaddrlist)
# where hostname is the primary host name responding to the given ip_address,
# aliaslist is a (possibly empty) list of alternative host names for the same address, and
# ipaddrlist is a list of IPv4 addresses for the same interface on the same host
ghbx = socket.gethostbyname_ex(hostname)
for ips in ghbx[2]:
# only take the first
if ipa is None and not ips.startswith("127."):
ipa = ips[:]
verboseprint("get_ip case 4:", ipa)
if ip != ipa:
print "\nAssuming", ip, "is the ip address h2o will use but", ipa, "is probably the real ip?"
print "You might have a vpn active. Best to use '-ip " + ipa + "' to get python and h2o the same."
except:
pass
# print "Timeout during socket.gethostbyname_ex(hostname)"
verboseprint("get_ip_address:", ip)
# set it back to default higher timeout (None would be no timeout?)
socket.setdefaulttimeout(5)
return ip
def get_sandbox_name():
if os.environ.has_key("H2O_SANDBOX_NAME"):
return os.environ["H2O_SANDBOX_NAME"]
else:
return "sandbox"
def unit_main():
global python_test_name, python_cmd_args, python_cmd_line, python_cmd_ip, python_username
# python_test_name = inspect.stack()[1][1]
python_test_name = ""
python_cmd_args = " ".join(sys.argv[1:])
python_cmd_line = "python %s %s" % (python_test_name, python_cmd_args)
python_username = getpass.getuser()
print "\nTest: %s command line: %s" % (python_test_name, python_cmd_line)
parse_our_args()
unittest.main()
verbose = False
ipaddr_from_cmd_line = None
config_json = None
debugger = False
random_seed = None
beta_features = True
abort_after_import = False
debug_rest = False
# jenkins gets this assign, but not the unit_main one?
# python_test_name = inspect.stack()[1][1]
python_test_name = ""
# trust what the user says!
if ipaddr_from_cmd_line:
python_cmd_ip = ipaddr_from_cmd_line
else:
python_cmd_ip = get_ip_address()
# no command line args if run with just nose
python_cmd_args = ""
# don't really know what it is if nosetests did some stuff. Should be just the test with no args
python_cmd_line = ""
python_username = getpass.getuser()
def parse_our_args():
parser = argparse.ArgumentParser()
# can add more here
parser.add_argument('-v', '--verbose', help='increased output', action='store_true')
parser.add_argument('-ip', '--ip', type=str, help='IP address to use for single host H2O with psutil control')
parser.add_argument('-cj', '--config_json',
help='Use this json format file to provide multi-host defaults. Overrides the default file pytest_config-<username>.json. These are used only if you do build_cloud_with_hosts()')
parser.add_argument('-dbg', '--debugger', help='Launch java processes with java debug attach mechanisms',
action='store_true')
parser.add_argument('-s', '--random_seed', type=int, help='initialize SEED (64-bit integer) for random generators')
parser.add_argument('-bf', '--beta_features', help='enable or switch to beta features (import2/parse2)',
action='store_true')
parser.add_argument('-debug_rest', '--debug_rest', help='Print REST API interactions to rest.log',
action='store_true')
parser.add_argument('unittest_args', nargs='*')
args = parser.parse_args()
global verbose, ipaddr_from_cmd_line, config_json, debugger
global random_seed, beta_features, debug_rest
verbose = args.verbose
ipaddr_from_cmd_line = args.ip
config_json = args.config_json
debugger = args.debugger
random_seed = args.random_seed
debug_rest = args.debug_rest
# Set sys.argv to the unittest args (leave sys.argv[0] as is)
# FIX! this isn't working to grab the args we don't care about
# Pass "--failfast" to stop on first error to unittest. and -v
# won't get this for jenkins, since it doesn't do parse_our_args
sys.argv[1:] = ['-v', "--failfast"] + args.unittest_args
# sys.argv[1:] = args.unittest_args
def find_file(base):
f = base
if not os.path.exists(f): f = '../' + base
if not os.path.exists(f): f = '../../' + base
if not os.path.exists(f): f = 'py/' + base
# these 2 are for finding from h2o-perf
if not os.path.exists(f): f = '../h2o/' + base
if not os.path.exists(f): f = '../../h2o/' + base
if not os.path.exists(f):
raise Exception("unable to find file %s" % base)
return f
# shutil.rmtree doesn't work on windows if the files are read only.
# On unix the parent dir has to not be readonly too.
# May still be issues with owner being different, like if 'system' is the guy running?
# Apparently this escape function on errors is the way shutil.rmtree can
# handle the permission issue. (do chmod here)
# But we shouldn't have read-only files. So don't try to handle that case.
def handleRemoveError(func, path, exc):
# If there was an error, it could be due to windows holding onto files.
# Wait a bit before retrying. Ignore errors on the retry. Just leave files.
# Ex. if we're in the looping cloud test deleting sandbox.
excvalue = exc[1]
print "Retrying shutil.rmtree of sandbox (2 sec delay). Will ignore errors. Exception was", excvalue.errno
time.sleep(2)
try:
func(path)
except OSError:
pass
LOG_DIR = get_sandbox_name()
def clean_sandbox():
if os.path.exists(LOG_DIR):
# shutil.rmtree hangs if symlinks in the dir? (in syn_datasets for multifile parse)
# use os.remove() first
for f in glob.glob(LOG_DIR + '/syn_datasets/*'):
verboseprint("cleaning", f)
os.remove(f)
# shutil.rmtree fails to delete very long filenames on Windoze
#shutil.rmtree(LOG_DIR)
# was this on 3/5/13. This seems reliable on windows+cygwin
### os.system("rm -rf "+LOG_DIR)
shutil.rmtree(LOG_DIR, ignore_errors=False, onerror=handleRemoveError)
# it should have been removed, but on error it might still be there
if not os.path.exists(LOG_DIR):
os.mkdir(LOG_DIR)
# who knows if this one is ok with windows...doesn't rm dir, just
# the stdout/stderr files
def clean_sandbox_stdout_stderr():
if os.path.exists(LOG_DIR):
files = []
# glob.glob returns a list (glob.iglob is the iterator)
for f in glob.glob(LOG_DIR + '/*stdout*'):
verboseprint("cleaning", f)
os.remove(f)
for f in glob.glob(LOG_DIR + '/*stderr*'):
verboseprint("cleaning", f)
os.remove(f)
def tmp_file(prefix='', suffix='', tmp_dir=None):
if not tmp_dir:
tmpdir = LOG_DIR
else:
tmpdir = tmp_dir
fd, path = tempfile.mkstemp(prefix=prefix, suffix=suffix, dir=tmpdir)
# make sure the file now exists
# os.open(path, 'a').close()
# give everyone permission to read it (jenkins running as
# 0xcustomer needs to archive it as jenkins)
permissions = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
os.chmod(path, permissions)
return (fd, path)
def tmp_dir(prefix='', suffix=''):
return tempfile.mkdtemp(prefix=prefix, suffix=suffix, dir=LOG_DIR)
def log(cmd, comment=None):
filename = LOG_DIR + '/commands.log'
# everyone can read
with open(filename, 'a') as f:
f.write(str(datetime.datetime.now()) + ' -- ')
# what got sent to h2o
# f.write(cmd)
# let's try saving the unencoded url instead..human readable
if cmd:
f.write(urlparse.unquote(cmd))
if comment:
f.write(' #')
f.write(comment)
f.write("\n")
elif comment: # for comment-only
f.write(comment + "\n")
# jenkins runs as 0xcustomer, and the file wants to be archived by jenkins who isn't in his group
permissions = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
os.chmod(filename, permissions)
def make_syn_dir():
# move under sandbox
# the LOG_DIR must have been created for commands.log before any datasets would be created
SYNDATASETS_DIR = LOG_DIR + '/syn_datasets'
if os.path.exists(SYNDATASETS_DIR):
shutil.rmtree(SYNDATASETS_DIR)
os.mkdir(SYNDATASETS_DIR)
return SYNDATASETS_DIR
def dump_json(j):
return json.dumps(j, sort_keys=True, indent=2)
# can't have a list of cmds, because cmd is a list
# cmdBefore gets executed first, and we wait for it to complete
def spawn_cmd(name, cmd, capture_output=True, **kwargs):
if capture_output:
outfd, outpath = tmp_file(name + '.stdout.', '.log')
errfd, errpath = tmp_file(name + '.stderr.', '.log')
# everyone can read
ps = psutil.Popen(cmd, stdin=None, stdout=outfd, stderr=errfd, **kwargs)
else:
outpath = '<stdout>'
errpath = '<stderr>'
ps = psutil.Popen(cmd, **kwargs)
comment = 'PID %d, stdout %s, stderr %s' % (
ps.pid, os.path.basename(outpath), os.path.basename(errpath))
log(' '.join(cmd), comment=comment)
return (ps, outpath, errpath)
def spawn_wait(ps, stdout, stderr, capture_output=True, timeout=None):
rc = ps.wait(timeout)
if capture_output:
out = file(stdout).read()
err = file(stderr).read()
else:
out = 'stdout not captured'
err = 'stderr not captured'
if rc is None:
ps.terminate()
raise Exception("%s %s timed out after %d\nstdout:\n%s\n\nstderr:\n%s" %
(ps.name, ps.cmdline, timeout or 0, out, err))
elif rc != 0:
raise Exception("%s %s failed.\nstdout:\n%s\n\nstderr:\n%s" %
(ps.name, ps.cmdline, out, err))
return rc
def spawn_cmd_and_wait(name, cmd, capture_output=True, timeout=None, **kwargs):
(ps, stdout, stderr) = spawn_cmd(name, cmd, capture_output, **kwargs)
spawn_wait(ps, stdout, stderr, capture_output, timeout)
global nodes
nodes = []
# I suppose we could shuffle the flatfile order!
# but it uses hosts, so if that got shuffled, we got it covered?
# the i in xrange part is not shuffled. maybe create the list first, for possible random shuffle
# FIX! default to random_shuffle for now..then switch to not.
def write_flatfile(node_count=2, base_port=54321, hosts=None):
# always create the flatfile.
ports_per_node = 2
pff = open(flatfile_name(), "w+")
# doing this list outside the loops so we can shuffle for better test variation
hostPortList = []
if hosts is None:
ip = python_cmd_ip
for i in range(node_count):
hostPortList.append(ip + ":" + str(base_port + ports_per_node * i))
else:
for h in hosts:
for i in range(node_count):
# removed leading "/"
hostPortList.append(h.addr + ":" + str(base_port + ports_per_node * i))
for hp in hostPortList:
pff.write(hp + "\n")
pff.close()
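# Resulting flatfile contents: one "ip:port" line per node, with 2 ports reserved per node,
# e.g. for node_count=2 on one host (addresses illustrative):
#   192.168.1.10:54321
#   192.168.1.10:54323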
def check_h2o_version():
# assumes you want to know about 3 ports starting at base_port
command1Split = ['java', '-jar', find_file('target/h2o.jar'), '--version']
command2Split = ['egrep', '-v', '( Java | started)']
print "Running h2o to get java version"
p1 = Popen(command1Split, stdout=PIPE)
p2 = Popen(command2Split, stdin=p1.stdout, stdout=PIPE)
output = p2.communicate()[0]
print output
def default_hosts_file():
if os.environ.has_key("H2O_HOSTS_FILE"):
return os.environ["H2O_HOSTS_FILE"]
return 'pytest_config-{0}.json'.format(getpass.getuser())
# node_count is per host if hosts is specified.
def build_cloud(node_count=1, base_port=54321, hosts=None,
timeoutSecs=30, retryDelaySecs=1, cleanup=True,
conservative=False, **kwargs):
clean_sandbox()
log("#*********************************************************************")
log("Starting new test: " + python_test_name + " at build_cloud()")
log("#*********************************************************************")
# start up h2o to report the java version (once). output to python stdout
# only do this for regression testing
if getpass.getuser() == 'jenkins':
check_h2o_version()
# keep this param in kwargs, because we pass it to the H2O node build, so state
# is created that polling and other normal things can check, to decide to dump
# info to benchmark.log
if kwargs.setdefault('enable_benchmark_log', False):
setup_benchmark_log()
ports_per_node = 2
nodeList = []
try:
# if no hosts list, use psutil method on local host.
totalNodes = 0
# doing this list outside the loops so we can shuffle for better test variation
# this jvm startup shuffle is independent from the flatfile shuffle
portList = [base_port + ports_per_node * i for i in range(node_count)]
if hosts is None:
# if use_flatfile, we should create it,
# because tests will just call build_cloud with use_flatfile=True
# best to just create it all the time..may or may not be used
write_flatfile(node_count=node_count, base_port=base_port)
hostCount = 1
for p in portList:
verboseprint("psutil starting node", i)
newNode = LocalH2O(port=p, node_id=totalNodes, **kwargs)
nodeList.append(newNode)
totalNodes += 1
else:
# if hosts, the flatfile was created and uploaded to hosts already
# I guess don't recreate it, don't overwrite the one that was copied beforehand.
# we don't always use the flatfile (use_flatfile=False)
# Suppose we could dispatch from the flatfile to match its contents
# but sometimes we want to test with a bad/different flatfile then we invoke h2o?
hostCount = len(hosts)
hostPortList = []
for h in hosts:
for port in portList:
hostPortList.append((h, port))
for (h, p) in hostPortList:
verboseprint('ssh starting node', totalNodes, 'via', h)
newNode = h.remote_h2o(port=p, node_id=totalNodes, **kwargs)
nodeList.append(newNode)
totalNodes += 1
verboseprint("Attempting Cloud stabilize of", totalNodes, "nodes on", hostCount, "hosts")
start = time.time()
# UPDATE: best to stabilize on the last node!
stabilize_cloud(nodeList[0], len(nodeList),
timeoutSecs=timeoutSecs, retryDelaySecs=retryDelaySecs, noExtraErrorCheck=True)
verboseprint(len(nodeList), "Last added node stabilized in ", time.time() - start, " secs")
verboseprint("Built cloud: %d nodes on %d hosts, in %d s" % (len(nodeList),
hostCount, (time.time() - start)))
print "Built cloud:", nodeList[0].java_heap_GB, "GB java heap(s) with", len(nodeList), "total nodes"
# FIX! using "consensus" in node[-1] should mean this is unnecessary?
# maybe there's a bug. For now do this. long term: don't want?
# UPDATE: do it for all cases now 2/14/13
if conservative: # still needed?
for n in nodeList:
stabilize_cloud(n, len(nodeList), timeoutSecs=timeoutSecs, noExtraErrorCheck=True)
# this does some extra checking now
verify_cloud_size(nodeList)
# best to check for any errors due to cloud building right away?
check_sandbox_for_errors(python_test_name=python_test_name)
except:
if cleanup:
for n in nodeList: n.terminate()
else:
nodes[:] = nodeList
check_sandbox_for_errors(python_test_name=python_test_name)
raise
# this is just in case they don't assign the return to the nodes global?
nodes[:] = nodeList
print len(nodeList), "total jvms in H2O cloud"
# put the test start message in the h2o log, to create a marker
nodes[0].h2o_log_msg()
if config_json:
# like cp -p. Save the config file, to sandbox
print "Saving the ", config_json, "we used to", LOG_DIR
shutil.copy(config_json, LOG_DIR + "/" + os.path.basename(config_json))
# Figure out some stuff about how this test was run
cs_time = str(datetime.datetime.now())
cs_cwd = os.getcwd()
cs_python_cmd_line = "python %s %s" % (python_test_name, python_cmd_args)
cs_python_test_name = python_test_name
if config_json:
cs_config_json = os.path.abspath(config_json)
else:
cs_config_json = None
cs_username = python_username
cs_ip = python_cmd_ip
return nodeList
def upload_jar_to_remote_hosts(hosts, slow_connection=False):
def prog(sofar, total):
# output is bad for jenkins.
username = getpass.getuser()
if username != 'jenkins':
p = int(10.0 * sofar / total)
sys.stdout.write('\rUploading jar [%s%s] %02d%%' % ('#' * p, ' ' * (10 - p), 100 * sofar / total))
sys.stdout.flush()
if not slow_connection:
for h in hosts:
f = find_file('target/h2o.jar')
h.upload_file(f, progress=prog)
# skipping progress indicator for the flatfile
h.upload_file(flatfile_name())
else:
f = find_file('target/h2o.jar')
hosts[0].upload_file(f, progress=prog)
hosts[0].push_file_to_remotes(f, hosts[1:])
f = find_file(flatfile_name())
hosts[0].upload_file(f, progress=prog)
hosts[0].push_file_to_remotes(f, hosts[1:])
def check_sandbox_for_errors(cloudShutdownIsError=False, sandboxIgnoreErrors=False, python_test_name=''):
# don't have both tearDown and tearDownClass report the same found error
# only need the first
if nodes and nodes[0].sandbox_error_report(): # gets current state
return
# Can build a cloud that ignores all sandbox things that normally fatal the test
# Kludge, test will set this directly if it wants, rather than thru build_cloud parameter.
# we need the sandbox_ignore_errors, for the test teardown_cloud..the state disappears!
ignore = sandboxIgnoreErrors or (nodes and nodes[0].sandbox_ignore_errors)
errorFound = h2o_sandbox.check_sandbox_for_errors(
LOG_DIR=LOG_DIR,
sandboxIgnoreErrors=ignore,
cloudShutdownIsError=cloudShutdownIsError,
python_test_name=python_test_name)
if errorFound and nodes:
nodes[0].sandbox_error_report(True) # sets
def tear_down_cloud(nodeList=None, sandboxIgnoreErrors=False):
if not nodeList: nodeList = nodes
try:
for n in nodeList:
n.terminate()
verboseprint("tear_down_cloud n:", n)
finally:
check_sandbox_for_errors(sandboxIgnoreErrors=sandboxIgnoreErrors, python_test_name=python_test_name)
nodeList[:] = []
# timeoutSecs is per individual node get_cloud()
def verify_cloud_size(nodeList=None, verbose=False, timeoutSecs=10, ignoreHealth=False):
if not nodeList: nodeList = nodes
expectedSize = len(nodeList)
# cloud size and consensus have to reflect a single grab of information from a node.
cloudStatus = [n.get_cloud(timeoutSecs=timeoutSecs) for n in nodeList]
cloudSizes = [c['cloud_size'] for c in cloudStatus]
cloudConsensus = [c['consensus'] for c in cloudStatus]
cloudHealthy = [c['cloud_healthy'] for c in cloudStatus]
if not all(cloudHealthy):
msg = "Some node reported cloud_healthy not true: %s" % cloudHealthy
if not ignoreHealth:
raise Exception(msg)
# gather up all the node_healthy status too
for i, c in enumerate(cloudStatus):
nodesHealthy = [n['node_healthy'] for n in c['nodes']]
if not all(nodesHealthy):
print "node %s cloud status: %s" % (i, dump_json(c))
msg = "node %s says some node is not reporting node_healthy: %s" % (c['node_name'], nodesHealthy)
if not ignoreHealth:
raise Exception(msg)
if expectedSize == 0 or len(cloudSizes) == 0 or len(cloudConsensus) == 0:
print "\nexpectedSize:", expectedSize
print "cloudSizes:", cloudSizes
print "cloudConsensus:", cloudConsensus
raise Exception("Nothing in cloud. Can't verify size")
for s in cloudSizes:
consensusStr = (",".join(map(str, cloudConsensus)))
sizeStr = (",".join(map(str, cloudSizes)))
if (s != expectedSize):
raise Exception("Inconsistent cloud size." +
"nodeList report size: %s consensus: %s instead of %d." % \
(sizeStr, consensusStr, expectedSize))
return (sizeStr, consensusStr, expectedSize)
def stabilize_cloud(node, node_count, timeoutSecs=14.0, retryDelaySecs=0.25, noExtraErrorCheck=False):
node.wait_for_node_to_accept_connections(timeoutSecs, noExtraErrorCheck=noExtraErrorCheck)
# want node saying cloud = expected size, plus thinking everyone agrees with that.
def test(n, tries=None):
c = n.get_cloud(noExtraErrorCheck=True)
# don't want to check everything. But this will check that the keys are returned!
consensus = c['consensus']
locked = c['locked']
cloud_size = c['cloud_size']
cloud_name = c['cloud_name']
node_name = c['node_name']
if 'nodes' not in c:
emsg = "\nH2O didn't include a list of nodes in get_cloud response after initial cloud build"
raise Exception(emsg)
# only print it when you get consensus
if cloud_size != node_count:
verboseprint("\nNodes in cloud while building:")
for ci in c['nodes']:
verboseprint(ci['name'])
if (cloud_size > node_count):
emsg = (
"\n\nERROR: cloud_size: %d reported via json is bigger than we expect: %d" % (cloud_size, node_count) +
"\nYou likely have zombie(s) with the same cloud name on the network, that's forming up with you." +
"\nLook at the cloud IP's in 'grep Paxos sandbox/*stdout*' for some IP's you didn't expect." +
"\n\nYou probably don't have to do anything, as the cloud shutdown in this test should" +
"\nhave sent a Shutdown.json to all in that cloud (you'll see a kill -2 in the *stdout*)." +
"\nIf you try again, and it still fails, go to those IPs and kill the zombie h2o's." +
"\nIf you think you really have an intermittent cloud build, report it." +
"\n" +
"\nUPDATE: building cloud size of 2 with 127.0.0.1 may temporarily report 3 incorrectly, with no zombie?"
)
raise Exception(emsg)
a = (cloud_size == node_count) and consensus
if a:
verboseprint("\tLocked won't happen until after keys are written")
verboseprint("\nNodes in final cloud:")
for ci in c['nodes']:
verboseprint(ci['name'])
return a
node.stabilize(test, error=('A cloud of size %d' % node_count),
timeoutSecs=timeoutSecs, retryDelaySecs=retryDelaySecs)
def log_rest(s):
if not debug_rest:
return
rest_log_file = open(os.path.join(LOG_DIR, "rest.log"), "a")
rest_log_file.write(s)
rest_log_file.write("\n")
rest_log_file.close()
class H2O(object):
def __url(self, loc, port=None):
# always use the new api port
if port is None: port = self.port
if loc.startswith('/'):
delim = ''
else:
delim = '/'
u = 'http://%s:%d%s%s' % (self.http_addr, port, delim, loc)
return u
def __do_json_request(self, jsonRequest=None, fullUrl=None, timeout=10, params=None, returnFast=False,
cmd='get', extraComment=None, ignoreH2oError=False, noExtraErrorCheck=False, **kwargs):
# if the fullUrl param is given, use it as the full url; otherwise create it from the jsonRequest
if fullUrl:
url = fullUrl
else:
url = self.__url(jsonRequest)
# remove any params that are 'None'
# need to copy dictionary, since can't delete while iterating
if params is not None:
params2 = params.copy()
for k in params2:
if params2[k] is None:
del params[k]
paramsStr = '?' + '&'.join(['%s=%s' % (k, v) for (k, v) in params.items()])
else:
paramsStr = ''
if extraComment:
log('Start ' + url + paramsStr, comment=extraComment)
else:
log('Start ' + url + paramsStr)
log_rest("")
log_rest("----------------------------------------------------------------------\n")
if extraComment:
log_rest("# Extra comment info about this request: " + extraComment)
if cmd == 'get':
log_rest("GET")
else:
log_rest("POST")
log_rest(url + paramsStr)
# file get passed thru kwargs here
try:
if cmd == 'post':
r = requests.post(url, timeout=timeout, params=params, **kwargs)
else:
r = requests.get(url, timeout=timeout, params=params, **kwargs)
except Exception, e:
# rethrow the exception after we've checked for stack trace from h2o
# out of memory errors maybe don't show up right away? so we should wait for h2o
# to get it out to h2o stdout. We don't want to rely on cloud teardown to check
# because there's no delay, and we don't want to delay all cloud teardowns by waiting.
# (this is new/experimental)
exc_info = sys.exc_info()
# use this to ignore the initial connection errors during build cloud when h2o is coming up
if not noExtraErrorCheck:
print "ERROR: got exception on %s to h2o. \nGoing to check sandbox, then rethrow.." % (url + paramsStr)
time.sleep(2)
check_sandbox_for_errors(python_test_name=python_test_name);
log_rest("")
log_rest("EXCEPTION CAUGHT DOING REQUEST: " + str(e.message))
raise exc_info[1], None, exc_info[2]
log_rest("")
try:
if r is None:
log_rest("r is None")
else:
log_rest("HTTP status code: " + str(r.status_code))
if hasattr(r, 'text'):
if r.text is None:
log_rest("r.text is None")
else:
log_rest(r.text)
else:
log_rest("r does not have attr text")
except Exception, e:
# Paranoid exception catch.
# Ignore logging exceptions in the case that the above error checking isn't sufficient.
pass
# fatal if no response
if not r:
raise Exception("Maybe bad url? no r in __do_json_request in %s:" % inspect.stack()[1][3])
rjson = None
if returnFast:
return
try:
rjson = r.json()
except:
print dump_json(r.text)
if not isinstance(r, (list, dict)):
raise Exception("h2o json responses should always be lists or dicts, see previous for text")
raise Exception("Could not decode any json from the request.")
# TODO: we should really only look in the response object. This check
# prevents us from having a field called "error" (e.g., for a scoring result).
for e in ['error', 'Error', 'errors', 'Errors']:
# error can be null (python None). This happens in exec2
if e in rjson and rjson[e]:
print "rjson:", dump_json(rjson)
emsg = 'rjson %s in %s: %s' % (e, inspect.stack()[1][3], rjson[e])
if ignoreH2oError:
# well, we print it..so not totally ignore. test can look at rjson returned
print emsg
else:
print emsg
raise Exception(emsg)
for w in ['warning', 'Warning', 'warnings', 'Warnings']:
# warning can be null (python None).
if w in rjson and rjson[w]:
verboseprint(dump_json(rjson))
print 'rjson %s in %s: %s' % (w, inspect.stack()[1][3], rjson[w])
return rjson
def get_cloud(self, noExtraErrorCheck=False, timeoutSecs=10):
# pass along the caller's timeoutSecs (defaults to 10 seconds)
a = self.__do_json_request('Cloud.json', noExtraErrorCheck=noExtraErrorCheck, timeout=timeoutSecs)
consensus = a['consensus']
locked = a['locked']
cloud_size = a['cloud_size']
cloud_name = a['cloud_name']
node_name = a['node_name']
node_id = self.node_id
verboseprint('%s%s %s%s %s%s %s%s' % (
"\tnode_id: ", node_id,
"\tcloud_size: ", cloud_size,
"\tconsensus: ", consensus,
"\tlocked: ", locked,
))
return a
def h2o_log_msg(self, message=None):
if 1 == 0:
return
if not message:
message = "\n"
message += "\n#***********************"
message += "\npython_test_name: " + python_test_name
message += "\n#***********************"
params = {'message': message}
self.__do_json_request('2/LogAndEcho', params=params)
# Shutdown url is like a reset button. Doesn't send a response before it kills stuff
# safer if random things are wedged, rather than requiring response
# so request library might retry and get exception. allow that.
def shutdown_all(self):
try:
self.__do_json_request('Shutdown.json', noExtraErrorCheck=True)
except:
pass
time.sleep(1) # a little delay needed?
return (True)
def put_value(self, value, key=None, repl=None):
return self.__do_json_request(
'PutValue.json',
params={"value": value, "key": key, "replication_factor": repl},
extraComment=str(value) + "," + str(key) + "," + str(repl))
# noise is a 2-tuple ("StoreView", none) for url plus args for doing during poll to create noise
# so we can create noise with different urls!, and different parms to that url
# no noise if None
def poll_url(self, response,
timeoutSecs=10, retryDelaySecs=0.5, initialDelaySecs=0, pollTimeoutSecs=180,
noise=None, benchmarkLogging=None, noPoll=False, reuseFirstPollUrl=False, noPrint=False):
### print "poll_url: pollTimeoutSecs", pollTimeoutSecs
verboseprint('poll_url input: response:', dump_json(response))
print "at top of poll_url, timeoutSec: ", timeoutSecs
def get_redirect_url(response):
url = None
params = None
# StoreView has old style, while beta_features
if 'response_info' in response:
response_info = response['response_info']
if 'redirect_url' not in response_info:
raise Exception("Response during polling must have 'redirect_url'\n%s" % dump_json(response))
if response_info['status'] != 'done':
redirect_url = response_info['redirect_url']
if redirect_url:
url = self.__url(redirect_url)
params = None
else:
if response_info['status'] != 'done':
raise Exception(
"'redirect_url' during polling is null but status!='done': \n%s" % dump_json(response))
else:
if 'response' not in response:
raise Exception("'response' not in response.\n%s" % dump_json(response))
if response['response']['status'] != 'done':
if 'redirect_request' not in response['response']:
raise Exception("'redirect_request' not in response. \n%s" % dump_json(response))
url = self.__url(response['response']['redirect_request'])
params = response['response']['redirect_request_args']
return (url, params)
# if we never poll
msgUsed = None
if 'response_info' in response: # trigger v2 for GBM always?
status = response['response_info']['status']
progress = response.get('progress', "")
else:
r = response['response']
status = r['status']
progress = r.get('progress', "")
doFirstPoll = status != 'done'
(url, params) = get_redirect_url(response)
# no need to recreate the string for messaging, in the loop..
if params:
paramsStr = '&'.join(['%s=%s' % (k, v) for (k, v) in params.items()])
else:
paramsStr = ''
# FIX! don't do JStack noise for tests that ask for it. JStack seems to have problems
noise_enable = noise and noise != ("JStack", None)
if noise_enable:
print "Using noise during poll_url:", noise
# noise_json should be like "Storeview"
(noise_json, noiseParams) = noise
noiseUrl = self.__url(noise_json + ".json")
if noiseParams is None:
noiseParamsStr = ""
else:
noiseParamsStr = '&'.join(['%s=%s' % (k, v) for (k, v) in noiseParams.items()])
start = time.time()
count = 0
if initialDelaySecs:
time.sleep(initialDelaySecs)
# can end with status = 'redirect' or 'done'
# Update: on DRF2, the first RF redirects to progress. So we should follow that, and follow any redirect to view?
# so for v2, we'll always follow redirects?
# For v1, we're not forcing the first status to be 'poll' now..so it could be redirect or done?(NN score? if blocking)
# Don't follow the Parse redirect to Inspect, because we want parseResult['destination_key'] to be the end.
# note this doesn't affect polling with Inspect? (since it doesn't redirect ?
while status == 'poll' or doFirstPoll or (status == 'redirect' and 'Inspect' not in url):
count += 1
if ((time.time() - start) > timeoutSecs):
# show what we're polling with (use the current url so this works even before the first poll)
emsg = "Exceeded timeoutSecs: %d secs while polling." % timeoutSecs + \
"status: %s, url: %s?%s" % (status, url, paramsStr)
raise Exception(emsg)
if benchmarkLogging:
cloudPerfH2O.get_log_save(benchmarkLogging)
# every other one?
create_noise = noise_enable and ((count % 2) == 0)
if create_noise:
urlUsed = noiseUrl
paramsUsed = noiseParams
paramsUsedStr = noiseParamsStr
msgUsed = "\nNoise during polling with"
else:
urlUsed = url
paramsUsed = params
paramsUsedStr = paramsStr
msgUsed = "\nPolling with"
print status, progress, urlUsed
time.sleep(retryDelaySecs)
response = self.__do_json_request(fullUrl=urlUsed, timeout=pollTimeoutSecs, params=paramsUsed)
verboseprint(msgUsed, urlUsed, paramsUsedStr, "Response:", dump_json(response))
# hey, check the sandbox if we've been waiting a long time...rather than wait for timeout
if ((count % 6) == 0):
check_sandbox_for_errors(python_test_name=python_test_name)
if (create_noise):
# this guarantees the loop is done, so we don't need to worry about
# a 'return r' being interpreted from a noise response
status = 'poll'
progress = ''
else:
doFirstPoll = False
status = response['response_info']['status']
progress = response.get('progress', "")
# get the redirect url
if not reuseFirstPollUrl: # reuse url for all v1 stuff
(url, params) = get_redirect_url(response)
if noPoll:
return response
# won't print if we didn't poll
if msgUsed:
verboseprint(msgUsed, urlUsed, paramsUsedStr, "Response:", dump_json(response))
return response
def stabilize(self, test_func, error, timeoutSecs=10, retryDelaySecs=0.5):
'''Repeatedly test a function waiting for it to return True.
Arguments:
test_func -- A function that will be run repeatedly
error -- A function that will be run to produce an error message
it will be called with (node, timeTakenSecs, numberOfRetries)
OR
-- A string that will be interpolated with a dictionary of
{ 'timeTakenSecs', 'numberOfRetries' }
timeoutSecs -- How long in seconds to keep trying before declaring a failure
retryDelaySecs -- How long to wait between retry attempts
'''
start = time.time()
numberOfRetries = 0
while time.time() - start < timeoutSecs:
if test_func(self, tries=numberOfRetries):
break
time.sleep(retryDelaySecs)
numberOfRetries += 1
# hey, check the sandbox if we've been waiting a long time...rather than wait for timeout
# to find the badness?. can check_sandbox_for_errors at any time
if ((numberOfRetries % 50) == 0):
check_sandbox_for_errors(python_test_name=python_test_name)
else:
timeTakenSecs = time.time() - start
if isinstance(error, str):
raise Exception('%s failed after %.2f seconds having retried %d times' % (
error, timeTakenSecs, numberOfRetries))
else:
msg = error(self, timeTakenSecs, numberOfRetries)
raise Exception(msg)
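# Usage sketch of the string-error form (see wait_for_node_to_accept_connections below for a
# real caller); the lambda here is illustrative only:
#   node.stabilize(lambda n, tries=None: n.get_cloud()['consensus'],
#                  error='Cloud reaching consensus', timeoutSecs=30, retryDelaySecs=0.5)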
def wait_for_node_to_accept_connections(self, timeoutSecs=15, noExtraErrorCheck=False):
verboseprint("wait_for_node_to_accept_connections")
def test(n, tries=None):
try:
n.get_cloud(noExtraErrorCheck=noExtraErrorCheck)
return True
except requests.ConnectionError, e:
# Now using: requests 1.1.0 (easy_install --upgrade requests) 2/5/13
# Now: assume all requests.ConnectionErrors are H2O legal connection errors.
# Have trouble finding where the errno is, fine to assume all are good ones.
# Timeout check will kick in if continued H2O badness.
return False
self.stabilize(test, 'Cloud accepting connections',
timeoutSecs=timeoutSecs, # with cold cache's this can be quite slow
retryDelaySecs=0.1) # but normally it is very fast
def sandbox_error_report(self, done=None):
# not clearable..just or in new value
if done:
self.sandbox_error_was_reported = True
return (self.sandbox_error_was_reported)
def get_args(self):
args = ['java']
# I guess it doesn't matter if we use flatfile for both now
# defaults to not specifying
# FIX! we need to check that it's not outside the limits of the dram of the machine it's running on?
if self.java_heap_GB is not None:
if not (1 <= self.java_heap_GB <= 256):
raise Exception('java_heap_GB <1 or >256 (GB): %s' % (self.java_heap_GB))
args += ['-Xms%dG' % self.java_heap_GB]
args += ['-Xmx%dG' % self.java_heap_GB]
if self.java_heap_MB is not None:
if not (1 <= self.java_heap_MB <= 256000):
raise Exception('java_heap_MB <1 or >256000 (MB): %s' % (self.java_heap_MB))
args += ['-Xms%dm' % self.java_heap_MB]
args += ['-Xmx%dm' % self.java_heap_MB]
if self.java_extra_args is not None:
args += ['%s' % self.java_extra_args]
args += ["-ea"]
if self.use_maprfs:
args += ["-Djava.library.path=/opt/mapr/lib"]
if self.classpath:
entries = [find_file('build/classes'), find_file('lib/javassist.jar')]
entries += glob.glob(find_file('lib') + '/*/*.jar')
entries += glob.glob(find_file('lib') + '/*/*/*.jar')
args += ['-classpath', os.pathsep.join(entries), 'water.Boot']
else:
args += ["-jar", self.get_h2o_jar()]
if 1==1:
if self.hdfs_config:
args += [
'-hdfs_config=' + self.hdfs_config
]
if beta_features:
args += ["-beta"]
# H2O should figure it out, if not specified
# DON"T EVER USE on multi-machine...h2o should always get it right, to be able to run on hadoop
# where it's not told
if (self.addr is not None) and (not self.remoteH2O):
args += [
'--ip=%s' % self.addr,
]
# Need to specify port, since there can be multiple ports for an ip in the flatfile
if self.port is not None:
args += [
"--port=%d" % self.port,
]
if self.use_debugger:
# currently hardwire the base port for debugger to 8000
# increment by one for every node we add
# since this order is different from the h2o cluster order, print out the ip and port for the user
# we could save debugger_port state per node, but not really necessary (but would be more consistent)
debuggerBasePort = 8000
if self.node_id is None:
debuggerPort = debuggerBasePort
else:
debuggerPort = debuggerBasePort + self.node_id
if self.http_addr:
a = self.http_addr
else:
a = "localhost"
if self.port:
b = str(self.port)
else:
b = "h2o determined"
# I guess we always specify port?
print "You can attach debugger at port %s for jvm at %s:%s" % (debuggerPort, a, b)
args += ['-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=%s' % debuggerPort]
if self.use_flatfile:
args += [
'--flatfile=' + self.flatfile,
]
args += [
'--ice_root=%s' % self.get_ice_dir(),
# if I have multiple jenkins projects doing different h2o clouds, I need
# I need different ports and different cloud name.
# does different cloud name prevent them from joining up
# (even if same multicast ports?)
# I suppose I can force a base address. or run on another machine?
]
args += [
'--name=' + self.cloud_name
]
# ignore the other -hdfs args if the config is used?
if 1==0:
if self.hdfs_config:
args += [
'-hdfs_config=' + self.hdfs_config
]
if self.use_hdfs:
args += [
# it's fine if hdfs_name has a ":9000" port or something too
'-hdfs hdfs://' + self.hdfs_name_node,
'-hdfs_version=' + self.hdfs_version,
]
if self.use_maprfs:
args += [
# 3 slashes?
'-hdfs maprfs:///' + self.hdfs_name_node,
'-hdfs_version=' + self.hdfs_version,
]
if self.aws_credentials:
args += ['--aws_credentials=' + self.aws_credentials]
if self.disable_h2o_log:
args += ['--nolog']
# disable logging of requests, as some contain "error", which fails the test
## FIXED. better escape in check_sandbox_for_errors
## args += ['--no_requests_log']
return args
def __init__(self,
use_this_ip_addr=None, port=54321, capture_output=True,
use_debugger=None, classpath=None,
use_hdfs=False, use_maprfs=False,
# hdfs_version="cdh4", hdfs_name_node="172.16.2.151",
# hdfs_version="cdh4", hdfs_name_node="172.16.2.176",
hdfs_version=None, hdfs_name_node=None, hdfs_config=None,
aws_credentials=None,
use_flatfile=False, java_heap_GB=None, java_heap_MB=None, java_extra_args=None,
use_home_for_ice=False, node_id=None, username=None,
disable_h2o_log=False,
enable_benchmark_log=False,
h2o_remote_buckets_root=None,
delete_keys_at_teardown=False,
cloud_name=None,
):
if use_hdfs:
# see if we can touch a 0xdata machine
try:
# long timeout in ec2...bad
a = requests.get('http://172.16.2.176:80', timeout=1)
hdfs_0xdata_visible = True
except:
hdfs_0xdata_visible = False
# different defaults, depending on where we're running
if hdfs_name_node is None:
if hdfs_0xdata_visible:
hdfs_name_node = "172.16.2.176"
else: # ec2
hdfs_name_node = "10.78.14.235:9000"
if hdfs_version is None:
if hdfs_0xdata_visible:
hdfs_version = "cdh4"
else: # ec2
hdfs_version = "0.20.2"
self.aws_credentials = aws_credentials
self.port = port
# None is legal for self.addr.
# means we won't give an ip to the jar when we start.
# Or we can say use use_this_ip_addr=127.0.0.1, or the known address
# if use_this_addr is None, use 127.0.0.1 for urls and json
# Command line arg 'ipaddr_from_cmd_line' dominates:
if ipaddr_from_cmd_line:
self.addr = ipaddr_from_cmd_line
else:
self.addr = use_this_ip_addr
if self.addr is not None:
self.http_addr = self.addr
else:
self.http_addr = get_ip_address()
# command line should always dominate for enabling
if debugger: use_debugger = True
self.use_debugger = use_debugger
self.classpath = classpath
self.capture_output = capture_output
self.use_hdfs = use_hdfs
self.use_maprfs = use_maprfs
self.hdfs_name_node = hdfs_name_node
self.hdfs_version = hdfs_version
self.hdfs_config = hdfs_config
self.use_flatfile = use_flatfile
self.java_heap_GB = java_heap_GB
self.java_heap_MB = java_heap_MB
self.java_extra_args = java_extra_args
self.use_home_for_ice = use_home_for_ice
self.node_id = node_id
if username:
self.username = username
else:
self.username = getpass.getuser()
# don't want multiple reports from tearDown and tearDownClass
# have nodes[0] remember (0 always exists)
self.sandbox_error_was_reported = False
self.sandbox_ignore_errors = False
self.disable_h2o_log = disable_h2o_log
# this dumps stats from tests, and perf stats while polling to benchmark.log
self.enable_benchmark_log = enable_benchmark_log
self.h2o_remote_buckets_root = h2o_remote_buckets_root
self.delete_keys_at_teardown = delete_keys_at_teardown
if cloud_name:
self.cloud_name = cloud_name
else:
self.cloud_name = 'pytest-%s-%s' % (getpass.getuser(), os.getpid())
def __str__(self):
return '%s - http://%s:%d/' % (type(self), self.http_addr, self.port)
#*****************************************************************
class LocalH2O(H2O):
'''An H2O instance launched by the python framework on the local host using psutil'''
def __init__(self, *args, **kwargs):
super(LocalH2O, self).__init__(*args, **kwargs)
self.rc = None
# FIX! no option for local /home/username ..always the sandbox (LOG_DIR)
self.ice = tmp_dir('ice.')
self.flatfile = flatfile_name()
self.remoteH2O = False # so we can tell if we're remote or local
if self.node_id is not None:
logPrefix = 'local-h2o-' + str(self.node_id)
else:
logPrefix = 'local-h2o'
spawn = spawn_cmd(logPrefix, cmd=self.get_args(), capture_output=self.capture_output)
self.ps = spawn[0]
def get_h2o_jar(self):
return find_file('target/h2o.jar')
def get_flatfile(self):
return self.flatfile
# return find_file(flatfile_name())
def get_ice_dir(self):
return self.ice
def is_alive(self):
verboseprint("Doing is_alive check for LocalH2O", self.wait(0))
return self.wait(0) is None
def terminate_self_only(self):
try:
if self.is_alive(): self.ps.kill()
if self.is_alive(): self.ps.terminate()
return self.wait(0.5)
except psutil.NoSuchProcess:
return -1
def terminate(self):
# send a shutdown request first.
# since local is used for a lot of buggy new code, also do the ps kill.
# try/except inside shutdown_all now
self.shutdown_all()
if self.is_alive():
print "\nShutdown didn't work for local node? : %s. Will kill though" % self
self.terminate_self_only()
def wait(self, timeout=0):
if self.rc is not None:
return self.rc
try:
self.rc = self.ps.wait(timeout)
return self.rc
except psutil.TimeoutExpired:
return None
def stack_dump(self):
self.ps.send_signal(signal.SIGQUIT)
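# A minimal usage sketch (not part of the original framework): launch a
# LocalH2O node and shut it down again. It assumes the harness globals that
# LocalH2O relies on (spawn_cmd, tmp_dir, flatfile_name, find_file,
# ipaddr_from_cmd_line, ...) are already set up by the surrounding module;
# the port and heap size are illustrative placeholders only.
def _demo_local_h2o():
    node = LocalH2O(port=54321, java_heap_GB=1)
    try:
        print "node is alive:", node.is_alive()
    finally:
        # sends a shutdown request first, then kills the process if needed
        node.terminate()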
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file license.txt or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
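# A minimal usage sketch of the BitcoinRPC helper above (not part of the
# original miner). It assumes a bitcoind JSON-RPC server is reachable at
# 127.0.0.1:8332; the credentials are placeholders for your own
# rpcuser/rpcpass from bitcoin.conf.
def _demo_rpc():
    rpc = BitcoinRPC('127.0.0.1', 8332, 'rpcuser', 'rpcpass')
    print "block count:", rpc.getblockcount()
    work = rpc.getwork()    # dict with hex-encoded 'data' and 'target'
    print "target:", work['target']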
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
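# A small self-check sketch (not in the original script) illustrating the
# byte-order helpers above. bufreverse uses the native '@I' format, so this
# demonstration assumes a little-endian host, like the miner itself does.
def _demo_byte_order():
    # swap the bytes within one 32-bit word
    assert bytereverse(0x12345678) == 0x78563412
    # swap the bytes within each 32-bit word of a buffer
    assert bufreverse('\x01\x02\x03\x04') == '\x04\x03\x02\x01'
    # reverse the order of the 32-bit words, keeping bytes within each word
    assert wordreverse('\x01\x02\x03\x04\x05\x06\x07\x08') == '\x05\x06\x07\x08\x01\x02\x03\x04'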
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
        # parse key=value lines (a sample config sketch appears at the end of this file)
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 33398
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
main.py
|
# -*- coding: utf-8 -*-
from gevent import monkey
monkey.patch_all()
import sys
sys.path.append("Src")
import time
import signal
from threading import Thread
from Log import LogManager
from Web import WebManager
from Forward.ForwardManager import ForwardHttp
from Manager.ProxyFetch import ProxyFetch
from Schedule.ProxyVerifySchedule import ProxyVerifySchedule
from Schedule.ProxyFetchSchedule import ProxyFetchSchedule
TASK_LIST = {
"ProxyVerifySchedule": ProxyVerifySchedule,
"ProxyFetchSchedule": ProxyFetchSchedule,
"ForwardHttp": ForwardHttp,
}
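# A minimal sketch (not part of the original program): any class whose
# instances expose a run() method can be scheduled by adding it to TASK_LIST;
# start_task() below wraps each entry in a daemon Thread. _DemoTask is a
# hypothetical example and is not registered by default.
class _DemoTask(object):
    def run(self):
        while True:
            time.sleep(60)
# TASK_LIST["DemoTask"] = _DemoTask  # uncomment to schedule it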
def show_time():
date = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
content = "{newline}{symbol} ProxyPool Start, date:{date} {symbol}{newline}".format(newline="\n", symbol="-"*50, date=date)
print(content)
def start_fetch():
ProxyFetch.initQueue()
t = ProxyFetch()
t.start()
def start_task():
start_fetch()
task_list = []
for name in TASK_LIST.keys():
task = TASK_LIST[name]()
t = Thread(target=task.run, name=name)
task_list.append(t)
for t in task_list:
t.daemon = True
t.start()
def stop_handler(signum, frame):
print('Received Signal [%s], Stop Program' % signum)
sys.exit()
def register_signal():
signal.signal(signal.SIGINT, stop_handler)
def main(test=False):
show_time()
register_signal()
LogManager.init()
start_task()
WebManager.run()
if __name__ == '__main__':
main()
|
queue_demo.py
|
'''
Producer-consumer pattern implemented with queue
'''
import threading
import time
import queue
# Consumer
def consume(thread_name, q):
    while True:
        time.sleep(2)
        product = q.get()
        print('%s consume %s' % (thread_name, product))
        # After the item has been processed, call task_done()
        q.task_done()
# Producer
def produce(thread_name, q):
for i in range(3):
product = 'product-'+str(i)
q.put(product)
print('%s produce %s' % (thread_name, product))
time.sleep(1)
    # join() blocks until the queue is empty, i.e. every queued item has been
    # marked done via task_done(), then returns.
    q.join()
# The shared queue
q = queue.Queue()
p = threading.Thread(target=produce, args=('producer', q))
p.start()
# Make the consumer threads daemons so they exit automatically when the main thread ends
c0 = threading.Thread(target=consume, args=('consumer-0', q), daemon=True)
c1 = threading.Thread(target=consume, args=('consumer-1', q), daemon=True)
c0.start()
c1.start()
p.join()
|
solrjmeter.py
|
#!/usr/bin/env python
"""An assistant for measuring MontySolr releases performance.
This script will run jmeter, distill performance
characteristics and graph them in somewhat meaningful
way
Here are the assumptions under which we work:
- we contact solr over HTTP
- the INSTDIR already exists
- results are saved into INSTDIR
This script could be Windows(tm) compatible very
easily, but I didn't care enough ;) get_pidis_running
is probably the only method that needs to change...
"""
import sys
import os
import optparse
import subprocess
import re
import glob
import csv
import simplejson
import datetime
import time
import copy
import Queue
import threading
import urllib
import traceback
from contextlib import contextmanager
from pprint import pprint,pformat
INSTDIR = os.environ.get('SOLRJMETER_HOME') or '/var/lib/montysolr'
COMMASPACE = ', '
SPACE = ' '
_NAME = 'solrjmeter'
_RELEASE = 1
if "check_output" not in dir( subprocess ): # duck punch it in!
def f(*popenargs, **kwargs):
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd)
return output
subprocess.check_output = f
def error(*msgs):
sys.stderr.write("**ERROR**\n")
traceback.print_stack()
for msg in msgs:
sys.stderr.write(msg)
sys.stderr.write("\n")
sys.exit(1)
def run_cmd(args, silent=False, strict=True):
cmd = SPACE.join(map(str, args))
if not silent:
print('$ %s' % cmd)
try:
if silent:
code = subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
else:
code = subprocess.call(cmd, shell=True)
except OSError:
error('failed: %s' % cmd)
else:
if strict and code != 0:
error('failed: %s' % cmd)
return code
def get_output(args):
return subprocess.check_output(SPACE.join(args), shell=True)
def make_request(q, url, kwargs):
try:
kwargs['wt'] = 'json'
params = urllib.urlencode(kwargs)
page = ''
conn = urllib.urlopen(url, params)
page = conn.read()
rsp = simplejson.loads(page)
conn.close()
q.put(rsp)
except Exception, e:
q.put(e)
def req(url, **kwargs):
q = Queue.Queue()
t = threading.Thread(target=make_request, args = (q, url, kwargs))
t.start()
t.join(3.0)
    try:
        # non-blocking get: if the worker thread hasn't produced a result
        # within the join timeout above, report a timeout instead of
        # blocking here forever
        r = q.get(block=False)
    except Queue.Empty:
        raise Exception("Timeout getting url=%s & %s" % (url, kwargs))
    if isinstance(r, Exception):
        print 'Error during http request: ', url, kwargs
        raise r
return r
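# A minimal usage sketch (not part of the original script): req() POSTs the
# keyword arguments as form parameters (forcing wt=json) and returns the
# decoded JSON response. The URL is an illustrative placeholder for a Solr
# select handler.
def _demo_req():
    rsp = req('http://localhost:8983/solr/collection1/select', q='*:*', rows=1)
    print "status:", rsp['responseHeader']['status']    # 0 on success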
def check_options(options, args):
if '' in args:
args.remove('')
if not options.jmx_test or not os.path.exists(options.jmx_test):
error('option jmx_test must point to a valid JMX file, we got: %s' % options.jmx_test )
options.jmx_test = os.path.abspath(options.jmx_test)
    if not options.serverName or not options.serverPort:
        error("You must specify both server and port")
options.query_endpoint = 'http://%s:%s%s' % (options.serverName,
options.serverPort,
options.serverPath)
jmx_options = []
for k, v in options.__dict__.items():
if k.lower() != k and v:
jmx_options.append('-D%s=%s' % (k, v))
options.jmx_args = ' '.join(jmx_options)
if options.google_spreadsheet and options.google_user and options.google_password:
options.upload = True
else:
options.upload = False
    options.script_home = os.path.abspath(os.path.join(args[0], '..'))
timestamp = time.time()
options.timestamp = timestamp
options.timeformat = "%Y-%m-%d %H:%M:%S"
options.today = datetime.datetime.fromtimestamp(timestamp).strftime(options.timeformat)
if options.debug:
options.today_folder = 'debug'
else:
options.today_folder = options.today.replace(':', '.').replace(' ', '.').replace('-', '.')
#if options.queries_pattern is None or options.queries_pattern == '':
# error('Missing --queries_pattern parameter')
if options.generate_queries and '=' not in options.generate_queries:
options.generate_queries = ''
if options.queries_pattern:
patterns = options.queries_pattern.split(',')
options.queries_pattern = []
for p in patterns:
if os.path.isabs(p):
options.queries_pattern.append(p)
else:
options.queries_pattern.append(os.path.abspath(p))
options.workdir = os.path.join(INSTDIR, _NAME)
if options.generate_comparison:
options.generate_comparison = options.generate_comparison.split(',')
if len(options.generate_comparison) < 1:
error("When generating comparison, we need at least two result folders")
for rf in range(len(options.generate_comparison)):
tfolder = options.workdir + "/" + options.generate_comparison[rf]
if options.generate_comparison[rf] == options.results_folder or os.path.exists(tfolder):
continue
error("The folder '%s' does not exist" % rf)
roundup_correction = options.roundup_correction
try:
options.roundup_correction = int(roundup_correction)
except:
if 'week' in roundup_correction:
options.roundup_correction = 7 * 24 * 3600
elif 'day' in roundup_correction:
options.roundup_correction = 24 * 3600
elif 'hour' in roundup_correction:
options.roundup_correction = 3600
elif 'min' in roundup_correction:
options.roundup_correction = 60
elif 'sec' in roundup_correction:
options.roundup_correction = 1
else:
error('Unknown correction value: %s' % roundup_correction)
if os.path.abspath(options.script_home + "/utils") not in sys.path:
sys.path.insert(0, os.path.abspath(options.script_home + "/utils"))
def get_pid(pidpath):
if os.path.exists(pidpath):
with open(pidpath, 'r') as pidfile:
r_pid = pidfile.read().strip()
try:
return int(r_pid)
except ValueError:
return -1
return -1
def acquire_lock(pidpath):
fo = open(pidpath, 'w')
fo.write(str(os.getpid()))
fo.close()
def remove_lock(pidpath):
os.remove(pidpath)
def check_pid_is_running(pid):
if os.path.exists('/proc/%s' % pid):
return True
return False
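# A minimal usage sketch (not part of the original script) of the pidfile
# helpers above: write our own pid, read it back and check liveness via
# /proc (Linux only, as the module docstring already concedes). The path is
# an illustrative placeholder.
def _demo_pidfile(pidpath='/tmp/solrjmeter-demo.pid'):
    acquire_lock(pidpath)              # writes os.getpid() into the file
    pid = get_pid(pidpath)             # -1 if the file is missing/unparsable
    alive = check_pid_is_running(pid)
    remove_lock(pidpath)
    return pid, alive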
@contextmanager
def changed_dir(new):
print('$ cd %s' % new)
old = os.getcwd()
os.chdir(new)
try:
yield
finally:
print('$ cd %s' % old)
os.chdir(old)
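# A tiny usage sketch (not part of the original script): changed_dir() cd's
# into a directory for the duration of the with-block and always cd's back,
# echoing both moves. '/tmp' is just an illustrative target.
def _demo_changed_dir():
    with changed_dir('/tmp'):
        print os.getcwd()    # now inside /tmp
    print os.getcwd()        # back where we started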
def check_basics():
if not os.path.exists(INSTDIR):
error('INSTDIR does not exist: %s' % INSTDIR )
if not os.access(INSTDIR, os.W_OK):
error('Cannot write into INSTDIR: %s' % INSTDIR)
def get_arg_parser():
usage = '%prog [options] example.queries example2.queries....'
p = optparse.OptionParser(usage=usage)
p.add_option('-a', '--setup_prerequisities',
default=False, action='store_true',
help='Install all prerequisites')
p.add_option('-x', '--jmx_test',
default='%s/perpetuum/montysolr/contrib/examples/adsabs/jmeter/SolrQueryTest.jmx' % INSTDIR,
action='store',
help='The configuration of the test (location of .jmx file)')
p.add_option('-j', '--setup_jmeter',
default=False, action='store_true',
help='Install jmeter')
p.add_option('-d', '--debug',
default=False, action='store_true',
help='Debug mode (we are saving data into one folder: debug)')
p.add_option('-J', '--java',
default='java', action='store',
help='Java executable')
p.add_option('-q', '--queries_pattern',
default=None, action='store',
help='Pattern to use for retrieving jmeter queries')
p.add_option('-B', '--run_command_before',
default='', action='store',
help='Invoke this command BEFORE running tests - use to restart/update instance')
p.add_option('-A', '--run_command_after',
default='', action='store',
help='Invoke this command AFTER running tests - use to restart/update instance')
p.add_option('-g', '--generate_queries',
default=None, action='store',
help='Generate queries for certain fields (you can pass solr parameters)')
p.add_option('-S', '--save',
default=True, action='store_true',
help='Save results as the test proceeds')
p.add_option('--google_spreadsheet',
default='', action='store',
help='Upload results into a Google spread sheet: x')
p.add_option('--google_user',
default='', action='store',
help='Upload results into a Google spread sheet: username')
p.add_option('--google_password',
default='', action='store',
help='Upload results into a Google spread sheet: password')
p.add_option('-P', '--purge',
default=False, action='store_true',
help='Remove the test folder before running the test (if already exists)')
p.add_option('-R', '--results_folder',
default='results', action='store',
help='Name of the folder where to save results [results]')
p.add_option('-f', '--regenerate_html',
default=False, action='store_true',
help='Regenerate the html view before running any test')
p.add_option('-C', '--generate_comparison',
default=None, action='store',
help='Comma separated list of result folders to create aggregate view')
p.add_option('-c', '--roundup_correction',
default='3600', action='store',
help='Roundup date of the measurement (to cluster) different tests; possible values: day,hour,min,sec,[int=number of seconds]')
p.add_option('-e', '--core_name',
default='', action='store',
help='Name of the core to read statistics from (if empty, default core will be used)')
# JMeter options specific to our .jmx test
p.add_option('-s', '--serverName',
default='adswhy', action='store',
help='Machine we run test against, eg. adswhy')
p.add_option('-p', '--serverPort',
default='', action='store',
help='Port, eg. 9000')
p.add_option('-t', '--serverPath',
default='/solr', action='store',
help='Path to access your solr - eg. /solr or /solr/collection1...')
p.add_option('-i', '--durationInSecs',
default=5, action='store', type='int',
help='How many seconds to run each test')
p.add_option('-r', '--rampUpInSecs',
default=0, action='store', type='int',
help='How many seconds to warm up')
    p.add_option('-U', '--noOfUsers',
             default=0, action='store', type='int',
             help='How many concurrent users (threads) to simulate')
p.add_option('-o', '--additionalSolrParams',
default='', action='store',
help='Additional URL-encoded params to pass with every request')
return p
def check_prerequisities(options):
jmeter = None
try:
jmeter = get_output(['which', 'jmeter'])
if jmeter:
jmeter = jmeter.strip()
except subprocess.CalledProcessError:
pass
if options.setup_jmeter or options.setup_prerequisities:
setup_jmeter(options)
jmeter = os.path.join(INSTDIR, '%s/jmeter/bin/jmeter' % _NAME)
if jmeter:
options.jmeter = jmeter
else:
error('Cannot find executable jmeter (is $PATH set correctly?)')
if options.query_endpoint and options.queries_pattern:
try:
req(options.query_endpoint + "/select?q=star")
except:
error('Cannot contact: %s' % options.query_endpoint)
# try to find the admin endpoint
try:
req(options.query_endpoint + "/admin/cores")
options.admin_endpoint = options.query_endpoint + "/admin"
except:
# possibly a core path
apath = '/'.join(options.query_endpoint.split('/')[0:-1]) + "/admin"
try:
req(apath + '/cores')
options.admin_endpoint = apath
except:
error('Cannot find admin pages: %s, please report a bug' % apath)
def setup_jmeter(options):
"""
On old systems, such as CentOS, jmeter binaries are useless
"""
if os.path.exists('jmeter/RELEASE') and str(get_pid('jmeter/RELEASE')) == str(_RELEASE):
return # already installed
with open("install_jmeter.sh", "w") as build_ant:
build_ant.write("""#!/bin/bash -e
if [ -f apache-jmeter-2.9.tgz ]; then
rm apache-jmeter-2.9.tgz
fi
if [ -d jmeter ]; then
rm -fr jmeter
fi
wget -nc https://archive.apache.org/dist/jmeter/binaries/apache-jmeter-2.9.tgz
tar -xzf apache-jmeter-2.9.tgz
mv apache-jmeter-2.9 jmeter
wget -nc http://jmeter-plugins.org/downloads/file/JMeterPlugins-Standard-1.1.1.zip
wget -nc http://jmeter-plugins.org/downloads/file/JMeterPlugins-Extras-1.1.1.zip
unzip -o JMeterPlugins-Standard-1.1.1.zip -d jmeter
unzip -o JMeterPlugins-Extras-1.1.1.zip -d jmeter
echo "%(release)s" > jmeter/RELEASE
""" % {'release': _RELEASE})
run_cmd(['chmod', 'u+x', 'install_jmeter.sh'])
run_cmd(['./install_jmeter.sh'])
def update_montysolr(options):
if options.update_command:
        run_cmd([options.update_command, '>', 'running_update_command.log'])
def generate_queries(options):
from utils import generate_queries as gq
gq.main(options)
def find_tests(options):
if options.queries_pattern:
tests = set()
for pattern in options.queries_pattern:
if os.path.exists(pattern):
if os.path.isfile(pattern):
tests.add(pattern)
else:
with changed_dir(pattern):
for x in glob.glob('*.queries'):
tests.add(x)
else:
for x in glob.glob(pattern):
tests.add(x)
return tests
else:
return glob.glob(os.path.join(INSTDIR, 'perpetuum/montysolr/contrib/examples/adsabs/jmeter/*.queries'))
class ForgivingDict( dict ):
def __getitem__( self, key ):
try:
x = super( ForgivingDict,self).__getitem__(key)
if isinstance(x, dict):
return ForgivingDict(x)
return x
except KeyError as e:
return '<MISSING>'
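# A small usage sketch (not part of the original script): ForgivingDict
# returns the placeholder string '<MISSING>' instead of raising KeyError,
# and wraps nested dicts so deep lookups are forgiving too.
def _demo_forgiving_dict():
    d = ForgivingDict({'jvm': {'version': '1.7'}})
    assert d['jvm']['version'] == '1.7'
    assert d['nope'] == '<MISSING>'
    assert d['jvm']['nope'] == '<MISSING>'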
def harvest_details_about_montysolr(options):
system_data = ForgivingDict(req('%s/admin/system' % options.query_endpoint))
mbeans_data = ForgivingDict(req('%s/admin/mbeans' % options.query_endpoint, stats='true'))
cores_data = ForgivingDict(req('%s/cores' % options.admin_endpoint, stats='true'))
if 'QUERYHANDLER' in mbeans_data['solr-mbeans']:
return _get_solr4x6x_data(options, system_data, mbeans_data, cores_data)
else:
return _get_solr7x_data(options, system_data, mbeans_data, cores_data)
def _get_solr7x_data(options, system_data, mbeans_data, cores_data):
cn = options.core_name or cores_data['defaultCoreName']
ci = mbeans_data['solr-mbeans'].index('CORE')+1
ch = mbeans_data['solr-mbeans'].index('QUERY')+1
cc = mbeans_data['solr-mbeans'].index('CACHE')+1
out = dict(
host = system_data['core']['host'],
now = system_data['core']['now'],
start = system_data['core']['start'],
jvmVersion = system_data['jvm']['version'],
jvmName = system_data['jvm']['name'],
jvmProcessors = system_data['jvm']['processors'],
jvmFree = system_data['jvm']['memory']['free'],
jvmTotal = system_data['jvm']['memory']['total'],
jvmMax = system_data['jvm']['memory']['max'],
jvmUsed = system_data['jvm']['memory']['used'],
jvmUsedRaw = system_data['jvm']['memory']['raw']['used'],
jvmCommandLineArgs = ' '.join(system_data['jvm']['jmx']['commandLineArgs']),
systemName = system_data['system']['name'],
systemVersion = system_data['system']['version'],
systemArch = system_data['system']['arch'],
systemLoadAverage = system_data['system']['systemLoadAverage'],
systemCommittedVirtualMemorySize = system_data['system']['committedVirtualMemorySize'],
systemFreePhysicalMemorySize = system_data['system']['freePhysicalMemorySize'],
systemFreeSwapSpaceSize = system_data['system']['freeSwapSpaceSize'],
systemProcessCpuTime = system_data['system']['processCpuTime'],
systemTotalPhysicalMemorySize = system_data['system']['totalPhysicalMemorySize'],
systemTotalSwapSpaceSize = system_data['system']['totalSwapSpaceSize'],
systemOpenFileDescriptorCount = system_data['system']['openFileDescriptorCount'],
systemMaxFileDescriptorCount = system_data['system']['maxFileDescriptorCount'],
systemUname = system_data['system']['uname'],
systemUptime = system_data['system']['uptime'],
solrSpecVersion = system_data['lucene']['solr-spec-version'],
solrImplVersion = system_data['lucene']['solr-impl-version'],
luceneSpecVersion = system_data['lucene']['lucene-spec-version'],
luceneImplVersion = system_data['lucene']['lucene-impl-version'],
instanceDir=cores_data['status'][cn]['instanceDir'],
dataDir=cores_data['status'][cn]['dataDir'],
startTime = cores_data['status'][cn]['startTime'],
uptime = cores_data['status'][cn]['uptime'],
indexNumDocs = cores_data['status'][cn]['index']['numDocs'],
indexMaxDoc = cores_data['status'][cn]['index']['maxDoc'],
indexVersion = cores_data['status'][cn]['index']['version'],
indexSegmentCount = cores_data['status'][cn]['index']['segmentCount'],
indexCurrent = cores_data['status'][cn]['index']['current'],
indexHasDeletions = cores_data['status'][cn]['index']['hasDeletions'],
indexDirectory = cores_data['status'][cn]['index']['directory'],
indexLstModified = cores_data['status'][cn]['index']['lastModified'],
indexSizeInBytes = cores_data['status'][cn]['index']['sizeInBytes'],
indexSize = cores_data['status'][cn]['index']['size'],
coreRefCount = mbeans_data['solr-mbeans'][ci]['core']['stats']['CORE.refCount'],
searcherClass = mbeans_data['solr-mbeans'][ci]['searcher']['class'],
searcherCaching = mbeans_data['solr-mbeans'][ci]['searcher']['stats']['SEARCHER.searcher.caching'],
searcherReader = mbeans_data['solr-mbeans'][ci]['searcher']['stats']['SEARCHER.searcher.reader'],
searcherOpenedAt = mbeans_data['solr-mbeans'][ci]['searcher']['stats']['SEARCHER.searcher.openedAt'],
searcherRegisteredAt = mbeans_data['solr-mbeans'][ci]['searcher']['stats']['SEARCHER.searcher.registeredAt'],
searcherWarmupTime = mbeans_data['solr-mbeans'][ci]['searcher']['stats']['SEARCHER.searcher.warmupTime'],
selectClass = mbeans_data['solr-mbeans'][ch]['/select']['class'],
#selectVersion = mbeans_data['solr-mbeans'][ch]['/select']['version'],
selectDescription = mbeans_data['solr-mbeans'][ch]['/select']['description'],
selectRequests = mbeans_data['solr-mbeans'][ch]['/select']['stats']['QUERY./select.requests'],
selectErrors = mbeans_data['solr-mbeans'][ch]['/select']['stats']['QUERY./select.errors.count'],
selectTimeouts = mbeans_data['solr-mbeans'][ch]['/select']['stats']['QUERY./select.timeouts.count'],
selectTotalTime = mbeans_data['solr-mbeans'][ch]['/select']['stats']['QUERY./select.totalTime'],
#selectAvgTimePerRequest = mbeans_data['solr-mbeans'][ch]['/select']['stats']['avgTimePerRequest'],
selectAvgRequestsPerSecond = mbeans_data['solr-mbeans'][ch]['/select']['stats']['QUERY./select.requestTimes.meanRate'],
cacheQueryClass = mbeans_data['solr-mbeans'][cc]['queryResultCache']['class'],
#cacheQueryVersion = mbeans_data['solr-mbeans'][cc]['queryResultCache']['version'],
cacheQueryDescription = mbeans_data['solr-mbeans'][cc]['queryResultCache']['description'],
cacheQueryLookups = mbeans_data['solr-mbeans'][cc]['queryResultCache']['stats']['CACHE.searcher.queryResultCache.lookups'],
cacheQueryHits = mbeans_data['solr-mbeans'][cc]['queryResultCache']['stats']['CACHE.searcher.queryResultCache.hits'],
cacheQueryHitRatio = mbeans_data['solr-mbeans'][cc]['queryResultCache']['stats']['CACHE.searcher.queryResultCache.hitratio'],
cacheQueryEvictions = mbeans_data['solr-mbeans'][cc]['queryResultCache']['stats']['CACHE.searcher.queryResultCache.evictions'],
cacheQuerySize = mbeans_data['solr-mbeans'][cc]['queryResultCache']['stats']['CACHE.searcher.queryResultCache.size'],
cacheQueryWarmupTime = mbeans_data['solr-mbeans'][cc]['queryResultCache']['stats']['CACHE.searcher.queryResultCache.warmupTime'],
cacheQueryCumulativeLookups = mbeans_data['solr-mbeans'][cc]['queryResultCache']['stats']['CACHE.searcher.queryResultCache.cumulative_lookups'],
cacheQueryCumulativeHits = mbeans_data['solr-mbeans'][cc]['queryResultCache']['stats']['CACHE.searcher.queryResultCache.cumulative_hits'],
cacheQueryCumulativeHitRatio = mbeans_data['solr-mbeans'][cc]['queryResultCache']['stats']['CACHE.searcher.queryResultCache.cumulative_hitratio'],
cacheQueryCumulativeInserts = mbeans_data['solr-mbeans'][cc]['queryResultCache']['stats']['CACHE.searcher.queryResultCache.cumulative_inserts'],
cacheQueryCumulativeEvictions = mbeans_data['solr-mbeans'][cc]['queryResultCache']['stats']['CACHE.searcher.queryResultCache.cumulative_evictions'],
cacheFieldClass = mbeans_data['solr-mbeans'][cc]['fieldCache']['class'],
#cacheFieldVersion = mbeans_data['solr-mbeans'][cc]['fieldCache']['version'],
cacheFieldDescription = mbeans_data['solr-mbeans'][cc]['fieldCache']['description'],
cacheFieldEntriesCount = mbeans_data['solr-mbeans'][cc]['fieldCache']['stats']['CACHE.core.fieldCache.entries_count'],
cacheDocumentClass = mbeans_data['solr-mbeans'][cc]['documentCache']['class'],
#cacheDocumentVersion = mbeans_data['solr-mbeans'][cc]['documentCache']['version'],
cacheDocumentDescription = mbeans_data['solr-mbeans'][cc]['documentCache']['description'],
cacheDocumentLookups = mbeans_data['solr-mbeans'][cc]['documentCache']['stats']['CACHE.searcher.documentCache.lookups'],
cacheDocumentHits = mbeans_data['solr-mbeans'][cc]['documentCache']['stats']['CACHE.searcher.documentCache.hits'],
cacheDocumentHitRatio = mbeans_data['solr-mbeans'][cc]['documentCache']['stats']['CACHE.searcher.documentCache.hitratio'],
cacheDocumentEvictions = mbeans_data['solr-mbeans'][cc]['documentCache']['stats']['CACHE.searcher.documentCache.evictions'],
cacheDocumentSize = mbeans_data['solr-mbeans'][cc]['documentCache']['stats']['CACHE.searcher.documentCache.size'],
cacheDocumentWarmupTime = mbeans_data['solr-mbeans'][cc]['documentCache']['stats']['CACHE.searcher.documentCache.warmupTime'],
cacheDocumentCumulativeLookups = mbeans_data['solr-mbeans'][cc]['documentCache']['stats']['CACHE.searcher.documentCache.cumulative_lookups'],
cacheDocumentCumulativeHits = mbeans_data['solr-mbeans'][cc]['documentCache']['stats']['CACHE.searcher.documentCache.cumulative_hits'],
cacheDocumentCumulativeHitRatio = mbeans_data['solr-mbeans'][cc]['documentCache']['stats']['CACHE.searcher.documentCache.cumulative_hitratio'],
cacheDocumentCumulativeInserts = mbeans_data['solr-mbeans'][cc]['documentCache']['stats']['CACHE.searcher.documentCache.cumulative_inserts'],
cacheDocumentCumulativeEvictions = mbeans_data['solr-mbeans'][cc]['documentCache']['stats']['CACHE.searcher.documentCache.cumulative_evictions'],
cacheFieldValueClass = mbeans_data['solr-mbeans'][cc]['fieldValueCache']['class'],
#cacheFieldValueVersion = mbeans_data['solr-mbeans'][cc]['fieldValueCache']['version'],
cacheFieldValueDescription = mbeans_data['solr-mbeans'][cc]['fieldValueCache']['description'],
cacheFieldValueLookups = mbeans_data['solr-mbeans'][cc]['fieldValueCache']['stats']['CACHE.searcher.fieldValueCache.lookups'],
cacheFieldValueHits = mbeans_data['solr-mbeans'][cc]['fieldValueCache']['stats']['CACHE.searcher.fieldValueCache.hits'],
cacheFieldValueHitRatio = mbeans_data['solr-mbeans'][cc]['fieldValueCache']['stats']['CACHE.searcher.fieldValueCache.hitratio'],
cacheFieldValueEvictions = mbeans_data['solr-mbeans'][cc]['fieldValueCache']['stats']['CACHE.searcher.fieldValueCache.evictions'],
cacheFieldValueSize = mbeans_data['solr-mbeans'][cc]['fieldValueCache']['stats']['CACHE.searcher.fieldValueCache.size'],
cacheFieldValueWarmupTime = mbeans_data['solr-mbeans'][cc]['fieldValueCache']['stats']['CACHE.searcher.fieldValueCache.warmupTime'],
cacheFieldValueCumulativeLookups = mbeans_data['solr-mbeans'][cc]['fieldValueCache']['stats']['CACHE.searcher.fieldValueCache.cumulative_lookups'],
cacheFieldValueCumulativeHits = mbeans_data['solr-mbeans'][cc]['fieldValueCache']['stats']['CACHE.searcher.fieldValueCache.cumulative_hits'],
cacheFieldValueCumulativeHitRatio = mbeans_data['solr-mbeans'][cc]['fieldValueCache']['stats']['CACHE.searcher.fieldValueCache.cumulative_hitratio'],
cacheFieldValueCumulativeInserts = mbeans_data['solr-mbeans'][cc]['fieldValueCache']['stats']['CACHE.searcher.fieldValueCache.cumulative_inserts'],
cacheFieldValueCumulativeEvictions = mbeans_data['solr-mbeans'][cc]['fieldValueCache']['stats']['CACHE.searcher.fieldValueCache.cumulative_evictions'],
cacheFilterClass = mbeans_data['solr-mbeans'][cc]['filterCache']['class'],
#cacheFilterVersion = mbeans_data['solr-mbeans'][cc]['filterCache']['version'],
cacheFilterDescription = mbeans_data['solr-mbeans'][cc]['filterCache']['description'],
cacheFilterLookups = mbeans_data['solr-mbeans'][cc]['filterCache']['stats']['CACHE.searcher.filterCache.lookups'],
cacheFilterHits = mbeans_data['solr-mbeans'][cc]['filterCache']['stats']['CACHE.searcher.filterCache.hits'],
cacheFilterHitRatio = mbeans_data['solr-mbeans'][cc]['filterCache']['stats']['CACHE.searcher.filterCache.hitratio'],
cacheFilterEvictions = mbeans_data['solr-mbeans'][cc]['filterCache']['stats']['CACHE.searcher.filterCache.evictions'],
cacheFilterSize = mbeans_data['solr-mbeans'][cc]['filterCache']['stats']['CACHE.searcher.filterCache.size'],
cacheFilterWarmupTime = mbeans_data['solr-mbeans'][cc]['filterCache']['stats']['CACHE.searcher.filterCache.warmupTime'],
cacheFilterCumulativeLookups = mbeans_data['solr-mbeans'][cc]['filterCache']['stats']['CACHE.searcher.filterCache.cumulative_lookups'],
cacheFilterCumulativeHits = mbeans_data['solr-mbeans'][cc]['filterCache']['stats']['CACHE.searcher.filterCache.cumulative_hits'],
cacheFilterCumulativeHitRatio = mbeans_data['solr-mbeans'][cc]['filterCache']['stats']['CACHE.searcher.filterCache.cumulative_hitratio'],
cacheFilterCumulativeInserts = mbeans_data['solr-mbeans'][cc]['filterCache']['stats']['CACHE.searcher.filterCache.cumulative_inserts'],
cacheFilterCumulativeEvictions = mbeans_data['solr-mbeans'][cc]['filterCache']['stats']['CACHE.searcher.filterCache.cumulative_evictions'],
)
return out
def _get_solr4x6x_data(options, system_data, mbeans_data, cores_data):
cn = options.core_name or cores_data['defaultCoreName']
ci = mbeans_data['solr-mbeans'].index('CORE')+1
ch = mbeans_data['solr-mbeans'].index('QUERYHANDLER')+1
cc = mbeans_data['solr-mbeans'].index('CACHE')+1
out = dict(
host = system_data['core']['host'],
now = system_data['core']['now'],
start = system_data['core']['start'],
jvmVersion = system_data['jvm']['version'],
jvmName = system_data['jvm']['name'],
jvmProcessors = system_data['jvm']['processors'],
jvmFree = system_data['jvm']['memory']['free'],
jvmTotal = system_data['jvm']['memory']['total'],
jvmMax = system_data['jvm']['memory']['max'],
jvmUsed = system_data['jvm']['memory']['used'],
jvmUsedRaw = system_data['jvm']['memory']['raw']['used'],
jvmCommandLineArgs = ' '.join(system_data['jvm']['jmx']['commandLineArgs']),
systemName = system_data['system']['name'],
systemVersion = system_data['system']['version'],
systemArch = system_data['system']['arch'],
systemLoadAverage = system_data['system']['systemLoadAverage'],
systemCommittedVirtualMemorySize = system_data['system']['committedVirtualMemorySize'],
systemFreePhysicalMemorySize = system_data['system']['freePhysicalMemorySize'],
systemFreeSwapSpaceSize = system_data['system']['freeSwapSpaceSize'],
systemProcessCpuTime = system_data['system']['processCpuTime'],
systemTotalPhysicalMemorySize = system_data['system']['totalPhysicalMemorySize'],
systemTotalSwapSpaceSize = system_data['system']['totalSwapSpaceSize'],
systemOpenFileDescriptorCount = system_data['system']['openFileDescriptorCount'],
systemMaxFileDescriptorCount = system_data['system']['maxFileDescriptorCount'],
systemUname = system_data['system']['uname'],
systemUptime = system_data['system']['uptime'],
solrSpecVersion = system_data['lucene']['solr-spec-version'],
solrImplVersion = system_data['lucene']['solr-impl-version'],
luceneSpecVersion = system_data['lucene']['lucene-spec-version'],
luceneImplVersion = system_data['lucene']['lucene-impl-version'],
instanceDir=cores_data['status'][cn]['instanceDir'],
dataDir=cores_data['status'][cn]['dataDir'],
startTime = cores_data['status'][cn]['startTime'],
uptime = cores_data['status'][cn]['uptime'],
indexNumDocs = cores_data['status'][cn]['index']['numDocs'],
indexMaxDoc = cores_data['status'][cn]['index']['maxDoc'],
indexVersion = cores_data['status'][cn]['index']['version'],
indexSegmentCount = cores_data['status'][cn]['index']['segmentCount'],
indexCurrent = cores_data['status'][cn]['index']['current'],
indexHasDeletions = cores_data['status'][cn]['index']['hasDeletions'],
indexDirectory = cores_data['status'][cn]['index']['directory'],
indexLstModified = cores_data['status'][cn]['index']['lastModified'],
indexSizeInBytes = cores_data['status'][cn]['index']['sizeInBytes'],
indexSize = cores_data['status'][cn]['index']['size'],
coreRefCount = mbeans_data['solr-mbeans'][ci]['core']['stats']['refCount'],
searcherClass = mbeans_data['solr-mbeans'][ci]['searcher']['class'],
searcherCaching = mbeans_data['solr-mbeans'][ci]['searcher']['stats']['caching'],
searcherReader = mbeans_data['solr-mbeans'][ci]['searcher']['stats']['reader'],
searcherOpenedAt = mbeans_data['solr-mbeans'][ci]['searcher']['stats']['openedAt'],
searcherRegisteredAt = mbeans_data['solr-mbeans'][ci]['searcher']['stats']['registeredAt'],
searcherWarmupTime = mbeans_data['solr-mbeans'][ci]['searcher']['stats']['warmupTime'],
selectClass = mbeans_data['solr-mbeans'][ch]['/select']['class'],
selectVersion = mbeans_data['solr-mbeans'][ch]['/select']['version'],
selectDescription = mbeans_data['solr-mbeans'][ch]['/select']['description'],
selectRequests = mbeans_data['solr-mbeans'][ch]['/select']['stats']['requests'],
selectErrors = mbeans_data['solr-mbeans'][ch]['/select']['stats']['errors'],
selectTimeouts = mbeans_data['solr-mbeans'][ch]['/select']['stats']['timeouts'],
selectTotalTime = mbeans_data['solr-mbeans'][ch]['/select']['stats']['totalTime'],
selectAvgTimePerRequest = mbeans_data['solr-mbeans'][ch]['/select']['stats']['avgTimePerRequest'],
selectAvgRequestsPerSecond = mbeans_data['solr-mbeans'][ch]['/select']['stats']['avgRequestsPerSecond'],
cacheQueryClass = mbeans_data['solr-mbeans'][cc]['queryResultCache']['class'],
cacheQueryVersion = mbeans_data['solr-mbeans'][cc]['queryResultCache']['version'],
cacheQueryDescription = mbeans_data['solr-mbeans'][cc]['queryResultCache']['description'],
cacheQueryLookups = mbeans_data['solr-mbeans'][cc]['queryResultCache']['stats']['lookups'],
cacheQueryHits = mbeans_data['solr-mbeans'][cc]['queryResultCache']['stats']['hits'],
cacheQueryHitRatio = mbeans_data['solr-mbeans'][cc]['queryResultCache']['stats']['hitratio'],
cacheQueryEvictions = mbeans_data['solr-mbeans'][cc]['queryResultCache']['stats']['evictions'],
cacheQuerySize = mbeans_data['solr-mbeans'][cc]['queryResultCache']['stats']['size'],
cacheQueryWarmupTime = mbeans_data['solr-mbeans'][cc]['queryResultCache']['stats']['warmupTime'],
cacheQueryCumulativeLookups = mbeans_data['solr-mbeans'][cc]['queryResultCache']['stats']['cumulative_lookups'],
cacheQueryCumulativeHits = mbeans_data['solr-mbeans'][cc]['queryResultCache']['stats']['cumulative_hits'],
cacheQueryCumulativeHitRatio = mbeans_data['solr-mbeans'][cc]['queryResultCache']['stats']['cumulative_hitratio'],
cacheQueryCumulativeInserts = mbeans_data['solr-mbeans'][cc]['queryResultCache']['stats']['cumulative_inserts'],
cacheQueryCumulativeEvictions = mbeans_data['solr-mbeans'][cc]['queryResultCache']['stats']['cumulative_evictions'],
cacheFieldClass = mbeans_data['solr-mbeans'][cc]['fieldCache']['class'],
cacheFieldVersion = mbeans_data['solr-mbeans'][cc]['fieldCache']['version'],
cacheFieldDescription = mbeans_data['solr-mbeans'][cc]['fieldCache']['description'],
cacheFieldEntriesCount = mbeans_data['solr-mbeans'][cc]['fieldCache']['stats']['entries_count'],
cacheDocumentClass = mbeans_data['solr-mbeans'][cc]['documentCache']['class'],
cacheDocumentVersion = mbeans_data['solr-mbeans'][cc]['documentCache']['version'],
cacheDocumentDescription = mbeans_data['solr-mbeans'][cc]['documentCache']['description'],
cacheDocumentLookups = mbeans_data['solr-mbeans'][cc]['documentCache']['stats']['lookups'],
cacheDocumentHits = mbeans_data['solr-mbeans'][cc]['documentCache']['stats']['hits'],
cacheDocumentHitRatio = mbeans_data['solr-mbeans'][cc]['documentCache']['stats']['hitratio'],
cacheDocumentEvictions = mbeans_data['solr-mbeans'][cc]['documentCache']['stats']['evictions'],
cacheDocumentSize = mbeans_data['solr-mbeans'][cc]['documentCache']['stats']['size'],
cacheDocumentWarmupTime = mbeans_data['solr-mbeans'][cc]['documentCache']['stats']['warmupTime'],
cacheDocumentCumulativeLookups = mbeans_data['solr-mbeans'][cc]['documentCache']['stats']['cumulative_lookups'],
cacheDocumentCumulativeHits = mbeans_data['solr-mbeans'][cc]['documentCache']['stats']['cumulative_hits'],
cacheDocumentCumulativeHitRatio = mbeans_data['solr-mbeans'][cc]['documentCache']['stats']['cumulative_hitratio'],
cacheDocumentCumulativeInserts = mbeans_data['solr-mbeans'][cc]['documentCache']['stats']['cumulative_inserts'],
cacheDocumentCumulativeEvictions = mbeans_data['solr-mbeans'][cc]['documentCache']['stats']['cumulative_evictions'],
cacheFieldValueClass = mbeans_data['solr-mbeans'][cc]['fieldValueCache']['class'],
cacheFieldValueVersion = mbeans_data['solr-mbeans'][cc]['fieldValueCache']['version'],
cacheFieldValueDescription = mbeans_data['solr-mbeans'][cc]['fieldValueCache']['description'],
cacheFieldValueLookups = mbeans_data['solr-mbeans'][cc]['fieldValueCache']['stats']['lookups'],
cacheFieldValueHits = mbeans_data['solr-mbeans'][cc]['fieldValueCache']['stats']['hits'],
cacheFieldValueHitRatio = mbeans_data['solr-mbeans'][cc]['fieldValueCache']['stats']['hitratio'],
cacheFieldValueEvictions = mbeans_data['solr-mbeans'][cc]['fieldValueCache']['stats']['evictions'],
cacheFieldValueSize = mbeans_data['solr-mbeans'][cc]['fieldValueCache']['stats']['size'],
cacheFieldValueWarmupTime = mbeans_data['solr-mbeans'][cc]['fieldValueCache']['stats']['warmupTime'],
cacheFieldValueCumulativeLookups = mbeans_data['solr-mbeans'][cc]['fieldValueCache']['stats']['cumulative_lookups'],
cacheFieldValueCumulativeHits = mbeans_data['solr-mbeans'][cc]['fieldValueCache']['stats']['cumulative_hits'],
cacheFieldValueCumulativeHitRatio = mbeans_data['solr-mbeans'][cc]['fieldValueCache']['stats']['cumulative_hitratio'],
cacheFieldValueCumulativeInserts = mbeans_data['solr-mbeans'][cc]['fieldValueCache']['stats']['cumulative_inserts'],
cacheFieldValueCumulativeEvictions = mbeans_data['solr-mbeans'][cc]['fieldValueCache']['stats']['cumulative_evictions'],
cacheFilterClass = mbeans_data['solr-mbeans'][cc]['filterCache']['class'],
cacheFilterVersion = mbeans_data['solr-mbeans'][cc]['filterCache']['version'],
cacheFilterDescription = mbeans_data['solr-mbeans'][cc]['filterCache']['description'],
cacheFilterLookups = mbeans_data['solr-mbeans'][cc]['filterCache']['stats']['lookups'],
cacheFilterHits = mbeans_data['solr-mbeans'][cc]['filterCache']['stats']['hits'],
cacheFilterHitRatio = mbeans_data['solr-mbeans'][cc]['filterCache']['stats']['hitratio'],
cacheFilterEvictions = mbeans_data['solr-mbeans'][cc]['filterCache']['stats']['evictions'],
cacheFilterSize = mbeans_data['solr-mbeans'][cc]['filterCache']['stats']['size'],
cacheFilterWarmupTime = mbeans_data['solr-mbeans'][cc]['filterCache']['stats']['warmupTime'],
cacheFilterCumulativeLookups = mbeans_data['solr-mbeans'][cc]['filterCache']['stats']['cumulative_lookups'],
cacheFilterCumulativeHits = mbeans_data['solr-mbeans'][cc]['filterCache']['stats']['cumulative_hits'],
cacheFilterCumulativeHitRatio = mbeans_data['solr-mbeans'][cc]['filterCache']['stats']['cumulative_hitratio'],
cacheFilterCumulativeInserts = mbeans_data['solr-mbeans'][cc]['filterCache']['stats']['cumulative_inserts'],
cacheFilterCumulativeEvictions = mbeans_data['solr-mbeans'][cc]['filterCache']['stats']['cumulative_evictions'],
)
return out
def run_test(test, options):
save_into_file('before-test.json', simplejson.dumps(harvest_details_about_montysolr(options)))
# run the test, results will be summary_report.data
run_cmd(['%(jmeter)s -n -t %(jmx_test)s %(jmx_args)s -l results.jtl -DqueryFile=%(query_file)s ' \
'-DbaseDir=%(basedir)s' %
dict(jmeter=options.jmeter,
jmx_test=options.jmx_test, jmx_args=options.jmx_args, query_file=test,
basedir=os.path.abspath('.'))])
save_into_file('after-test.json', simplejson.dumps(harvest_details_about_montysolr(options)))
def tablify(csv_filepath, dygraph_format=False):
with open(csv_filepath, 'r') as f:
data = csv.reader(f)
labels = data.next()
if dygraph_format:
labs = [labels[0]]
for x in labels[1:]:
labs.append(x)
labs.append('stdev')
labels = labs
return Table(*[Table.Column(x[0], tuple(x[1:])) for x in zip(labels, *list(data))])
def run_cmd_thread(*args):
if str(os.environ.get('RUN_SEQUENTIALLY', 'false')).lower() == 'false':
t = threading.Thread(target=run_cmd, args=args)
#t.daemon = True
t.start()
#t.run()
else:
run_cmd(*args)
def generate_graphs(options):
# now generate various metrics/graphs from the summary
reporter = '%(java)s -jar %(jmeter_base)s/lib/ext/CMDRunner.jar --tool Reporter' \
' --input-jtl summary_report.data' % dict(java=options.java,
jmeter_base=os.path.abspath(options.jmeter + '/../..'))
orig_thread_count = threading.active_count()
run_cmd_thread([reporter, '--plugin-type AggregateReport --generate-csv aggregate-report.csv'])
run_cmd_thread([reporter, '--plugin-type BytesThroughputOverTime --generate-png bytes-throughput-over-time.png'])
run_cmd_thread([reporter, '--plugin-type BytesThroughputOverTime --generate-csv bytes-throughput-over-time.csv'])
# the same info is at response-codes-per-sec including the number of failed requests
#run_cmd([reporter, '--plugin-type HitsPerSecond --generate-png hits-per-sec.png'])
#run_cmd([reporter, '--plugin-type HitsPerSecond --generate-csv hits-per-sec.csv'])
run_cmd_thread([reporter, '--plugin-type LatenciesOverTime --generate-png latencies-over-time.png'])
run_cmd_thread([reporter, '--plugin-type LatenciesOverTime --generate-csv latencies-over-time.csv'])
run_cmd_thread([reporter, '--plugin-type ResponseCodesPerSecond --generate-png response-codes-per-sec.png'])
run_cmd_thread([reporter, '--plugin-type ResponseCodesPerSecond --generate-csv response-codes-per-sec.csv'])
# histogram of number of responses that fit in 100ms, 1s, 10s,
run_cmd_thread([reporter, '--plugin-type ResponseTimesDistribution --generate-png response-times-distribution-10.png --granulation 10'])
run_cmd_thread([reporter, '--plugin-type ResponseTimesDistribution --generate-png response-times-distribution-100.png --granulation 100'])
run_cmd_thread([reporter, '--plugin-type ResponseTimesDistribution --generate-png response-times-distribution-1000.png --granulation 1000'])
run_cmd_thread([reporter, '--plugin-type ResponseTimesDistribution --generate-csv response-times-distribution-10.csv --granulation 10'])
run_cmd_thread([reporter, '--plugin-type ResponseTimesDistribution --generate-csv response-times-distribution-100.csv --granulation 100'])
run_cmd_thread([reporter, '--plugin-type ResponseTimesDistribution --generate-csv response-times-distribution-1000.csv --granulation 1000'])
# time series of #no of responses during test
run_cmd_thread([reporter, '--plugin-type ResponseTimesOverTime --generate-png response-times-over-time-10.png --granulation 100'])
run_cmd_thread([reporter, '--plugin-type ResponseTimesOverTime --generate-png response-times-over-time-100.png --granulation 1000'])
run_cmd_thread([reporter, '--plugin-type ResponseTimesOverTime --generate-png response-times-over-time-1000.png --granulation 10000'])
run_cmd_thread([reporter, '--plugin-type ResponseTimesOverTime --generate-csv response-times-over-time-10.csv --granulation 100'])
run_cmd_thread([reporter, '--plugin-type ResponseTimesOverTime --generate-csv response-times-over-time-100.csv --granulation 1000'])
run_cmd_thread([reporter, '--plugin-type ResponseTimesOverTime --generate-csv response-times-over-time-1000.csv --granulation 10000'])
run_cmd_thread([reporter, '--plugin-type ResponseTimesPercentiles --generate-png response-times-percentiles.png'])
run_cmd_thread([reporter, '--plugin-type ResponseTimesPercentiles --generate-csv response-times-percentiles.csv'])
#run_cmd([reporter, '--plugin-type ThroughputOverTime --generate-png throughput-over-time.png'])
#run_cmd([reporter, '--plugin-type ThroughputOverTime --generate-csv throughput-over-time.csv'])
run_cmd_thread([reporter, '--plugin-type ThroughputVsThreads --generate-png throughput-vs-threads.png'])
run_cmd_thread([reporter, '--plugin-type ThroughputVsThreads --generate-csv throughput-vs-threads.csv'])
run_cmd_thread([reporter, '--plugin-type TimesVsThreads --generate-png times-vs-threads.png'])
run_cmd_thread([reporter, '--plugin-type TimesVsThreads --generate-csv times-vs-threads.csv'])
run_cmd_thread([reporter, '--plugin-type TransactionsPerSecond --generate-png transactions-per-sec.png'])
run_cmd_thread([reporter, '--plugin-type TransactionsPerSecond --generate-csv transactions-per-sec.csv'])
run_cmd_thread([reporter, '--plugin-type PageDataExtractorOverTime --generate-png page-data-extractor-over-time.png'])
run_cmd_thread([reporter, '--plugin-type PageDataExtractorOverTime --generate-csv page-data-extractor-over-time.csv'])
max_sleep = 120
slept = 0.0
while threading.active_count() > orig_thread_count:
time.sleep(0.4)
slept += 0.4
if slept > max_sleep:
error('We got a zombie!')
def harvest_results(test_name, results):
aggregate = tablify('aggregate-report.csv')
percentiles = tablify('response-times-percentiles.csv')
results.add_datapoint(test_name, 'avg', '%0.3f' % float(aggregate.get_col_byname('average')[0]))
results.add_datapoint(test_name, 'stdev', '%0.3f' % float(aggregate.get_col_byname('aggregate_report_stddev')[0]))
results.add_datapoint(test_name, 'count', aggregate.get_col_byname('aggregate_report_count')[0])
results.add_datapoint(test_name, 'median', aggregate.get_col_byname('aggregate_report_median')[0])
results.add_datapoint(test_name, 'max', aggregate.get_col_byname('aggregate_report_max')[0])
results.add_datapoint(test_name, 'min', aggregate.get_col_byname('aggregate_report_min')[0])
results.add_datapoint(test_name, '%error', '%0.3f' % float(aggregate.get_col_byname('aggregate_report_error%')[0]))
results.add_datapoint(test_name, '%90', percentiles.get_row_byvalue("Percentiles", "90.0")[1])
results.add_datapoint(test_name, '%95', percentiles.get_row_byvalue("Percentiles", "95.0")[1])
results.add_datapoint(test_name, '%98', percentiles.get_row_byvalue("Percentiles", "98.0")[1])
results.add_datapoint(test_name, '%99', percentiles.get_row_byvalue("Percentiles", "99.0")[1])
def save_results(options, results):
tests = results.get_tests()
if len(tests) == 0:
return
with open('aggregate-report.csv', 'w') as aggr:
writer = csv.writer(aggr, delimiter=',',
quotechar='"', quoting=csv.QUOTE_MINIMAL)
keys = sorted(tests[0][1].keys())
writer.writerow(['Test'] + keys)
for tname, tvalues in tests:
row = [tname]
for k in keys:
if k in tvalues:
row.append(tvalues[k])
else:
row.append('-')
writer.writerow(row)
def upload_results(options, results):
pass
def save_into_file(path, value):
fo = open(path, 'w')
fo.write(str(value))
fo.close()
def generate_includes(options, force=False):
if not os.path.exists('style.css') or force:
run_cmd(['cp', options.script_home + '/html/style.css', './'])
if not os.path.exists('javascript.js') or force:
run_cmd(['cp', options.script_home + '/html/javascript.js', './'])
if not os.path.exists('dygraph-combined.js') or force:
run_cmd(['cp', options.script_home + '/html/dygraph-combined.js', './'])
def update_global_dashboard(options, results, previous_results=None):
for test_name, test_results in results.get_tests():
csv_file = test_name + '.csv'
if not os.path.exists(csv_file):
w = open(csv_file, 'w')
if previous_results:
w.write('Date,Avg,Prev\n')
else:
w.write('Date,Avg\n')
#w.write(options.today + '0.0,0.0\n')
else:
w = open(csv_file, 'a')
if previous_results:
data = {'prev': test_results['avg'], 'prev_stdev': '0.0'}
data.update(test_results)
if previous_results.get_test_by_name(test_name) != None:
prev = previous_results.get_test_by_name(test_name)
data['prev'] = prev['avg']
#data['prev_stdev'] = prev['stdev']
w.write(options.today + ',%(avg)s,%(stdev)s,%(prev)s,%(prev_stdev)s\n' % data)
else:
w.write(options.today + ',%(avg)s,%(stdev)s,-1,0\n' % test_results)
def regenerate_global_dashboard(options):
valid_csvs = sorted([x[0:-4] for x in glob.glob('*.csv')])
with open(options.script_home + '/html/dashboard-view.tmpl', 'r') as t:
tmpl = t.read()
kwargs = load_tmpl_kwargs(options)
one_block = """
<div class="dygraph-container">
<div class="dygraph-block" id="%(test_name)s">
<script type="text/javascript">
doGraph('%(test_name)s');
</script>
</div>
</div>
"""
blocks = '\n'.join([one_block % {'test_name': test_name} for test_name in valid_csvs])
kwargs['blocks'] = blocks
with open('dashboard-view.html', 'w') as w:
w.write(tmpl % kwargs)
def generate_today_dashboard(options, results):
# assemble a page that points to various measurements
with open(options.script_home + '/html/day-view.tmpl', 'r') as t:
tmpl = t.read()
kwargs = load_tmpl_kwargs(options)
valid_tests = []
for f in os.listdir('.'):
if os.path.isfile(f) or f == '.' or f == '..':
continue
valid_tests.append(f)
valid_tests = sorted(valid_tests)
one_block = """
<div class="test-block">
<h3>%(test_name)s</h3>
<p>
<a href="%(test_name)s/test-view.html">
        <img src="%(test_name)s/transactions-per-sec.png" title="Responses that were rejected as invalid + HTTP error codes (ideally, you will see only successes)"/>
</a>
</p>
</div>
"""
blocks = '\n'.join([one_block % {'test_name': test_name} for test_name in valid_tests])
kwargs['blocks'] = blocks
with open('day-view.html', 'w') as w:
w.write(tmpl % kwargs)
def generate_one_run_html(test_name, options):
with open(options.script_home + '/html/test-view.tmpl', 'r') as t:
tmpl = t.read()
kwargs = load_tmpl_kwargs(options)
kwargs['test_name'] = test_name
kwargs['transactions_per_sec'] = str(tablify('transactions-per-sec.csv'))
kwargs['response_codes_per_sec'] = str(tablify('response-codes-per-sec.csv'))
kwargs['bytes_throughput_over_time'] = str(tablify('bytes-throughput-over-time.csv'))
kwargs['latencies_over_time'] = str(tablify('latencies-over-time.csv'))
kwargs['response_times_distribution_10'] = str(tablify('response-times-distribution-10.csv'))
kwargs['response_times_distribution_100'] = str(tablify('response-times-distribution-100.csv'))
kwargs['response_times_distribution_1000'] = str(tablify('response-times-distribution-1000.csv'))
kwargs['response_times_over_time_10'] = str(tablify('response-times-over-time-10.csv'))
kwargs['response_times_over_time_100'] = str(tablify('response-times-over-time-100.csv'))
kwargs['response_times_over_time_1000'] = str(tablify('response-times-over-time-1000.csv'))
kwargs['response_times_percentiles'] = str(tablify('response-times-percentiles.csv'))
kwargs['throughput_vs_threads'] = str(tablify('throughput-vs-threads.csv'))
kwargs['times_vs_threads'] = str(tablify('times-vs-threads.csv'))
with open('test-view.html', 'w') as w:
w.write(tmpl % kwargs)
def load_tmpl_kwargs(options):
kwargs = {}
kwargs.update(options.__dict__)
if os.path.exists('before-test.json'):
before_test = simplejson.load(open('before-test.json', 'r'))
after_test = simplejson.load(open('after-test.json', 'r'))
for k,v in after_test.items():
if before_test[k] != v:
after_test[k] = '<b>%s</b>' % v
kwargs.update(before_test)
kwargs['before_test'] = pformat(before_test)
kwargs['after_test'] = pformat(after_test)
kwargs['jvmCommandLineArgs'] = '\n'.join(kwargs['jvmCommandLineArgs'].split())
if os.path.exists('aggregate-report.csv'):
aggregate = tablify('aggregate-report.csv')
kwargs['aggregate_report'] = str(aggregate)
if os.path.exists('runtime-env.json'):
runtime_env = simplejson.load(open('runtime-env.json', 'r'))
kwargs['runtime_env'] = pformat(runtime_env)
return kwargs
def regenerate_html(options):
options_copy = copy.deepcopy(options)
collected_results = []
for date_folder in sorted(os.listdir('.')):
if not os.path.isdir(date_folder):
continue
with changed_dir(date_folder):
results = JMeterResults()
runtime = simplejson.load(open('runtime-env.json', 'r'))
options_copy.__dict__ = runtime
collected_results.append((runtime, results))
for test_folder in os.listdir('.'):
if not os.path.isdir(test_folder):
continue
with changed_dir(test_folder):
print 'Regenerating test view'
generate_one_run_html(test_folder, options_copy)
harvest_results(test_folder, results)
print 'Regenerating day view'
generate_today_dashboard(options_copy, results)
print 'Regenerating dashboard view'
valid_csvs = sorted([x[0:-4] for x in glob.glob('*.csv')])
for csv in valid_csvs:
run_cmd(['rm', csv + '.csv'])
if len(collected_results) > 0:
previous_results = collected_results[0][1]
for runtime, results in collected_results:
options_copy.__dict__ = runtime
update_global_dashboard(options_copy, results, previous_results)
previous_results = results
regenerate_global_dashboard(options_copy)
print 'Regenerating includes'
generate_includes(options_copy, force=True)
def csv_reader(csv_file, generic=False):
count = -1
colnames = None
fi = open(csv_file, 'r')
    reader = csv.reader(fi, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL,
                        escapechar='\\')
    try:
        for data in reader:
if len(data) == 0 or len(data) > 0 and data[0][0] == '#':
continue
count += 1
if count == 0:
colnames = data
continue
if generic:
yield data
else:
yield Measurement(data[0], count, colnames, data[1:])
finally:
fi.close()
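# Illustrative note on the CSV layout these helpers expect (inferred from
# csv_reader() above and the Measurement class further below, so treat it as
# a sketch): the header names one column per series, while each data row
# carries a date plus a (value, stdev) pair per series, e.g.
#
#   Date,seriesA,seriesB
#   2013-10-01 22:00:00,153.2,4.1,97.0,2.2
#
# which is why Measurement asserts an even number of value columns.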
@contextmanager
def csv_writer(csv_file, col_names=None):
fo = open(csv_file, 'w')
writer = csv.writer(fo, delimiter=',',
quotechar='"', quoting=csv.QUOTE_MINIMAL)
if col_names:
writer.writerow(col_names)
try:
yield writer
finally:
fo.close()
def generate_top_level_comparison(options):
js_blocks = []
one_block = """
<div class="dygraph-container">
<div class="dygraph-block" id="%(comparison_id)s">
<script type="text/javascript">
var data = %(offsets)s;
doComparisonGraph('%(comparison_id)s', '%(test_name)s', data);
</script>
</div>
<p><a href="%(comparison_id)s.csv">download</a> <a href="javascript:change('%(comparison_id)s-table')">show/hide</a><br/>
<pre class="page_details" id="%(comparison_id)s-table">
%(csv_table)s
</pre>
</p>
</div>
"""
# first discover tests that are common
valid_tests = {}
for results_folder in options.generate_comparison:
with changed_dir(results_folder):
valid_csvs = sorted([x[0:-4] for x in glob.glob('*.csv')])
for vc in valid_csvs:
valid_tests.setdefault(vc, 0)
valid_tests[vc] += 1
# remove tests that are not present in all folders
max_count = max(valid_tests.values())
for to_remove in filter(lambda x: x[1] != max_count, valid_tests.items()):
del valid_tests[to_remove[0]]
roundup_correction = options.roundup_correction
aggregated = []
# read in data, apply rounding up corrections
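    # Sketch of the bucketing done below: with an (illustrative)
    # roundup_correction of 3600 seconds, a sample taken at timestamp 7265
    # lands in bucket 7265 - (7265 % 3600) = 7200, so measurements from the
    # compared folders that fall into the same interval share one row.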
for test_name in valid_tests.keys():
results = {}
        print 'reading: %s.csv' % test_name
for test_id, results_folder in zip(range(len(options.generate_comparison)), options.generate_comparison):
for measurement in csv_reader(results_folder + "/" + test_name + '.csv'):
d = list(measurement.get_measurements())[0] # only first one is interesting for us
timestamp = time.mktime(datetime.datetime.strptime(d.date, options.timeformat).timetuple())
rounded_timestamp = timestamp - (timestamp % roundup_correction)
if rounded_timestamp not in results:
results[rounded_timestamp] = [[] for x in range(len(options.generate_comparison))]
results[rounded_timestamp][test_id].append((test_id, timestamp, d.value, d.stdev))
# transform results into simple CSV (some dates may be missing, or be present multiple times [inside interval])
offsets = {}
for k in options.generate_comparison:
offsets[k] = []
report_name = 'comparison-%s-%s-aggregate-report' % ('_'.join(options.generate_comparison), test_name)
with open(report_name + '.csv', 'w') as aggr:
writer = csv.writer(aggr, delimiter=',',
quotechar='"', quoting=csv.QUOTE_MINIMAL)
writer.writerow(['Date'] + options.generate_comparison)
for rounded_timestamp, data in sorted(results.items(), key=lambda x: x[0]):
max_no_of_observations = max([len(x) for x in data])
interval = roundup_correction / (max_no_of_observations+1)
corrected_timestamp = None
for series in range(max_no_of_observations):
row = []
if series == 0:
row.append(datetime.datetime.fromtimestamp(rounded_timestamp).strftime(options.timeformat))
corrected_timestamp = rounded_timestamp
else:
corrected_timestamp += interval
row.append(datetime.datetime.fromtimestamp(corrected_timestamp).strftime(options.timeformat))
for comparison_name, data_row in zip(options.generate_comparison, data):
if len(data_row) > series:
row.append(data_row[series][2]) # value
row.append(data_row[series][3]) # stdev
offsets[comparison_name].append(datetime.datetime.fromtimestamp(data_row[series][1]).strftime(options.timeformat)) # save the real date
else:
row.append('NaN')
row.append('NaN')
writer.writerow(row)
js_blocks.append(one_block % {'comparison_id': report_name,
'test_name': test_name,
'offsets': simplejson.dumps(offsets),
'csv_table': str(tablify(report_name + '.csv', dygraph_format=True))
})
aggregated.append(report_name)
top_comparison_name = 'comparison-%s-aggregate-report' % ('_'.join(options.generate_comparison))
    # now, interesting experiment - create an aggregate view of all
# measurements (arithmetic mean of their means, and stdevs)
aggr_data = {}
for aggr_name in aggregated:
for measurement in csv_reader(aggr_name + '.csv'):
for data in measurement.get_measurements():
if data.date not in aggr_data:
aggr_data[data.date] = {}
pointer = aggr_data[data.date]
if data.series_name not in pointer:
pointer[data.series_name] = {'value':[], 'stdev': []}
pointer = pointer[data.series_name]
pointer['value'].append(data.value)
pointer['stdev'].append(data.stdev)
# sort by date
# pprint(aggr_data)
aggr_data = sorted(aggr_data.items(), key=lambda x: x[0])
offsets = {}
with csv_writer(top_comparison_name + '.csv', ['Date'] + options.generate_comparison) as writer:
for date, values in aggr_data:
row = [date]
for ser_name in options.generate_comparison:
if ser_name in values:
row.append('%0.3f' % (sum(values[ser_name]['value'])/len(values[ser_name]['value']),))
row.append('%0.3f' % (sum(values[ser_name]['stdev'])/len(values[ser_name]['stdev']),))
else:
row.append('NaN')
row.append('NaN')
writer.writerow(row)
js_blocks.insert(0, one_block % {'comparison_id': top_comparison_name,
'test_name': top_comparison_name,
'offsets': simplejson.dumps(offsets),
'csv_table': str(tablify(top_comparison_name + '.csv', dygraph_format=True))
})
with open(options.script_home + '/html/comparison-view.tmpl', 'r') as t:
tmpl = t.read()
kwargs = load_tmpl_kwargs(options)
kwargs['blocks'] = '\n'.join(js_blocks)
kwargs['first_test'] = options.generate_comparison[0]
with open(top_comparison_name + '.html', 'w') as w:
w.write(tmpl % kwargs)
generate_includes(options, force=True)
class JMeterResults(dict):
def __init__(self, *args):
dict.__init__(self, args)
self['tests'] = {}
def add_datapoint(self, name, metric_name, datapoint):
if name not in self['tests']:
self['tests'][name] = {}
self['tests'][name][metric_name] = datapoint
def get_tests(self):
tests = []
for k, v in self['tests'].items():
tests.append((k, v))
return sorted(tests, key=lambda x: x[0])
def get_test_by_name(self, name):
if name in self['tests']:
return self['tests'][name]
else:
return None
class Table:
def __init__(self, *columns):
self.columns = columns
self.length = max(len(col.data) for col in columns)
def get_col_byname(self, name):
for col in self.columns:
if col.name == name:
return col.data
def get_row_byvalue(self, column_name, value):
data = []
for col in self.columns:
if col.name == column_name:
for val, i in zip(col.data, range(len(col.data))):
if val == value:
for col in self.columns:
data.append(col.data[i])
return data
def get_row(self, rownum=None):
for col in self.columns:
if rownum is None:
yield col.format % col.name
else:
yield col.format % col.data[rownum]
def get_line(self):
for col in self.columns:
yield '-' * (col.width + 2)
def join_n_wrap(self, char, elements):
return ' ' + char + char.join(elements) + char
def get_rows(self):
yield self.join_n_wrap('+', self.get_line())
yield self.join_n_wrap('|', self.get_row(None))
yield self.join_n_wrap('+', self.get_line())
for rownum in range(0, self.length):
yield self.join_n_wrap('|', self.get_row(rownum))
yield self.join_n_wrap('+', self.get_line())
def __str__(self):
return '\n'.join(self.get_rows())
class Column():
LEFT, RIGHT = '-', ''
def __init__(self, name, data, align=RIGHT):
self.data = data
self.name = name
self.width = max(len(name), max(len(x) for x in data))
self.format = ' %%%s%ds ' % (align, self.width)
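# Illustrative usage of the ASCII-table helpers above (not part of the
# original flow; tablify() builds Columns from a CSV in the same spirit,
# and Column expects its data as strings):
#
#   t = Table(Column('name', ['a', 'b']), Column('hits', ['10', '7']))
#   print str(t)
#
# which prints roughly:
#
#    +------+------+
#    | name | hits |
#    +------+------+
#    |    a |   10 |
#    |    b |    7 |
#    +------+------+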
class Measurement(object):
def __init__(self, date, count, col_names, values):
self.date = date
self.count = count
self.values = values
self.col_names = col_names
assert len(values) % 2 == 0
def get_number_of_cols(self):
return len(self.values) / 2
def get_measurements(self):
i = 1
for x in range(0, len(self.values), 2):
yield DataPoint(self.date, self.count, self.col_names[i], self.values[x], self.values[x+1])
i += 1
class DataPoint(object):
def __init__(self, date, count, series_name, value, stdev):
self.date = date
self.count = count
self.series_name = series_name
self.value = value and float(value) or 0.0
self.stdev = stdev and float(stdev) or 0.0
def main(argv):
check_basics()
if not os.path.exists(os.path.join(INSTDIR, _NAME)):
run_cmd(['mkdir', os.path.join(INSTDIR, _NAME)])
parser = get_arg_parser()
options, args = parser.parse_args(argv)
check_options(options, args)
with changed_dir(os.path.join(INSTDIR, _NAME)):
update_pid = get_pid('%s/solrjmeter.pid' % options.results_folder)
if update_pid != -1 and check_pid_is_running(update_pid):
error("The script is already running with pid: %s" % update_pid)
if options.purge and os.path.exists(options.results_folder):
run_cmd(['rm', '-fr', options.results_folder])
if not os.path.exists(options.results_folder):
run_cmd(['mkdir', options.results_folder])
acquire_lock('%s/solrjmeter.pid' % options.results_folder)
        # install prerequisites if requested
check_prerequisities(options)
if options.debug or True: # for now always print them
print "============="
for k,v in options.__dict__.items():
if 'password' in k:
print '%s=%s' % (k, 'xxx')
else:
print '%s=%s' % (k, v)
print 'args=', args
print "============="
if options.generate_queries is not None:
if not os.path.exists('queries'):
                run_cmd(['mkdir', 'queries'])
with changed_dir('queries'):
generate_queries(options)
if options.regenerate_html:
with changed_dir(options.results_folder):
regenerate_html(options)
if len(args) > 1:
tests = args[1:]
else:
tests = find_tests(options)
if len(tests) == 0:
print 'WARNING: no test name(s) supplied nor found in: %s' % options.queries_pattern
else:
with changed_dir(options.results_folder):
results = JMeterResults()
previous_results = JMeterResults()
all_tests = sorted(filter(lambda y: '.' in y, filter(lambda x: os.path.isdir(x), os.listdir('.'))))
if len(all_tests) > 0:
print 'Reading results of the previous test'
with changed_dir(all_tests[-1]):
for prev_dir in filter(lambda x: os.path.isdir(x), os.listdir('.')):
try:
with changed_dir(prev_dir):
harvest_results(prev_dir, previous_results)
except IOError:
print 'Error reading: ' + prev_dir
if options.save:
generate_includes(options)
if not os.path.exists(options.today_folder):
run_cmd(['mkdir', options.today_folder])
with changed_dir(options.today_folder):
if options.run_command_before:
run_cmd([options.run_command_before])
if options.save:
runtime = {}
runtime.update(options.__dict__)
runtime['google_password'] = 'XXX'
save_into_file('runtime-env.json', simplejson.dumps(runtime))
before_test = harvest_details_about_montysolr(options)
save_into_file('before-test.json', simplejson.dumps(before_test))
i = 0
for test in tests:
i += 1
print 'Running (%s/%s): %s' % (i, len(tests), test)
test_name = os.path.basename(test)
test_dir = test_name
if not os.path.exists(test_dir):
run_cmd(['mkdir', test_dir])
with changed_dir(test_dir):
run_test(test, options)
generate_graphs(options)
generate_one_run_html(test_name, options)
harvest_results(test_name, results)
if options.save:
after_test = harvest_details_about_montysolr(options)
save_into_file('after-test.json', simplejson.dumps(after_test))
save_results(options, results)
generate_today_dashboard(options, results)
if options.run_command_after:
run_cmd([options.run_command_after])
if options.save:
update_global_dashboard(options, results, previous_results)
regenerate_global_dashboard(options)
if options.upload:
upload_results(options, results)
if options.generate_comparison:
generate_top_level_comparison(options)
remove_lock('%s/solrjmeter.pid' % options.results_folder)
if __name__ == '__main__':
main(sys.argv)
|
linux_adapter.py
|
import array
import fcntl
import socket
import struct
import threading
from bleson.core.hci.constants import *
from bleson.core.hci.type_converters import AdvertisingDataConverters, parse_hci_event_packet, hex_string
from bleson.core.types import Device, BDAddress
from bleson.interfaces.adapter import Adapter
from bleson.logger import log
from .constants import *
class BluetoothHCIAdapter(Adapter):
def __init__(self, device_id=0):
self.device_id = device_id
self._keep_running = True
self._socket = None
self._socket_poll_thread = None
# User callbacks
self.on_advertising_data = None
def __del__(self):
self._keep_running = False
def open(self):
self._socket = socket.socket(socket.AF_BLUETOOTH, socket.SOCK_RAW, socket.BTPROTO_HCI)
self._socket.bind((self.device_id,))
self._socket_poll_thread = threading.Thread(target=self._socket_poller, name='HCISocketPoller')
self._socket_poll_thread.setDaemon(True)
self._socket_poll_thread.start()
self.device = self.get_device_info()
def close(self):
self._socket.close()
def send_cmd(self, cmd, data):
arr = array.array('B', data)
fcntl.ioctl(self._socket.fileno(), cmd, arr)
return arr
def send_cmd_value(self, cmd, value):
fcntl.ioctl(self._socket.fileno(), cmd, value)
def write_buffer(self, data):
log.debug(data)
self._socket.send(data)
def _set_filter(self, data):
self._socket.setsockopt(socket.SOL_HCI, socket.HCI_FILTER, data)
def _socket_poller(self):
while self._keep_running:
data = self._socket.recv(1024) # blocking
try:
self._on_data(data)
except Exception:
log.exception("Exception ignored in HCISocketPoller")
    # Adapter Info
def get_device_info(self):
# C hci_dev_info struct defined at https://git.kernel.org/pub/scm/bluetooth/bluez.git/tree/lib/hci.h#n2382
hci_dev_info_struct = struct.Struct('=H 8s 6B L B 8B 3L 4I 10L')
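        # Rough field map for the struct above (based on bluez's hci_dev_info
        # at the URL above): dev_id (H), name (8s), bdaddr (6B), flags (L),
        # type (B), features (8B), pkt_type/link_policy/link_mode (3L),
        # acl/sco mtu and packet counts (4I), dev_stats (10L). Only the first
        # three fields are unpacked below.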
        request_data = hci_dev_info_struct.pack(
self.device_id,
b'',
0, 0, 0, 0, 0, 0,
0,
0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
        response_data = self.send_cmd(HCIGETDEVINFO, request_data)
hci_dev_info = hci_dev_info_struct.unpack(response_data)
# Just extract a few parts for now
device_id = hci_dev_info[0]
device_name = hci_dev_info[1].split(b'\0',1)[0]
bd_address = hci_dev_info[2:8]
return Device(address=BDAddress(bd_address), name=device_name)
# -------------------------------------------------
# Adapter power control
def on(self):
self.send_cmd_value(HCIDEVUP, self.device_id)
def off(self):
self.send_cmd_value(HCIDEVDOWN, self.device_id)
# -------------------------------------------------
# Scanning
def set_scan_filter(self):
typeMask = 1 << HCI_EVENT_PKT
eventMask1 = (1 << EVT_CMD_COMPLETE) | (1 << EVT_CMD_STATUS)
eventMask2 = 1 << (EVT_LE_META_EVENT - 32)
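        # Note: the HCI event mask is 64 bits split over two 32-bit words of
        # the filter; EVT_LE_META_EVENT (0x3E in bluez) falls in the second
        # word, hence the "- 32" bit offset above.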
opcode = 0
filter = struct.pack("<LLLH", typeMask, eventMask1, eventMask2, opcode)
self._set_filter(filter)
def set_scan_parameters(self):
len = 7
type = SCAN_TYPE_ACTIVE
internal = 0x0010 # ms * 1.6
window = 0x0010 # ms * 1.6
own_addr = LE_PUBLIC_ADDRESS
filter = FILTER_POLICY_NO_WHITELIST
cmd = struct.pack("<BHBBHHBB", HCI_COMMAND_PKT, LE_SET_SCAN_PARAMETERS_CMD, len,
type, internal, window, own_addr, filter )
self.write_buffer(cmd)
def set_scan_enable(self, enabled=False, filter_duplicates=False):
len = 2
enable = 0x01 if enabled else 0x00
dups = 0x01 if filter_duplicates else 0x00
cmd = struct.pack("<BHBBB", HCI_COMMAND_PKT, LE_SET_SCAN_ENABLE_CMD, len, enable, dups)
self.write_buffer(cmd)
# -------------------------------------------------
# Advertising
def set_advertising_filter(self):
typeMask = 1 << HCI_EVENT_PKT | (1 << HCI_ACLDATA_PKT)
eventMask1 = 1 << EVT_DISCONN_COMPLETE | (1 << EVT_CMD_COMPLETE) | (1 << EVT_CMD_STATUS)
eventMask2 = 1 << (EVT_LE_META_EVENT - 32)
opcode = 0
filter = struct.pack("<LLLH", typeMask, eventMask1, eventMask2, opcode)
self._set_filter(filter)
def set_advertise_enable(self, enabled):
cmd = struct.pack("<BHBB",
HCI_COMMAND_PKT,
LE_SET_ADVERTISE_ENABLE_CMD,
1, # cmd parameters length
0x01 if enabled else 0x00
)
self.write_buffer(cmd)
def set_advertising_parameter(self):
cmd = struct.pack("<BHB" + "H H 3B 6B B B",
HCI_COMMAND_PKT,
LE_SET_ADVERTISING_PARAMETERS_CMD,
15, # cmd parameters length
0x00a0, # min interval
0x00a0, # max interval
0, # adv type
                          0, # own addr type
0, # direct addr type
0,0,0,0,0,0, # direct addr
                          0x07, # channel map (all three advertising channels)
                          0x00  # filter policy
)
self.write_buffer(cmd)
def set_scan_response_data(self, data):
padded_data = memoryview(data).tolist()
padded_data.extend([0] * (31 - len(padded_data)))
cmd = struct.pack("<BHB" + "B31B",
HCI_COMMAND_PKT,
LE_SET_SCAN_RESPONSE_DATA_CMD,
32, # cmd parameters length
len(data),
*padded_data
)
self.write_buffer(cmd)
def set_advertising_data(self, data):
padded_data = memoryview(data).tolist()
padded_data.extend([0] * (31 - len(padded_data)))
cmd = struct.pack("<BHB" + "B31B",
HCI_COMMAND_PKT,
LE_SET_ADVERTISING_DATA_CMD,
32, # cmd parameters length
len(data),
*padded_data
)
self.write_buffer(cmd)
# -------------------------------------------------
# Inbound event handler for all messages
def _handle_command_complete(self, data):
log.debug("EVT_CMD_COMPLETE")
# TODO: unpack based on: https://git.kernel.org/pub/scm/bluetooth/bluez.git/tree/lib/hci.h#n1853
if (data[5] << 8) + data[4] == LE_SET_SCAN_PARAMETERS_CMD:
if data[6] == HCI_SUCCESS:
log.debug('LE Scan Parameters Set');
elif (data[5] << 8) + data[4] == LE_SET_SCAN_ENABLE_CMD:
if data[6] == HCI_SUCCESS:
log.debug('LE Scan Enable Set')
elif (data[5] << 8) + data[4] == LE_SET_ADVERTISING_PARAMETERS_CMD:
if data[6] == HCI_SUCCESS:
log.debug('LE Advertising Parameters Set')
elif (data[5] << 8) + data[4] == LE_SET_ADVERTISING_DATA_CMD:
if data[6] == HCI_SUCCESS:
log.debug('LE Advertising Data Set')
elif (data[5] << 8) + data[4] == LE_SET_SCAN_RESPONSE_DATA_CMD:
if data[6] == HCI_SUCCESS:
log.debug('LE Scan Response Data Set')
elif (data[5] << 8) + data[4] == LE_SET_ADVERTISE_ENABLE_CMD:
if data[6] == HCI_SUCCESS:
log.debug('LE Advertise Enable Set')
def _handle_disconnection_complete(self, data):
log.debug("EVT_DISCONN_COMPLETE")
disconn_info = dict(
status=data[3],
handle=(data[5] << 8) + data[4],
reason=data[6]
)
log.debug(disconn_info)
def _handle_meta_event(self, hci_packet):
log.debug("EVT_LE_META_EVENT")
if hci_packet.subevent_code == EVT_LE_ADVERTISING_REPORT:
log.debug('LE Advertising Report')
if self.on_advertising_data:
advertisement = AdvertisingDataConverters.from_hcipacket(hci_packet)
self.on_advertising_data(advertisement)
else:
log.warning("TODO: unhandled HCI Meta Event packet, type={}".format(hci_packet))
def _on_data(self, data):
log.debug("----------------------------------------------------------------------")
log.debug("Socket data: len={}, data={}".format(len(data), hex_string(data)))
if data[0] == HCI_EVENT_PKT:
hci_event_packet = parse_hci_event_packet(data[1:])
log.debug(hci_event_packet)
if data[1] == EVT_CMD_COMPLETE:
self._handle_command_complete(data)
elif data[1] == EVT_DISCONN_COMPLETE:
self._handle_disconnection_complete(data)
elif data[1] == EVT_LE_META_EVENT:
self._handle_meta_event(hci_event_packet)
else:
log.warning("TODO: unhandled HCI Event packet, type={}".format(hci_event_packet))
else:
log.warning("TODO: Unhandled HCI packet, type={}".format(data[0]))
def start_scanning(self):
self.set_scan_enable(False)
self.set_scan_filter()
self.set_scan_parameters()
self.set_scan_enable(True, False)
def stop_scanning(self):
self.set_scan_enable(False)
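    # Rough usage sketch for the scanning API above (illustrative only; real
    # callers would add error handling and bring the adapter up first):
    #
    #   adapter = BluetoothHCIAdapter(device_id=0)
    #   adapter.open()
    #   adapter.on_advertising_data = lambda advertisement: log.info(advertisement)
    #   adapter.start_scanning()
    #   ...
    #   adapter.stop_scanning()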
def start_advertising(self, advertisement, scan_response=None):
self.set_advertising_filter()
self.set_advertise_enable(False)
self.set_advertising_parameter()
self.set_advertising_data(AdvertisingDataConverters.from_advertisement(advertisement))
if scan_response:
log.warning("TODO: support setting scan response")
# self.set_scan_response_data(scan_response_data.data)
self.set_advertise_enable(True)
def stop_advertising(self):
self.set_advertise_enable(False)
|
test_tune_restore.py
|
# coding: utf-8
import signal
from collections import Counter
import multiprocessing
import os
import shutil
import tempfile
import threading
import time
from typing import List
import unittest
import ray
from ray import tune
from ray._private.test_utils import recursive_fnmatch
from ray.exceptions import RayTaskError
from ray.rllib import _register_all
from ray.tune import TuneError
from ray.tune.callback import Callback
from ray.tune.suggest.basic_variant import BasicVariantGenerator
from ray.tune.suggest import Searcher
from ray.tune.trial import Trial
from ray.tune.trial_runner import TrialRunner
from ray.tune.utils import validate_save_restore
from ray.tune.utils.mock_trainable import MyTrainableClass
class TuneRestoreTest(unittest.TestCase):
def setUp(self):
ray.init(num_cpus=1, num_gpus=0, local_mode=True)
tmpdir = tempfile.mkdtemp()
test_name = "TuneRestoreTest"
tune.run(
"PG",
name=test_name,
stop={"training_iteration": 1},
checkpoint_freq=1,
local_dir=tmpdir,
config={
"env": "CartPole-v0",
"framework": "tf",
},
)
logdir = os.path.expanduser(os.path.join(tmpdir, test_name))
self.logdir = logdir
self.checkpoint_path = recursive_fnmatch(logdir, "checkpoint-1")[0]
def tearDown(self):
shutil.rmtree(self.logdir)
ray.shutdown()
_register_all()
def testTuneRestore(self):
self.assertTrue(os.path.isfile(self.checkpoint_path))
tune.run(
"PG",
name="TuneRestoreTest",
stop={"training_iteration": 2}, # train one more iteration.
checkpoint_freq=1,
restore=self.checkpoint_path, # Restore the checkpoint
config={
"env": "CartPole-v0",
"framework": "tf",
},
)
def testPostRestoreCheckpointExistence(self):
"""Tests that checkpoint restored from is not deleted post-restore."""
self.assertTrue(os.path.isfile(self.checkpoint_path))
tune.run(
"PG",
name="TuneRestoreTest",
stop={"training_iteration": 2},
checkpoint_freq=1,
keep_checkpoints_num=1,
restore=self.checkpoint_path,
config={
"env": "CartPole-v0",
"framework": "tf",
},
)
self.assertTrue(os.path.isfile(self.checkpoint_path))
# Defining the callbacks at the file level, so they can be pickled and spawned
# in a separate process.
class SteppingCallback(Callback):
def __init__(self, driver_semaphore, trainer_semaphore):
self.driver_semaphore = driver_semaphore
self.trainer_semaphore = trainer_semaphore
def on_step_end(self, iteration, trials, **info):
self.driver_semaphore.release() # Driver should continue
self.trainer_semaphore.acquire() # Wait until released
def _run(local_dir, driver_semaphore, trainer_semaphore):
def _train(config):
for i in range(7):
tune.report(val=i)
tune.run(
_train,
local_dir=local_dir,
name="interrupt",
callbacks=[SteppingCallback(driver_semaphore, trainer_semaphore)],
)
class TuneInterruptionTest(unittest.TestCase):
def testExperimentInterrupted(self):
local_dir = tempfile.mkdtemp()
# Unix platforms may default to "fork", which is problematic with
# multithreading and GRPC. The child process should always be spawned.
mp_ctx = multiprocessing.get_context("spawn")
driver_semaphore = mp_ctx.Semaphore()
trainer_semaphore = mp_ctx.Semaphore()
process = mp_ctx.Process(
target=_run,
args=(local_dir, driver_semaphore, trainer_semaphore),
name="tune_interrupt",
)
process.daemon = False
process.start()
exp_dir = os.path.join(local_dir, "interrupt")
# Skip first five steps
for i in range(5):
driver_semaphore.acquire() # Wait for callback
trainer_semaphore.release() # Continue training
driver_semaphore.acquire()
experiment_state_file = None
for file in os.listdir(exp_dir):
if file.startswith("experiment_state"):
experiment_state_file = os.path.join(exp_dir, file)
break
self.assertTrue(experiment_state_file)
last_mtime = os.path.getmtime(experiment_state_file)
# Now send kill signal
os.kill(process.pid, signal.SIGINT)
# Release trainer. It should handle the signal and try to
# checkpoint the experiment
trainer_semaphore.release()
time.sleep(2) # Wait for checkpoint
new_mtime = os.path.getmtime(experiment_state_file)
self.assertNotEqual(last_mtime, new_mtime)
shutil.rmtree(local_dir)
def testInterruptDisabledInWorkerThread(self):
# https://github.com/ray-project/ray/issues/22295
# This test will hang without the proper patch because tune.run will fail.
event = threading.Event()
def run_in_thread():
def _train(config):
for i in range(7):
tune.report(val=i)
tune.run(
_train,
)
event.set()
thread = threading.Thread(target=run_in_thread)
thread.start()
event.wait()
thread.join()
ray.shutdown()
os.environ.pop("TUNE_DISABLE_SIGINT_HANDLER", None)
class TuneFailResumeGridTest(unittest.TestCase):
class FailureInjectorCallback(Callback):
"""Adds random failure injection to the TrialExecutor."""
def __init__(self, num_trials=20):
self.num_trials = num_trials
def on_step_end(self, trials, **kwargs):
if len(trials) == self.num_trials:
print(f"Failing after {self.num_trials} trials.")
raise RuntimeError
class CheckStateCallback(Callback):
"""Checks state for the experiment initialization."""
def __init__(self, expected_trials=20):
self.expected_trials = expected_trials
self._checked = False
def on_step_begin(self, iteration, trials, **kwargs):
if not self._checked:
assert len(trials) == self.expected_trials
self._checked = True
class CheckTrialResourcesCallback(Callback):
"""Checks if pending trials are requesting the right amount of
resources.
The check happens exactly once after `check_after` number of calls
to on_step_begin(). Note, we deliberately delay the check to after
`check_after` number of steps. This is because when we start a
tuning job from fresh (rather than restored), trial list is still
empty - any check now would be trivial and thus wasted.
"""
def __init__(self, expected_cpu: int, check_after: int = 1):
self._expected_cpu = expected_cpu
self._checked = False
self._check_after = check_after
def on_step_begin(self, iteration: int, trials: List["Trial"], **info):
if not self._checked and iteration >= self._check_after:
for trial in trials:
if trial.status == Trial.PENDING:
assert (
trial.placement_group_factory.required_resources.get(
"CPU", 0
)
== self._expected_cpu
)
self._checked = True
def setUp(self):
self.logdir = tempfile.mkdtemp()
os.environ["TUNE_GLOBAL_CHECKPOINT_S"] = "0"
# Change back to local_mode=True after this is resolved:
# https://github.com/ray-project/ray/issues/13932
ray.init(local_mode=False, num_cpus=2)
from ray.tune import register_trainable
register_trainable("trainable", MyTrainableClass)
def tearDown(self):
os.environ.pop("TUNE_GLOBAL_CHECKPOINT_S")
shutil.rmtree(self.logdir)
ray.shutdown()
def testFailResumeGridSearch(self):
os.environ["TUNE_MAX_PENDING_TRIALS_PG"] = "1"
config = dict(
num_samples=3,
fail_fast=True,
config={
"test": tune.grid_search([1, 2, 3]),
"test2": tune.grid_search([1, 2, 3]),
},
stop={"training_iteration": 2},
local_dir=self.logdir,
verbose=1,
)
with self.assertRaises(RuntimeError):
tune.run("trainable", callbacks=[self.FailureInjectorCallback()], **config)
analysis = tune.run(
"trainable", resume=True, callbacks=[self.CheckStateCallback()], **config
)
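        # num_samples=3 over the 3x3 grid (test x test2) -> 3 * 9 = 27 trials.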
assert len(analysis.trials) == 27
test_counter = Counter([t.config["test"] for t in analysis.trials])
assert all(v == 9 for v in test_counter.values())
test2_counter = Counter([t.config["test2"] for t in analysis.trials])
assert all(v == 9 for v in test2_counter.values())
# Unfinished trials' resources should be updated.
def testResourceUpdateInResume(self):
os.environ["TUNE_MAX_PENDING_TRIALS_PG"] = "1"
config = dict(
num_samples=3,
fail_fast=True,
config={
"test": tune.grid_search([1, 2, 3]),
"test2": tune.grid_search([1, 2, 3]),
},
stop={"training_iteration": 2},
local_dir=self.logdir,
verbose=1,
)
with self.assertRaises(RuntimeError):
tune.run(
"trainable",
callbacks=[
self.FailureInjectorCallback(),
self.CheckTrialResourcesCallback(1),
],
**config,
)
analysis = tune.run(
"trainable",
resume=True,
resources_per_trial={"cpu": 2},
callbacks=[self.CheckTrialResourcesCallback(2)],
**config,
)
assert len(analysis.trials) == 27
def testFailResumeWithPreset(self):
os.environ["TUNE_MAX_PENDING_TRIALS_PG"] = "1"
search_alg = BasicVariantGenerator(
points_to_evaluate=[{"test": -1, "test2": -1}, {"test": -1}, {"test2": -1}]
)
config = dict(
num_samples=3 + 3, # 3 preset, 3 samples
fail_fast=True,
config={
"test": tune.grid_search([1, 2, 3]),
"test2": tune.grid_search([1, 2, 3]),
},
stop={"training_iteration": 2},
local_dir=self.logdir,
verbose=1,
)
with self.assertRaises(RuntimeError):
tune.run(
"trainable",
callbacks=[self.FailureInjectorCallback(5)],
search_alg=search_alg,
**config,
)
analysis = tune.run(
"trainable",
resume=True,
callbacks=[self.CheckStateCallback(expected_trials=5)],
search_alg=search_alg,
**config,
)
assert len(analysis.trials) == 34
test_counter = Counter([t.config["test"] for t in analysis.trials])
assert test_counter.pop(-1) == 4
assert all(v == 10 for v in test_counter.values())
test2_counter = Counter([t.config["test2"] for t in analysis.trials])
assert test2_counter.pop(-1) == 4
assert all(v == 10 for v in test2_counter.values())
def testFailResumeAfterPreset(self):
os.environ["TUNE_MAX_PENDING_TRIALS_PG"] = "1"
search_alg = BasicVariantGenerator(
points_to_evaluate=[{"test": -1, "test2": -1}, {"test": -1}, {"test2": -1}]
)
config = dict(
num_samples=3 + 3, # 3 preset, 3 samples
fail_fast=True,
config={
"test": tune.grid_search([1, 2, 3]),
"test2": tune.grid_search([1, 2, 3]),
},
stop={"training_iteration": 2},
local_dir=self.logdir,
verbose=1,
)
with self.assertRaises(RuntimeError):
tune.run(
"trainable",
callbacks=[self.FailureInjectorCallback(15)],
search_alg=search_alg,
**config,
)
analysis = tune.run(
"trainable",
resume=True,
callbacks=[self.CheckStateCallback(expected_trials=15)],
search_alg=search_alg,
**config,
)
assert len(analysis.trials) == 34
test_counter = Counter([t.config["test"] for t in analysis.trials])
assert test_counter.pop(-1) == 4
assert all(v == 10 for v in test_counter.values())
test2_counter = Counter([t.config["test2"] for t in analysis.trials])
assert test2_counter.pop(-1) == 4
assert all(v == 10 for v in test2_counter.values())
def testMultiExperimentFail(self):
os.environ["TUNE_MAX_PENDING_TRIALS_PG"] = "1"
experiments = []
for i in range(3):
experiments.append(
tune.Experiment(
run=MyTrainableClass,
name="trainable",
num_samples=2,
config={
"test": tune.grid_search([1, 2, 3]),
},
stop={"training_iteration": 1},
local_dir=self.logdir,
)
)
with self.assertRaises(RuntimeError):
tune.run(
experiments,
callbacks=[self.FailureInjectorCallback(10)],
fail_fast=True,
)
analysis = tune.run(
experiments,
resume=True,
callbacks=[self.CheckStateCallback(expected_trials=10)],
fail_fast=True,
)
assert len(analysis.trials) == 18
def testWarningLargeGrid(self):
config = dict(
num_samples=3,
fail_fast=True,
config={
"test": tune.grid_search(list(range(20))),
"test2": tune.grid_search(list(range(20))),
"test3": tune.grid_search(list(range(20))),
"test4": tune.grid_search(list(range(20))),
"test5": tune.grid_search(list(range(20))),
},
stop={"training_iteration": 2},
local_dir=self.logdir,
verbose=1,
)
with self.assertWarnsRegex(UserWarning, "exceeds the serialization threshold"):
with self.assertRaises(RuntimeError):
tune.run(
"trainable", callbacks=[self.FailureInjectorCallback(10)], **config
)
class TuneExampleTest(unittest.TestCase):
def setUp(self):
ray.init(num_cpus=2)
def tearDown(self):
ray.shutdown()
_register_all()
def testPBTKeras(self):
from ray.tune.examples.pbt_tune_cifar10_with_keras import Cifar10Model
from tensorflow.python.keras.datasets import cifar10
cifar10.load_data()
validate_save_restore(Cifar10Model)
validate_save_restore(Cifar10Model, use_object_store=True)
def testPyTorchMNIST(self):
from ray.tune.examples.mnist_pytorch_trainable import TrainMNIST
from torchvision import datasets
datasets.MNIST("~/data", train=True, download=True)
validate_save_restore(TrainMNIST)
validate_save_restore(TrainMNIST, use_object_store=True)
def testHyperbandExample(self):
validate_save_restore(MyTrainableClass)
validate_save_restore(MyTrainableClass, use_object_store=True)
def testAsyncHyperbandExample(self):
validate_save_restore(MyTrainableClass)
validate_save_restore(MyTrainableClass, use_object_store=True)
class AutoInitTest(unittest.TestCase):
def testTuneRestore(self):
self.assertFalse(ray.is_initialized())
tune.run("__fake", name="TestAutoInit", stop={"training_iteration": 1})
self.assertTrue(ray.is_initialized())
def tearDown(self):
ray.shutdown()
_register_all()
class SearcherTest(unittest.TestCase):
class MockSearcher(Searcher):
def __init__(self, data):
self.data = data
def save(self, path):
with open(path, "w") as f:
f.write(self.data)
def restore(self, path):
with open(path, "r") as f:
self.data = f.read()
def testSaveRestoreDir(self):
tmpdir = tempfile.mkdtemp()
original_data = "hello-its-me"
searcher = self.MockSearcher(original_data)
searcher.save_to_dir(tmpdir)
searcher_2 = self.MockSearcher("no-its-not-me")
searcher_2.restore_from_dir(tmpdir)
assert searcher_2.data == original_data
class WorkingDirectoryTest(unittest.TestCase):
def testWorkingDir(self):
"""Trainables should know the original working dir on driver through env
variable."""
working_dir = os.getcwd()
def f(config):
assert os.environ.get("TUNE_ORIG_WORKING_DIR") == working_dir
tune.run(f)
class TrainableCrashWithFailFast(unittest.TestCase):
def test(self):
"""Trainable crashes with fail_fast flag and the original crash message
should bubble up."""
def f(config):
tune.report({"a": 1})
time.sleep(0.1)
raise RuntimeError("Error happens in trainable!!")
with self.assertRaisesRegex(RayTaskError, "Error happens in trainable!!"):
tune.run(f, fail_fast=TrialRunner.RAISE)
# For some reason, different tests are coupled through tune.registry.
# After running `ResourceExhaustedTest`, there is always a super huge `training_func` to
# be put through GCS, which will fail subsequent tests.
# tldr, make sure that this test is the last test in the file.
class ResourceExhaustedTest(unittest.TestCase):
def test_resource_exhausted_info(self):
"""This is to test if helpful information is displayed when
the objects captured in trainable/training function are too
large and RESOURCES_EXHAUSTED error of gRPC is triggered."""
# generate some random data to be captured implicitly in training func.
from sklearn.datasets import fetch_olivetti_faces
a_large_array = []
for i in range(10):
a_large_array.append(fetch_olivetti_faces())
def training_func(config):
for item in a_large_array:
assert item
with self.assertRaisesRegex(
TuneError,
"The Trainable/training function is too large for grpc resource limit.",
):
tune.run(training_func)
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__] + sys.argv[1:]))
|
pipes.py
|
import struct
import msgpack
import threading
class RemoteError(Exception):
def __init__(self, name, human_msg, human_traceback):
Exception.__init__(self)
self.name = name
self.msg = human_msg
self.traceback = human_traceback
def __str__(self):
return f"{self.name}: {self.msg}\n{self.traceback}"
class PipeRpcCaller:
def __init__(self, on_connection_lost):
self.handler = None
self.call_id = 0
self.on_connection_lost = on_connection_lost
self.max_retries = 5
def connect(self, pipe_name):
self.handler = open(f"\\\\.\\pipe\\{pipe_name}", "r+b", 0)
self._flush()
def is_connected(self):
return self.handler is not None
def _flush(self):
try:
            # Send a dummy ping call, but read all of the stream until EOF
# This forces the stream to be reset
# Yes, it slows initial load a bit, but this is fine in our case
packed = msgpack.packb(
{"msg_id": 0, "data": ["ping", {"pong": False}]})
self.handler.write(struct.pack("I", len(packed)))
self.handler.write(packed)
self.handler.read(2**32)
except:
print(f"Flushed!")
def close(self):
if self.handler is None:
return
try:
h = self.handler
self.handler = None
h.close()
self.call_id = 0
print("Handler closed!")
except Exception as e:
print("Error while closing the handler!")
print(e)
def __call__(self, method, *args, **kargs):
result, err = self.try_invoke(method, *args)
if err:
self.on_connection_lost.emit()
return result
def try_invoke(self, method, *args):
return self._try_invoke_internal(method, 1, *args)
def _try_invoke_internal(self, method, try_count, *args):
if isinstance(method, bytes):
method = method.decode('utf-8')
print(f"Calling {method}")
self.call_id += 1
obj = {
"msg_id": self.call_id,
"data": [
"call",
{
"method": method,
"args": args
}
]
}
packed = msgpack.packb(obj)
try:
self.handler.write(struct.pack("I", len(packed)))
self.handler.write(packed)
# Advance the cursor by reading the packed bytes
# Has to be done because it's not advanced automatically for some reason
self.handler.read(len(packed))
response_len = struct.unpack("I", self.handler.read(4))[0]
response_packed = self.handler.read(response_len)
except:
self.close()
return (None, True)
try:
response = msgpack.unpackb(response_packed, raw=False)
except msgpack.ExtraData as e:
if try_count >= self.max_retries:
print(f"Forcing to fail after {try_count} tries.")
return (None, True)
print(f"Received extra data! Flushing and retrying. More info: {e}")
self._flush()
return self._try_invoke_internal(method, try_count + 1, *args)
response_type = response["data"][0]
if response_type == "response":
return (response["data"][1]["result"], False)
elif response_type == "error":
error_info = response["data"][1]
raise RemoteError(
error_info["err_name"], error_info["err_message"], error_info["stack_trace"])
else:
raise RemoteError("InvalidResponse",
"The response from the method is invalid", "")
def __getattr__(self, method):
return lambda *args, **kargs: self(method, *args, **kargs)
class PipedEventHandler:
def __init__(self, name, on_connection_lost):
self.name = name
self.on_connection_lost = on_connection_lost
self.event_handlers = {}
self.running = False
self.event_loop = None
def start_polling(self):
if self.running:
return
self.running = True
self.event_loop = threading.Thread(target=self._loop)
self.event_loop.start()
def stop_polling(self):
if not self.running:
return
self.running = False
self.event_loop.join()
self.event_loop = None
def on(self, event, handler):
if event not in self.event_handlers:
self.event_handlers[event] = [handler]
else:
self.event_handlers[event].append(handler)
def _loop(self):
print(f"Connecting to {self.name}")
try:
f = None
f = open(f"\\\\.\\pipe\\{self.name}", "r+b", 0)
print("Connected event handler!")
while self.running:
n = struct.unpack("I", f.read(4))[0]
data = f.read(n)
obj = msgpack.unpackb(data, raw=False)
if obj["data"][0] == "call" and obj["data"][1]["method"] == "emit":
args = obj["data"][1]["args"][0]
for evt_args in args:
print(
f"Event {evt_args['event_name']}")
if evt_args["event_name"] in self.event_handlers:
for handler in self.event_handlers[evt_args["event_name"]]:
handler.emit(evt_args["args"])
f.close()
except:
if f is not None:
f.close()
self.running = False
self.event_loop = None
self.on_connection_lost.emit()
|
midi2cv.py
|
# midi2cv.py
#
# https://github.com/schollz/midi2cv
#
# convert incoming midi signals to a calibrated voltage
#
# run 'python3 midi2cv.py --tune' to generate a calibration
# run 'python3 midi2cv.py --play' to listen to midi
#
import sys
import threading
import time
import os
import json
import math
from subprocess import Popen, PIPE, STDOUT
from signal import signal, SIGINT
import click
import numpy as np
from loguru import logger
import mido
from mido.ports import MultiPort
import termplotlib as tpl
# rail-to-rail voltage
# set this with `--vdd`
rail_to_rail_vdd = 5.2
voltage_adjustment = 0
mb = [1.51, 1.38]
keys_on = 0
#
# mcp4725 functions
#
def init_mcp4725():
global i2c, dac
import board
import busio
import adafruit_mcp4725
i2c = busio.I2C(board.SCL, board.SDA)
dac = adafruit_mcp4725.MCP4725(i2c)
set_voltage(0)
def set_voltage(volts):
global dac
volts += voltage_adjustment
# logger.info("setting voltage={}", volts)
if volts >= 0 and volts < rail_to_rail_vdd:
dac.value = int(round(float(volts) / rail_to_rail_vdd * 65535.0))
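    # Example of the scaling above (with voltage_adjustment left at 0 and the
    # default rail_to_rail_vdd of 5.2 V): asking for 2.6 V writes
    # int(round(2.6 / 5.2 * 65535.0)) = 32768, i.e. half of the 16-bit range
    # the adafruit_mcp4725 driver exposes through dac.value.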
#
# frequency / midi / voltage conversions
#
def freq_to_voltage(freq):
return mb[0] * math.log(freq) + mb[1]
def note_to_freq(note):
a = 440 # frequency of A (common value is 440Hz)
return (a / 32) * (2 ** ((note - 9) / 12))
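# Worked example for note_to_freq() above: MIDI note 69 (A4) gives
# (440 / 32) * 2 ** ((69 - 9) / 12) = 13.75 * 2 ** 5 = 440.0 Hz
# (true division, so Python 3 as the usage notes at the top assume).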
def note_to_voltage(note):
return freq_to_voltage(note_to_freq(note))
def match_note_to_freq(freq):
    closestNote = -1
closestAmount = 10000
closestFreq = 10000
for note in range(1, 90):
f = note_to_freq(note)
if abs(f - freq) < closestAmount:
closestAmount = abs(f - freq)
            closestNote = note
closestFreq = f
    return closestNote
#
# calibration / tuning
#
def do_tuning():
print(
"""note! before tuning...
- ...make sure that your synth is connected
via the USB audio adapter line-in.
- ...make sure that your synth outputs only
pure tones (turn off effects!).
"""
)
for i in range(5):
print("initiating tuning in {}".format(5 - i), end="\r")
time.sleep(1)
voltage_to_frequency = {}
previous_freq = 0
for voltage in range(260, int(rail_to_rail_vdd * 80), 5):
voltage = float(voltage) / 100.0
freq = sample_frequency_at_voltage(voltage)
if freq < previous_freq:
continue
voltage_to_frequency[voltage] = freq
previous_freq = freq
os.system("clear")
plot_points(voltage_to_frequency)
with open("voltage_to_frequency.json", "w") as f:
f.write(json.dumps(voltage_to_frequency))
def plot_points(voltage_to_frequency):
x = []
y0 = []
for k in voltage_to_frequency:
x.append(float(k))
y0.append(voltage_to_frequency[k])
fig = tpl.figure()
print("\n")
fig.plot(
x,
y0,
plot_command="plot '-' w points",
width=50,
height=20,
xlabel="voltage (v)",
title="frequency (hz) vs voltage",
)
fig.show()
print("\n")
def load_tuning():
global mb
voltage_to_frequency = json.load(open("voltage_to_frequency.json", "rb"))
x = []
y = []
y0 = []
for k in voltage_to_frequency:
x.append(float(k))
y0.append(voltage_to_frequency[k])
y.append(math.log(voltage_to_frequency[k]))
mb = np.polyfit(y, x, 1)
fig = tpl.figure()
print("\n")
fig.plot(
x,
y0,
plot_command="plot '-' w points",
width=60,
height=22,
xlabel="voltage (v)",
title="frequency (hz) vs voltage",
label="freq = exp((volts{:+2.2f})/{:2.2f}) ".format(mb[1], mb[0]),
)
fig.show()
print("\n")
time.sleep(1)
# plx.scatter(x, y,cols=80,rows=10,xlim=[np.min(x), np.max(x)])
# plx.show()
def check_tuning():
adjustment = []
cents_off = []
for i in range(60, 80, 2):
freq = sample_frequency_at_voltage(note_to_voltage(i))
cents = 1200 * np.log2(note_to_freq(i) / freq)
print(
"midi={}, target={:2.1f} hz, observed={:2.1f} hz, cents off={:2.1f} cents".format(
i, note_to_freq(i), freq, cents
)
)
cents_off.append(cents)
cents_off_mean = np.mean(cents_off)
cents_off_sd = np.std(cents_off)
print("mean cents off: {0:+} +/- {0:+}".format(cents_off_mean, cents_off_sd))
def sample_frequency_at_voltage(voltage):
set_voltage(voltage)
time.sleep(1.0)
freq = get_frequency_analysis()
# print("{:2.2f} hz at {:2.2f} volt".format(freq, voltage))
return freq
def get_frequency_analysis():
cmd = "arecord -d 1 -f cd -t wav -D sysdefault:CARD=1 /tmp/1s.wav"
p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
output = p.stdout.read()
if b"Recording WAVE" not in output:
        raise RuntimeError(output)
# cmd = "sox /tmp/1s.wav -n stat -freq"
cmd = "aubio pitch -m schmitt -H 1024 /tmp/1s.wav"
p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
output = p.stdout.read()
with open("/tmp/1s.dat", "wb") as f:
f.write(output)
freq = analyze_aubio()
return freq
def analyze_aubio():
gathered_freqs = []
with open("/tmp/1s.dat", "r") as f:
linenum = 0
for line in f:
linenum += 1
if linenum < 5:
continue
s = line.split()
if len(s) != 2:
continue
freq = float(s[1])
if freq > 100:
gathered_freqs.append(freq)
if len(gathered_freqs) == 0:
return -1
avg = np.median(gathered_freqs)
return avg
def analyze_sox():
previous_amp = 0
previous_freq = 0
gathering = -1
gathered_freqs = []
gathered_amps = []
known_frequencies = {}
known_powers = {}
with open("/tmp/1s.dat") as f:
for line in f:
line = line.strip()
if ":" in line:
continue
nums = line.split()
if len(nums) > 2:
continue
amp = float(nums[1])
freq = float(nums[0])
if amp > 10 and amp > previous_amp:
gathering = 0
if gathering == 0:
gathered_amps = []
gathered_freqs = []
gathered_freqs.append(previous_freq)
gathered_amps.append(previous_amp)
if gathering > -1:
gathering += 1
gathered_freqs.append(freq)
gathered_amps.append(amp)
if gathering == 3:
gathering = -1
freq_power = np.sum(gathered_amps)
freq_average = (
np.sum(np.multiply(gathered_amps, gathered_freqs)) / freq_power
)
found = False
for f in known_frequencies:
if freq_average < f * 0.92 or freq_average > f * 1.08:
continue
found = True
known_frequencies[f].append(freq_average)
known_powers[f].append(freq_power)
if not found:
known_frequencies[freq_average] = [freq_average]
known_powers[freq_average] = [freq_power]
previous_freq = freq
previous_amp = amp
freq_and_power = {}
for f in known_frequencies:
freq_and_power[np.mean(known_frequencies[f])] = np.mean(known_powers[f])
for i, v in enumerate(
sorted(freq_and_power.items(), key=lambda x: x[1], reverse=True)
):
if i == 1:
return v[0]
return -1
#
# midi listeners
#
def midi(name):
global keys_on
logger.info("listening on '{}'", name)
with mido.open_input(name) as inport:
name = name.split()
if len(name) > 2:
name = " ".join(name[:2])
else:
name = " ".join(name)
name = name.lower()
for msg in inport:
if msg.type == "note_on":
logger.info(f"[{name}] {msg.type} {msg.note} {msg.velocity}")
set_voltage(note_to_voltage(msg.note))
keys_on += 1
elif msg.type == "note_off":
keys_on -= 1
if keys_on == 0:
set_voltage(0)
def listen_for_midi():
inputs = mido.get_input_names()
for name in inputs:
t = threading.Thread(target=midi, args=(name,))
t.daemon = True
t.start()
while True:
time.sleep(10)
#
# cli
#
def handler(signal_received, frame):
try:
set_voltage(0)
except:
pass
logger.info("exiting")
sys.exit(0)
@click.command()
@click.option("--vdd", help="set the rail-to-rail voltage", default=5.2)
@click.option("--tune", help="activate tuning", is_flag=True, default=False)
@click.option("--play", help="initialize playing", is_flag=True, default=False)
@click.option("--adj", help="adjust voltage", default=0.0)
@click.option("--do", help="runs a function (debugging)", is_flag=True, default=False)
@click.option(
"--noinit",
help="do not intiialize mcp4725 (debugging)",
is_flag=True,
default=False,
)
def gorun(tune, play, vdd, noinit, adj, do):
signal(SIGINT, handler)
global rail_to_rail_vdd
global voltage_adjustment
rail_to_rail_vdd = vdd
voltage_adjustment = adj
if not noinit:
init_mcp4725()
if tune:
do_tuning()
check_tuning()
if play:
load_tuning()
listen_for_midi()
if __name__ == "__main__":
logger.remove()
logger.add(
sys.stderr,
format="<green>{time:HH:mm:ss}</green> (<cyan>{function}:{line}</cyan>) - {message}",
)
print(
"""
███╗ ███╗██╗██████╗ ██╗ ██████╗ ██████╗██╗ ██╗
████╗ ████║██║██╔══██╗██║ ╚════██╗ ██╔════╝██║ ██║
██╔████╔██║██║██║ ██║██║ █████╔╝ ██║ ██║ ██║
██║╚██╔╝██║██║██║ ██║██║ ██╔═══╝ ██║ ╚██╗ ██╔╝
██║ ╚═╝ ██║██║██████╔╝██║ ███████╗ ╚██████╗ ╚████╔╝
╚═╝ ╚═╝╚═╝╚═════╝ ╚═╝ ╚══════╝ ╚═════╝ ╚═══╝
version v0.2.0 (github.com/schollz/midi2cv)
convert any incoming midi signal into a control voltage
from your raspberry pi.
"""
)
gorun()
|
__init__.py
|
import re, threading, time, traceback
from .DouYu import DouYuDanMuClient
from .Panda import PandaDanMuClient
from .ZhanQi import ZhanQiDanMuClient
from .QuanMin import QuanMinDanMuClient
from .Bilibili import BilibiliDanMuClient
from .HuoMao import HuoMaoDanMuClient
from .log import set_logging
from .config import VERSION
__version__ = VERSION
__all__ = ['DanMuClient']
class DanMuClient(object):
def __init__(self, url):
self.__url = ''
self.__baseClient = None
self.__client = None
self.__functionDict = {'default': lambda x: 0}
self.__isRunning = False
if 'http://' == url[:7]:
self.__url = url
else:
self.__url = 'http://' + url
for u, bc in {'panda.tv' : PandaDanMuClient,
'douyu.com' : DouYuDanMuClient,
'quanmin.tv' : QuanMinDanMuClient,
'zhanqi.tv' : ZhanQiDanMuClient,
'live.bilibili.com' : BilibiliDanMuClient,
'huomao.com' : HuoMaoDanMuClient, }.items() :
if re.match(r'^(?:http://)?.*?%s/(.+?)$' % u, url):
self.__baseClient = bc; break
def __register(self, fn, msgType):
if fn is None:
if msgType == 'default':
self.__functionDict['default'] = lambda x: 0
elif self.__functionDict.get(msgType):
del self.__functionDict[msgType]
else:
self.__functionDict[msgType] = fn
def isValid(self):
return self.__baseClient is not None
def default(self, fn):
self.__register(fn, 'default')
return fn
def danmu(self, fn):
self.__register(fn, 'danmu')
return fn
def gift(self, fn):
self.__register(fn, 'gift')
return fn
def other(self, fn):
self.__register(fn, 'other')
return fn
def start(self, blockThread = False, pauseTime = .1):
if not self.__baseClient or self.__isRunning: return False
self.__client = self.__baseClient(self.__url)
self.__isRunning = True
receiveThread = threading.Thread(target=self.__client.start)
receiveThread.setDaemon(True)
receiveThread.start()
def _start():
while self.__isRunning:
if self.__client.msgPipe:
msg = self.__client.msgPipe.pop()
fn = self.__functionDict.get(msg['MsgType'],
self.__functionDict['default'])
try:
fn(msg)
except:
traceback.print_exc()
else:
time.sleep(pauseTime)
if blockThread:
try:
_start()
except KeyboardInterrupt:
print('Bye~')
else:
danmuThread = threading.Thread(target = _start)
danmuThread.setDaemon(True)
danmuThread.start()
return True
def stop(self):
self.__isRunning = False
if self.__client: self.__client.deprecated = True
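# Minimal usage sketch (decorator registration as defined above; the room
# URL is a placeholder and message keys depend on the platform client):
#
#   dmc = DanMuClient('http://www.douyu.com/some_room')
#
#   @dmc.danmu
#   def on_danmu(msg):
#       print(msg)   # msg is a dict; at least 'MsgType' is set
#
#   dmc.start(blockThread=True)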
|
test_base_events.py
|
"""Tests for base_events.py"""
import errno
import logging
import math
import os
import socket
import sys
import threading
import time
import unittest
from unittest import mock
import asyncio
from asyncio import base_events
from asyncio import constants
from asyncio import test_utils
try:
from test import support
except ImportError:
from asyncio import test_support as support
try:
from test.support.script_helper import assert_python_ok
except ImportError:
try:
from test.script_helper import assert_python_ok
except ImportError:
from asyncio.test_support import assert_python_ok
MOCK_ANY = mock.ANY
PY34 = sys.version_info >= (3, 4)
def mock_socket_module():
m_socket = mock.MagicMock(spec=socket)
for name in (
'AF_INET', 'AF_INET6', 'AF_UNSPEC', 'IPPROTO_TCP', 'IPPROTO_UDP',
'SOCK_STREAM', 'SOCK_DGRAM', 'SOL_SOCKET', 'SO_REUSEADDR', 'inet_pton'
):
if hasattr(socket, name):
setattr(m_socket, name, getattr(socket, name))
else:
delattr(m_socket, name)
m_socket.socket = mock.MagicMock()
m_socket.socket.return_value = test_utils.mock_nonblocking_socket()
m_socket.getaddrinfo._is_coroutine = False
return m_socket
def patch_socket(f):
return mock.patch('asyncio.base_events.socket',
new_callable=mock_socket_module)(f)
class BaseEventTests(test_utils.TestCase):
def test_ipaddr_info(self):
UNSPEC = socket.AF_UNSPEC
INET = socket.AF_INET
INET6 = socket.AF_INET6
STREAM = socket.SOCK_STREAM
DGRAM = socket.SOCK_DGRAM
TCP = socket.IPPROTO_TCP
UDP = socket.IPPROTO_UDP
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', 1, INET, STREAM, TCP))
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info(b'1.2.3.4', 1, INET, STREAM, TCP))
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, STREAM, TCP))
self.assertEqual(
(INET, DGRAM, UDP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, DGRAM, UDP))
# Socket type STREAM implies TCP protocol.
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, STREAM, 0))
# Socket type DGRAM implies UDP protocol.
self.assertEqual(
(INET, DGRAM, UDP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, DGRAM, 0))
# No socket type.
self.assertIsNone(
base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, 0, 0))
# IPv4 address with family IPv6.
self.assertIsNone(
base_events._ipaddr_info('1.2.3.4', 1, INET6, STREAM, TCP))
self.assertEqual(
(INET6, STREAM, TCP, '', ('::3', 1)),
base_events._ipaddr_info('::3', 1, INET6, STREAM, TCP))
self.assertEqual(
(INET6, STREAM, TCP, '', ('::3', 1)),
base_events._ipaddr_info('::3', 1, UNSPEC, STREAM, TCP))
# IPv6 address with family IPv4.
self.assertIsNone(
base_events._ipaddr_info('::3', 1, INET, STREAM, TCP))
# IPv6 address with zone index.
self.assertIsNone(
base_events._ipaddr_info('::3%lo0', 1, INET6, STREAM, TCP))
if hasattr(socket, 'SOCK_NONBLOCK'):
self.assertEqual(
None,
base_events._ipaddr_info(
'1.2.3.4', 1, INET, STREAM | socket.SOCK_NONBLOCK, TCP))
def test_port_parameter_types(self):
# Test obscure kinds of arguments for "port".
INET = socket.AF_INET
STREAM = socket.SOCK_STREAM
TCP = socket.IPPROTO_TCP
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 0)),
base_events._ipaddr_info('1.2.3.4', None, INET, STREAM, TCP))
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 0)),
base_events._ipaddr_info('1.2.3.4', b'', INET, STREAM, TCP))
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 0)),
base_events._ipaddr_info('1.2.3.4', '', INET, STREAM, TCP))
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', '1', INET, STREAM, TCP))
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', b'1', INET, STREAM, TCP))
@patch_socket
def test_ipaddr_info_no_inet_pton(self, m_socket):
del m_socket.inet_pton
self.assertIsNone(base_events._ipaddr_info('1.2.3.4', 1,
socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_TCP))
class BaseEventLoopTests(test_utils.TestCase):
def setUp(self):
super().setUp()
self.loop = base_events.BaseEventLoop()
self.loop._selector = mock.Mock()
self.loop._selector.select.return_value = ()
self.set_event_loop(self.loop)
def test_not_implemented(self):
m = mock.Mock()
self.assertRaises(
NotImplementedError,
self.loop._make_socket_transport, m, m)
self.assertRaises(
NotImplementedError,
self.loop._make_ssl_transport, m, m, m, m)
self.assertRaises(
NotImplementedError,
self.loop._make_datagram_transport, m, m)
self.assertRaises(
NotImplementedError, self.loop._process_events, [])
self.assertRaises(
NotImplementedError, self.loop._write_to_self)
self.assertRaises(
NotImplementedError,
self.loop._make_read_pipe_transport, m, m)
self.assertRaises(
NotImplementedError,
self.loop._make_write_pipe_transport, m, m)
gen = self.loop._make_subprocess_transport(m, m, m, m, m, m, m)
with self.assertRaises(NotImplementedError):
gen.send(None)
def test_close(self):
self.assertFalse(self.loop.is_closed())
self.loop.close()
self.assertTrue(self.loop.is_closed())
# it should be possible to call close() more than once
self.loop.close()
self.loop.close()
# operation blocked when the loop is closed
f = asyncio.Future(loop=self.loop)
self.assertRaises(RuntimeError, self.loop.run_forever)
self.assertRaises(RuntimeError, self.loop.run_until_complete, f)
def test__add_callback_handle(self):
h = asyncio.Handle(lambda: False, (), self.loop)
self.loop._add_callback(h)
self.assertFalse(self.loop._scheduled)
self.assertIn(h, self.loop._ready)
def test__add_callback_cancelled_handle(self):
h = asyncio.Handle(lambda: False, (), self.loop)
h.cancel()
self.loop._add_callback(h)
self.assertFalse(self.loop._scheduled)
self.assertFalse(self.loop._ready)
def test_set_default_executor(self):
executor = mock.Mock()
self.loop.set_default_executor(executor)
self.assertIs(executor, self.loop._default_executor)
def test_getnameinfo(self):
sockaddr = mock.Mock()
self.loop.run_in_executor = mock.Mock()
self.loop.getnameinfo(sockaddr)
self.assertEqual(
(None, socket.getnameinfo, sockaddr, 0),
self.loop.run_in_executor.call_args[0])
def test_call_soon(self):
def cb():
pass
h = self.loop.call_soon(cb)
self.assertEqual(h._callback, cb)
self.assertIsInstance(h, asyncio.Handle)
self.assertIn(h, self.loop._ready)
def test_call_soon_non_callable(self):
self.loop.set_debug(True)
with self.assertRaisesRegex(TypeError, 'a callable object'):
self.loop.call_soon(1)
def test_call_later(self):
def cb():
pass
h = self.loop.call_later(10.0, cb)
self.assertIsInstance(h, asyncio.TimerHandle)
self.assertIn(h, self.loop._scheduled)
self.assertNotIn(h, self.loop._ready)
def test_call_later_negative_delays(self):
calls = []
def cb(arg):
calls.append(arg)
self.loop._process_events = mock.Mock()
self.loop.call_later(-1, cb, 'a')
self.loop.call_later(-2, cb, 'b')
test_utils.run_briefly(self.loop)
self.assertEqual(calls, ['b', 'a'])
def test_time_and_call_at(self):
def cb():
self.loop.stop()
self.loop._process_events = mock.Mock()
delay = 0.1
when = self.loop.time() + delay
self.loop.call_at(when, cb)
t0 = self.loop.time()
self.loop.run_forever()
dt = self.loop.time() - t0
# 50 ms: maximum granularity of the event loop
self.assertGreaterEqual(dt, delay - 0.050, dt)
# tolerate a difference of +800 ms because some Python buildbots
# are really slow
self.assertLessEqual(dt, 0.9, dt)
def check_thread(self, loop, debug):
def cb():
pass
loop.set_debug(debug)
if debug:
msg = ("Non-thread-safe operation invoked on an event loop other "
"than the current one")
with self.assertRaisesRegex(RuntimeError, msg):
loop.call_soon(cb)
with self.assertRaisesRegex(RuntimeError, msg):
loop.call_later(60, cb)
with self.assertRaisesRegex(RuntimeError, msg):
loop.call_at(loop.time() + 60, cb)
else:
loop.call_soon(cb)
loop.call_later(60, cb)
loop.call_at(loop.time() + 60, cb)
def test_check_thread(self):
def check_in_thread(loop, event, debug, create_loop, fut):
# wait until the event loop is running
event.wait()
try:
if create_loop:
loop2 = base_events.BaseEventLoop()
try:
asyncio.set_event_loop(loop2)
self.check_thread(loop, debug)
finally:
asyncio.set_event_loop(None)
loop2.close()
else:
self.check_thread(loop, debug)
except Exception as exc:
loop.call_soon_threadsafe(fut.set_exception, exc)
else:
loop.call_soon_threadsafe(fut.set_result, None)
def test_thread(loop, debug, create_loop=False):
event = threading.Event()
fut = asyncio.Future(loop=loop)
loop.call_soon(event.set)
args = (loop, event, debug, create_loop, fut)
thread = threading.Thread(target=check_in_thread, args=args)
thread.start()
loop.run_until_complete(fut)
thread.join()
self.loop._process_events = mock.Mock()
self.loop._write_to_self = mock.Mock()
# raise RuntimeError if the thread has no event loop
test_thread(self.loop, True)
# check disabled if debug mode is disabled
test_thread(self.loop, False)
# raise RuntimeError if the event loop of the thread is not the called
# event loop
test_thread(self.loop, True, create_loop=True)
# check disabled if debug mode is disabled
test_thread(self.loop, False, create_loop=True)
def test_run_once_in_executor_plain(self):
def cb():
pass
f = asyncio.Future(loop=self.loop)
executor = mock.Mock()
executor.submit.return_value = f
self.loop.set_default_executor(executor)
res = self.loop.run_in_executor(None, cb)
self.assertIs(f, res)
executor = mock.Mock()
executor.submit.return_value = f
res = self.loop.run_in_executor(executor, cb)
self.assertIs(f, res)
self.assertTrue(executor.submit.called)
f.cancel() # Don't complain about abandoned Future.
def test__run_once(self):
h1 = asyncio.TimerHandle(time.monotonic() + 5.0, lambda: True, (),
self.loop)
h2 = asyncio.TimerHandle(time.monotonic() + 10.0, lambda: True, (),
self.loop)
h1.cancel()
self.loop._process_events = mock.Mock()
self.loop._scheduled.append(h1)
self.loop._scheduled.append(h2)
self.loop._run_once()
t = self.loop._selector.select.call_args[0][0]
self.assertTrue(9.5 < t < 10.5, t)
self.assertEqual([h2], self.loop._scheduled)
self.assertTrue(self.loop._process_events.called)
def test_set_debug(self):
self.loop.set_debug(True)
self.assertTrue(self.loop.get_debug())
self.loop.set_debug(False)
self.assertFalse(self.loop.get_debug())
@mock.patch('asyncio.base_events.logger')
def test__run_once_logging(self, m_logger):
def slow_select(timeout):
# Sleep a bit longer than a second to avoid timer resolution
# issues.
time.sleep(1.1)
return []
# logging needs debug flag
self.loop.set_debug(True)
# Log to INFO level if timeout > 1.0 sec.
self.loop._selector.select = slow_select
self.loop._process_events = mock.Mock()
self.loop._run_once()
self.assertEqual(logging.INFO, m_logger.log.call_args[0][0])
def fast_select(timeout):
time.sleep(0.001)
return []
self.loop._selector.select = fast_select
self.loop._run_once()
self.assertEqual(logging.DEBUG, m_logger.log.call_args[0][0])
def test__run_once_schedule_handle(self):
handle = None
processed = False
def cb(loop):
nonlocal processed, handle
processed = True
handle = loop.call_soon(lambda: True)
h = asyncio.TimerHandle(time.monotonic() - 1, cb, (self.loop,),
self.loop)
self.loop._process_events = mock.Mock()
self.loop._scheduled.append(h)
self.loop._run_once()
self.assertTrue(processed)
self.assertEqual([handle], list(self.loop._ready))
def test__run_once_cancelled_event_cleanup(self):
self.loop._process_events = mock.Mock()
self.assertTrue(
0 < base_events._MIN_CANCELLED_TIMER_HANDLES_FRACTION < 1.0)
def cb():
pass
# Set up one "blocking" event that will not be cancelled to
# ensure later cancelled events do not make it to the head
# of the queue and get cleaned.
not_cancelled_count = 1
self.loop.call_later(3000, cb)
# Add less than threshold (base_events._MIN_SCHEDULED_TIMER_HANDLES)
# cancelled handles, ensure they aren't removed
cancelled_count = 2
for x in range(2):
h = self.loop.call_later(3600, cb)
h.cancel()
# Add some cancelled events that will be at head and removed
cancelled_count += 2
for x in range(2):
h = self.loop.call_later(100, cb)
h.cancel()
# This test is invalid if _MIN_SCHEDULED_TIMER_HANDLES is too low
self.assertLessEqual(cancelled_count + not_cancelled_count,
base_events._MIN_SCHEDULED_TIMER_HANDLES)
self.assertEqual(self.loop._timer_cancelled_count, cancelled_count)
self.loop._run_once()
cancelled_count -= 2
self.assertEqual(self.loop._timer_cancelled_count, cancelled_count)
self.assertEqual(len(self.loop._scheduled),
cancelled_count + not_cancelled_count)
# Need enough events to pass _MIN_CANCELLED_TIMER_HANDLES_FRACTION
# so that deletion of cancelled events will occur on next _run_once
add_cancel_count = int(math.ceil(
base_events._MIN_SCHEDULED_TIMER_HANDLES *
base_events._MIN_CANCELLED_TIMER_HANDLES_FRACTION)) + 1
add_not_cancel_count = max(base_events._MIN_SCHEDULED_TIMER_HANDLES -
add_cancel_count, 0)
# Add some events that will not be cancelled
not_cancelled_count += add_not_cancel_count
for x in range(add_not_cancel_count):
self.loop.call_later(3600, cb)
# Add enough cancelled events
cancelled_count += add_cancel_count
for x in range(add_cancel_count):
h = self.loop.call_later(3600, cb)
h.cancel()
# Ensure all handles are still scheduled
self.assertEqual(len(self.loop._scheduled),
cancelled_count + not_cancelled_count)
self.loop._run_once()
# Ensure cancelled events were removed
self.assertEqual(len(self.loop._scheduled), not_cancelled_count)
# Ensure only uncancelled events remain scheduled
self.assertTrue(all([not x._cancelled for x in self.loop._scheduled]))
def test_run_until_complete_type_error(self):
self.assertRaises(TypeError,
self.loop.run_until_complete, 'blah')
def test_run_until_complete_loop(self):
task = asyncio.Future(loop=self.loop)
other_loop = self.new_test_loop()
self.addCleanup(other_loop.close)
self.assertRaises(ValueError,
other_loop.run_until_complete, task)
def test_run_until_complete_loop_orphan_future_close_loop(self):
class ShowStopper(BaseException):
pass
async def foo(delay):
await asyncio.sleep(delay, loop=self.loop)
def throw():
raise ShowStopper
self.loop._process_events = mock.Mock()
self.loop.call_soon(throw)
try:
self.loop.run_until_complete(foo(0.1))
except ShowStopper:
pass
# This call fails if run_until_complete does not clean up
# done-callback for the previous future.
self.loop.run_until_complete(foo(0.2))
def test_subprocess_exec_invalid_args(self):
args = [sys.executable, '-c', 'pass']
# missing program parameter (empty args)
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_exec,
asyncio.SubprocessProtocol)
# expected multiple arguments, not a list
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_exec,
asyncio.SubprocessProtocol, args)
# program arguments must be strings, not int
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_exec,
asyncio.SubprocessProtocol, sys.executable, 123)
# universal_newlines, shell, bufsize must not be set
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_exec,
asyncio.SubprocessProtocol, *args, universal_newlines=True)
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_exec,
asyncio.SubprocessProtocol, *args, shell=True)
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_exec,
asyncio.SubprocessProtocol, *args, bufsize=4096)
def test_subprocess_shell_invalid_args(self):
# expected a string, not an int or a list
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_shell,
asyncio.SubprocessProtocol, 123)
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_shell,
asyncio.SubprocessProtocol, [sys.executable, '-c', 'pass'])
# universal_newlines, shell, bufsize must not be set
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_shell,
asyncio.SubprocessProtocol, 'exit 0', universal_newlines=True)
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_shell,
asyncio.SubprocessProtocol, 'exit 0', shell=True)
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_shell,
asyncio.SubprocessProtocol, 'exit 0', bufsize=4096)
def test_default_exc_handler_callback(self):
self.loop._process_events = mock.Mock()
def zero_error(fut):
fut.set_result(True)
1/0
# Test call_soon (events.Handle)
with mock.patch('asyncio.base_events.logger') as log:
fut = asyncio.Future(loop=self.loop)
self.loop.call_soon(zero_error, fut)
fut.add_done_callback(lambda fut: self.loop.stop())
self.loop.run_forever()
log.error.assert_called_with(
test_utils.MockPattern('Exception in callback.*zero'),
exc_info=(ZeroDivisionError, MOCK_ANY, MOCK_ANY))
# Test call_later (events.TimerHandle)
with mock.patch('asyncio.base_events.logger') as log:
fut = asyncio.Future(loop=self.loop)
self.loop.call_later(0.01, zero_error, fut)
fut.add_done_callback(lambda fut: self.loop.stop())
self.loop.run_forever()
log.error.assert_called_with(
test_utils.MockPattern('Exception in callback.*zero'),
exc_info=(ZeroDivisionError, MOCK_ANY, MOCK_ANY))
def test_default_exc_handler_coro(self):
self.loop._process_events = mock.Mock()
@asyncio.coroutine
def zero_error_coro():
yield from asyncio.sleep(0.01, loop=self.loop)
1/0
# Test Future.__del__
with mock.patch('asyncio.base_events.logger') as log:
fut = asyncio.ensure_future(zero_error_coro(), loop=self.loop)
fut.add_done_callback(lambda *args: self.loop.stop())
self.loop.run_forever()
fut = None # Trigger Future.__del__ or futures._TracebackLogger
support.gc_collect()
if PY34:
# Future.__del__ in Python 3.4 logs error with
# an actual exception context
log.error.assert_called_with(
test_utils.MockPattern('.*exception was never retrieved'),
exc_info=(ZeroDivisionError, MOCK_ANY, MOCK_ANY))
else:
# futures._TracebackLogger logs only textual traceback
log.error.assert_called_with(
test_utils.MockPattern(
'.*exception was never retrieved.*ZeroDiv'),
exc_info=False)
def test_set_exc_handler_invalid(self):
with self.assertRaisesRegex(TypeError, 'A callable object or None'):
self.loop.set_exception_handler('spam')
def test_set_exc_handler_custom(self):
def zero_error():
1/0
def run_loop():
handle = self.loop.call_soon(zero_error)
self.loop._run_once()
return handle
self.loop.set_debug(True)
self.loop._process_events = mock.Mock()
self.assertIsNone(self.loop.get_exception_handler())
mock_handler = mock.Mock()
self.loop.set_exception_handler(mock_handler)
self.assertIs(self.loop.get_exception_handler(), mock_handler)
handle = run_loop()
mock_handler.assert_called_with(self.loop, {
'exception': MOCK_ANY,
'message': test_utils.MockPattern(
'Exception in callback.*zero_error'),
'handle': handle,
'source_traceback': handle._source_traceback,
})
mock_handler.reset_mock()
self.loop.set_exception_handler(None)
with mock.patch('asyncio.base_events.logger') as log:
run_loop()
log.error.assert_called_with(
test_utils.MockPattern(
'Exception in callback.*zero'),
exc_info=(ZeroDivisionError, MOCK_ANY, MOCK_ANY))
assert not mock_handler.called
def test_set_exc_handler_broken(self):
def run_loop():
def zero_error():
1/0
self.loop.call_soon(zero_error)
self.loop._run_once()
def handler(loop, context):
raise AttributeError('spam')
self.loop._process_events = mock.Mock()
self.loop.set_exception_handler(handler)
with mock.patch('asyncio.base_events.logger') as log:
run_loop()
log.error.assert_called_with(
test_utils.MockPattern(
'Unhandled error in exception handler'),
exc_info=(AttributeError, MOCK_ANY, MOCK_ANY))
def test_default_exc_handler_broken(self):
_context = None
class Loop(base_events.BaseEventLoop):
_selector = mock.Mock()
_process_events = mock.Mock()
def default_exception_handler(self, context):
nonlocal _context
_context = context
# Simulates custom buggy "default_exception_handler"
raise ValueError('spam')
loop = Loop()
self.addCleanup(loop.close)
asyncio.set_event_loop(loop)
def run_loop():
def zero_error():
1/0
loop.call_soon(zero_error)
loop._run_once()
with mock.patch('asyncio.base_events.logger') as log:
run_loop()
log.error.assert_called_with(
'Exception in default exception handler',
exc_info=True)
def custom_handler(loop, context):
raise ValueError('ham')
_context = None
loop.set_exception_handler(custom_handler)
with mock.patch('asyncio.base_events.logger') as log:
run_loop()
log.error.assert_called_with(
test_utils.MockPattern('Exception in default exception.*'
'while handling.*in custom'),
exc_info=True)
# Check that original context was passed to default
# exception handler.
self.assertIn('context', _context)
self.assertIs(type(_context['context']['exception']),
ZeroDivisionError)
def test_set_task_factory_invalid(self):
with self.assertRaisesRegex(
TypeError, 'task factory must be a callable or None'):
self.loop.set_task_factory(1)
self.assertIsNone(self.loop.get_task_factory())
def test_set_task_factory(self):
self.loop._process_events = mock.Mock()
class MyTask(asyncio.Task):
pass
@asyncio.coroutine
def coro():
pass
factory = lambda loop, coro: MyTask(coro, loop=loop)
self.assertIsNone(self.loop.get_task_factory())
self.loop.set_task_factory(factory)
self.assertIs(self.loop.get_task_factory(), factory)
task = self.loop.create_task(coro())
self.assertTrue(isinstance(task, MyTask))
self.loop.run_until_complete(task)
self.loop.set_task_factory(None)
self.assertIsNone(self.loop.get_task_factory())
task = self.loop.create_task(coro())
self.assertTrue(isinstance(task, asyncio.Task))
self.assertFalse(isinstance(task, MyTask))
self.loop.run_until_complete(task)
def test_env_var_debug(self):
code = '\n'.join((
'import asyncio',
'loop = asyncio.get_event_loop()',
'print(loop.get_debug())'))
# Test with -E to not fail if the unit test was run with
# PYTHONASYNCIODEBUG set to a non-empty string
sts, stdout, stderr = assert_python_ok('-E', '-c', code)
self.assertEqual(stdout.rstrip(), b'False')
sts, stdout, stderr = assert_python_ok('-c', code,
PYTHONASYNCIODEBUG='')
self.assertEqual(stdout.rstrip(), b'False')
sts, stdout, stderr = assert_python_ok('-c', code,
PYTHONASYNCIODEBUG='1')
self.assertEqual(stdout.rstrip(), b'True')
sts, stdout, stderr = assert_python_ok('-E', '-c', code,
PYTHONASYNCIODEBUG='1')
self.assertEqual(stdout.rstrip(), b'False')
sts, stdout, stderr = assert_python_ok('-E', '-X', 'dev',
'-c', code)
self.assertEqual(stdout.rstrip(), b'True')
def test_create_task(self):
class MyTask(asyncio.Task):
pass
@asyncio.coroutine
def test():
pass
class EventLoop(base_events.BaseEventLoop):
def create_task(self, coro):
return MyTask(coro, loop=loop)
loop = EventLoop()
self.set_event_loop(loop)
coro = test()
task = asyncio.ensure_future(coro, loop=loop)
self.assertIsInstance(task, MyTask)
# make warnings quiet
task._log_destroy_pending = False
coro.close()
def test_run_forever_keyboard_interrupt(self):
# Python issue #22601: ensure that the temporary task created by
        # run_forever() consumes the KeyboardInterrupt and so doesn't log
# a warning
@asyncio.coroutine
def raise_keyboard_interrupt():
raise KeyboardInterrupt
self.loop._process_events = mock.Mock()
self.loop.call_exception_handler = mock.Mock()
try:
self.loop.run_until_complete(raise_keyboard_interrupt())
except KeyboardInterrupt:
pass
self.loop.close()
support.gc_collect()
self.assertFalse(self.loop.call_exception_handler.called)
def test_run_until_complete_baseexception(self):
# Python issue #22429: run_until_complete() must not schedule a pending
# call to stop() if the future raised a BaseException
@asyncio.coroutine
def raise_keyboard_interrupt():
raise KeyboardInterrupt
self.loop._process_events = mock.Mock()
try:
self.loop.run_until_complete(raise_keyboard_interrupt())
except KeyboardInterrupt:
pass
def func():
self.loop.stop()
func.called = True
func.called = False
try:
self.loop.call_soon(func)
self.loop.run_forever()
except KeyboardInterrupt:
pass
self.assertTrue(func.called)
def test_single_selecter_event_callback_after_stopping(self):
# Python issue #25593: A stopped event loop may cause event callbacks
# to run more than once.
event_sentinel = object()
callcount = 0
doer = None
def proc_events(event_list):
nonlocal doer
if event_sentinel in event_list:
doer = self.loop.call_soon(do_event)
def do_event():
nonlocal callcount
callcount += 1
self.loop.call_soon(clear_selector)
def clear_selector():
doer.cancel()
self.loop._selector.select.return_value = ()
self.loop._process_events = proc_events
self.loop._selector.select.return_value = (event_sentinel,)
for i in range(1, 3):
with self.subTest('Loop %d/2' % i):
self.loop.call_soon(self.loop.stop)
self.loop.run_forever()
self.assertEqual(callcount, 1)
def test_run_once(self):
# Simple test for test_utils.run_once(). It may seem strange
# to have a test for this (the function isn't even used!) but
        # it's a de facto standard API for library tests.  This tests
# the idiom: loop.call_soon(loop.stop); loop.run_forever().
count = 0
def callback():
nonlocal count
count += 1
self.loop._process_events = mock.Mock()
self.loop.call_soon(callback)
test_utils.run_once(self.loop)
self.assertEqual(count, 1)
def test_run_forever_pre_stopped(self):
# Test that the old idiom for pre-stopping the loop works.
self.loop._process_events = mock.Mock()
self.loop.stop()
self.loop.run_forever()
self.loop._selector.select.assert_called_once_with(0)
class MyProto(asyncio.Protocol):
done = None
def __init__(self, create_future=False):
self.state = 'INITIAL'
self.nbytes = 0
if create_future:
self.done = asyncio.Future()
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'CONNECTED'
transport.write(b'GET / HTTP/1.0\r\nHost: example.com\r\n\r\n')
def data_received(self, data):
assert self.state == 'CONNECTED', self.state
self.nbytes += len(data)
def eof_received(self):
assert self.state == 'CONNECTED', self.state
self.state = 'EOF'
def connection_lost(self, exc):
assert self.state in ('CONNECTED', 'EOF'), self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class MyDatagramProto(asyncio.DatagramProtocol):
done = None
def __init__(self, create_future=False, loop=None):
self.state = 'INITIAL'
self.nbytes = 0
if create_future:
self.done = asyncio.Future(loop=loop)
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'INITIALIZED'
def datagram_received(self, data, addr):
assert self.state == 'INITIALIZED', self.state
self.nbytes += len(data)
def error_received(self, exc):
assert self.state == 'INITIALIZED', self.state
def connection_lost(self, exc):
assert self.state == 'INITIALIZED', self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class BaseEventLoopWithSelectorTests(test_utils.TestCase):
def setUp(self):
super().setUp()
self.loop = asyncio.new_event_loop()
self.set_event_loop(self.loop)
@patch_socket
def test_create_connection_multiple_errors(self, m_socket):
class MyProto(asyncio.Protocol):
pass
@asyncio.coroutine
def getaddrinfo(*args, **kw):
yield from []
return [(2, 1, 6, '', ('107.6.106.82', 80)),
(2, 1, 6, '', ('107.6.106.82', 80))]
def getaddrinfo_task(*args, **kwds):
return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
idx = -1
errors = ['err1', 'err2']
def _socket(*args, **kw):
nonlocal idx, errors
idx += 1
raise OSError(errors[idx])
m_socket.socket = _socket
self.loop.getaddrinfo = getaddrinfo_task
coro = self.loop.create_connection(MyProto, 'example.com', 80)
with self.assertRaises(OSError) as cm:
self.loop.run_until_complete(coro)
self.assertEqual(str(cm.exception), 'Multiple exceptions: err1, err2')
@patch_socket
def test_create_connection_timeout(self, m_socket):
# Ensure that the socket is closed on timeout
sock = mock.Mock()
m_socket.socket.return_value = sock
def getaddrinfo(*args, **kw):
fut = asyncio.Future(loop=self.loop)
addr = (socket.AF_INET, socket.SOCK_STREAM, 0, '',
('127.0.0.1', 80))
fut.set_result([addr])
return fut
self.loop.getaddrinfo = getaddrinfo
with mock.patch.object(self.loop, 'sock_connect',
side_effect=asyncio.TimeoutError):
coro = self.loop.create_connection(MyProto, '127.0.0.1', 80)
with self.assertRaises(asyncio.TimeoutError):
self.loop.run_until_complete(coro)
self.assertTrue(sock.close.called)
def test_create_connection_host_port_sock(self):
coro = self.loop.create_connection(
MyProto, 'example.com', 80, sock=object())
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
def test_create_connection_wrong_sock(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
with sock:
coro = self.loop.create_connection(MyProto, sock=sock)
with self.assertRaisesRegex(ValueError,
'A Stream Socket was expected'):
self.loop.run_until_complete(coro)
def test_create_server_wrong_sock(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
with sock:
coro = self.loop.create_server(MyProto, sock=sock)
with self.assertRaisesRegex(ValueError,
'A Stream Socket was expected'):
self.loop.run_until_complete(coro)
@unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'),
'no socket.SOCK_NONBLOCK (linux only)')
def test_create_server_stream_bittype(self):
sock = socket.socket(
socket.AF_INET, socket.SOCK_STREAM | socket.SOCK_NONBLOCK)
with sock:
coro = self.loop.create_server(lambda: None, sock=sock)
srv = self.loop.run_until_complete(coro)
srv.close()
self.loop.run_until_complete(srv.wait_closed())
def test_create_datagram_endpoint_wrong_sock(self):
sock = socket.socket(socket.AF_INET)
with sock:
coro = self.loop.create_datagram_endpoint(MyProto, sock=sock)
with self.assertRaisesRegex(ValueError,
'A UDP Socket was expected'):
self.loop.run_until_complete(coro)
def test_create_connection_no_host_port_sock(self):
coro = self.loop.create_connection(MyProto)
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
def test_create_connection_no_getaddrinfo(self):
@asyncio.coroutine
def getaddrinfo(*args, **kw):
yield from []
def getaddrinfo_task(*args, **kwds):
return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
self.loop.getaddrinfo = getaddrinfo_task
coro = self.loop.create_connection(MyProto, 'example.com', 80)
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
def test_create_connection_connect_err(self):
@asyncio.coroutine
def getaddrinfo(*args, **kw):
yield from []
return [(2, 1, 6, '', ('107.6.106.82', 80))]
def getaddrinfo_task(*args, **kwds):
return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
self.loop.getaddrinfo = getaddrinfo_task
self.loop.sock_connect = mock.Mock()
self.loop.sock_connect.side_effect = OSError
coro = self.loop.create_connection(MyProto, 'example.com', 80)
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
def test_create_connection_multiple(self):
@asyncio.coroutine
def getaddrinfo(*args, **kw):
return [(2, 1, 6, '', ('0.0.0.1', 80)),
(2, 1, 6, '', ('0.0.0.2', 80))]
def getaddrinfo_task(*args, **kwds):
return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
self.loop.getaddrinfo = getaddrinfo_task
self.loop.sock_connect = mock.Mock()
self.loop.sock_connect.side_effect = OSError
coro = self.loop.create_connection(
MyProto, 'example.com', 80, family=socket.AF_INET)
with self.assertRaises(OSError):
self.loop.run_until_complete(coro)
@patch_socket
def test_create_connection_multiple_errors_local_addr(self, m_socket):
def bind(addr):
if addr[0] == '0.0.0.1':
err = OSError('Err')
err.strerror = 'Err'
raise err
m_socket.socket.return_value.bind = bind
@asyncio.coroutine
def getaddrinfo(*args, **kw):
return [(2, 1, 6, '', ('0.0.0.1', 80)),
(2, 1, 6, '', ('0.0.0.2', 80))]
def getaddrinfo_task(*args, **kwds):
return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
self.loop.getaddrinfo = getaddrinfo_task
self.loop.sock_connect = mock.Mock()
self.loop.sock_connect.side_effect = OSError('Err2')
coro = self.loop.create_connection(
MyProto, 'example.com', 80, family=socket.AF_INET,
local_addr=(None, 8080))
with self.assertRaises(OSError) as cm:
self.loop.run_until_complete(coro)
self.assertTrue(str(cm.exception).startswith('Multiple exceptions: '))
self.assertTrue(m_socket.socket.return_value.close.called)
def _test_create_connection_ip_addr(self, m_socket, allow_inet_pton):
# Test the fallback code, even if this system has inet_pton.
if not allow_inet_pton:
del m_socket.inet_pton
m_socket.getaddrinfo = socket.getaddrinfo
sock = m_socket.socket.return_value
self.loop._add_reader = mock.Mock()
self.loop._add_reader._is_coroutine = False
self.loop._add_writer = mock.Mock()
self.loop._add_writer._is_coroutine = False
coro = self.loop.create_connection(asyncio.Protocol, '1.2.3.4', 80)
t, p = self.loop.run_until_complete(coro)
try:
sock.connect.assert_called_with(('1.2.3.4', 80))
_, kwargs = m_socket.socket.call_args
self.assertEqual(kwargs['family'], m_socket.AF_INET)
self.assertEqual(kwargs['type'], m_socket.SOCK_STREAM)
finally:
t.close()
test_utils.run_briefly(self.loop) # allow transport to close
sock.family = socket.AF_INET6
coro = self.loop.create_connection(asyncio.Protocol, '::1', 80)
t, p = self.loop.run_until_complete(coro)
try:
# Without inet_pton we use getaddrinfo, which transforms ('::1', 80)
# to ('::1', 80, 0, 0). The last 0s are flow info, scope id.
[address] = sock.connect.call_args[0]
host, port = address[:2]
self.assertRegex(host, r'::(0\.)*1')
self.assertEqual(port, 80)
_, kwargs = m_socket.socket.call_args
self.assertEqual(kwargs['family'], m_socket.AF_INET6)
self.assertEqual(kwargs['type'], m_socket.SOCK_STREAM)
finally:
t.close()
test_utils.run_briefly(self.loop) # allow transport to close
@patch_socket
def test_create_connection_ip_addr(self, m_socket):
self._test_create_connection_ip_addr(m_socket, True)
@patch_socket
def test_create_connection_no_inet_pton(self, m_socket):
self._test_create_connection_ip_addr(m_socket, False)
@patch_socket
def test_create_connection_service_name(self, m_socket):
m_socket.getaddrinfo = socket.getaddrinfo
sock = m_socket.socket.return_value
self.loop._add_reader = mock.Mock()
self.loop._add_reader._is_coroutine = False
self.loop._add_writer = mock.Mock()
self.loop._add_writer._is_coroutine = False
for service, port in ('http', 80), (b'http', 80):
coro = self.loop.create_connection(asyncio.Protocol,
'127.0.0.1', service)
t, p = self.loop.run_until_complete(coro)
try:
sock.connect.assert_called_with(('127.0.0.1', port))
_, kwargs = m_socket.socket.call_args
self.assertEqual(kwargs['family'], m_socket.AF_INET)
self.assertEqual(kwargs['type'], m_socket.SOCK_STREAM)
finally:
t.close()
test_utils.run_briefly(self.loop) # allow transport to close
for service in 'nonsense', b'nonsense':
coro = self.loop.create_connection(asyncio.Protocol,
'127.0.0.1', service)
with self.assertRaises(OSError):
self.loop.run_until_complete(coro)
def test_create_connection_no_local_addr(self):
@asyncio.coroutine
def getaddrinfo(host, *args, **kw):
if host == 'example.com':
return [(2, 1, 6, '', ('107.6.106.82', 80)),
(2, 1, 6, '', ('107.6.106.82', 80))]
else:
return []
def getaddrinfo_task(*args, **kwds):
return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
self.loop.getaddrinfo = getaddrinfo_task
coro = self.loop.create_connection(
MyProto, 'example.com', 80, family=socket.AF_INET,
local_addr=(None, 8080))
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
@patch_socket
def test_create_connection_bluetooth(self, m_socket):
# See http://bugs.python.org/issue27136, fallback to getaddrinfo when
# we can't recognize an address is resolved, e.g. a Bluetooth address.
addr = ('00:01:02:03:04:05', 1)
def getaddrinfo(host, port, *args, **kw):
assert (host, port) == addr
return [(999, 1, 999, '', (addr, 1))]
m_socket.getaddrinfo = getaddrinfo
sock = m_socket.socket()
coro = self.loop.sock_connect(sock, addr)
self.loop.run_until_complete(coro)
def test_create_connection_ssl_server_hostname_default(self):
self.loop.getaddrinfo = mock.Mock()
def mock_getaddrinfo(*args, **kwds):
f = asyncio.Future(loop=self.loop)
f.set_result([(socket.AF_INET, socket.SOCK_STREAM,
socket.SOL_TCP, '', ('1.2.3.4', 80))])
return f
self.loop.getaddrinfo.side_effect = mock_getaddrinfo
self.loop.sock_connect = mock.Mock()
self.loop.sock_connect.return_value = ()
self.loop._make_ssl_transport = mock.Mock()
class _SelectorTransportMock:
_sock = None
def get_extra_info(self, key):
return mock.Mock()
def close(self):
self._sock.close()
def mock_make_ssl_transport(sock, protocol, sslcontext, waiter,
**kwds):
waiter.set_result(None)
transport = _SelectorTransportMock()
transport._sock = sock
return transport
self.loop._make_ssl_transport.side_effect = mock_make_ssl_transport
ANY = mock.ANY
# First try the default server_hostname.
self.loop._make_ssl_transport.reset_mock()
coro = self.loop.create_connection(MyProto, 'python.org', 80, ssl=True)
transport, _ = self.loop.run_until_complete(coro)
transport.close()
self.loop._make_ssl_transport.assert_called_with(
ANY, ANY, ANY, ANY,
server_side=False,
server_hostname='python.org')
# Next try an explicit server_hostname.
self.loop._make_ssl_transport.reset_mock()
coro = self.loop.create_connection(MyProto, 'python.org', 80, ssl=True,
server_hostname='perl.com')
transport, _ = self.loop.run_until_complete(coro)
transport.close()
self.loop._make_ssl_transport.assert_called_with(
ANY, ANY, ANY, ANY,
server_side=False,
server_hostname='perl.com')
# Finally try an explicit empty server_hostname.
self.loop._make_ssl_transport.reset_mock()
coro = self.loop.create_connection(MyProto, 'python.org', 80, ssl=True,
server_hostname='')
transport, _ = self.loop.run_until_complete(coro)
transport.close()
self.loop._make_ssl_transport.assert_called_with(ANY, ANY, ANY, ANY,
server_side=False,
server_hostname='')
def test_create_connection_no_ssl_server_hostname_errors(self):
# When not using ssl, server_hostname must be None.
coro = self.loop.create_connection(MyProto, 'python.org', 80,
server_hostname='')
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
coro = self.loop.create_connection(MyProto, 'python.org', 80,
server_hostname='python.org')
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
def test_create_connection_ssl_server_hostname_errors(self):
# When using ssl, server_hostname may be None if host is non-empty.
coro = self.loop.create_connection(MyProto, '', 80, ssl=True)
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
coro = self.loop.create_connection(MyProto, None, 80, ssl=True)
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
sock = socket.socket()
coro = self.loop.create_connection(MyProto, None, None,
ssl=True, sock=sock)
self.addCleanup(sock.close)
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
def test_create_server_empty_host(self):
# if host is empty string use None instead
host = object()
@asyncio.coroutine
def getaddrinfo(*args, **kw):
nonlocal host
host = args[0]
yield from []
def getaddrinfo_task(*args, **kwds):
return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
self.loop.getaddrinfo = getaddrinfo_task
fut = self.loop.create_server(MyProto, '', 0)
self.assertRaises(OSError, self.loop.run_until_complete, fut)
self.assertIsNone(host)
def test_create_server_host_port_sock(self):
fut = self.loop.create_server(
MyProto, '0.0.0.0', 0, sock=object())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
def test_create_server_no_host_port_sock(self):
fut = self.loop.create_server(MyProto)
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
def test_create_server_no_getaddrinfo(self):
getaddrinfo = self.loop.getaddrinfo = mock.Mock()
getaddrinfo.return_value = []
f = self.loop.create_server(MyProto, 'python.org', 0)
self.assertRaises(OSError, self.loop.run_until_complete, f)
@patch_socket
def test_create_server_nosoreuseport(self, m_socket):
m_socket.getaddrinfo = socket.getaddrinfo
del m_socket.SO_REUSEPORT
m_socket.socket.return_value = mock.Mock()
f = self.loop.create_server(
MyProto, '0.0.0.0', 0, reuse_port=True)
self.assertRaises(ValueError, self.loop.run_until_complete, f)
@patch_socket
def test_create_server_soreuseport_only_defined(self, m_socket):
m_socket.getaddrinfo = socket.getaddrinfo
m_socket.socket.return_value = mock.Mock()
m_socket.SO_REUSEPORT = -1
f = self.loop.create_server(
MyProto, '0.0.0.0', 0, reuse_port=True)
self.assertRaises(ValueError, self.loop.run_until_complete, f)
@patch_socket
def test_create_server_cant_bind(self, m_socket):
class Err(OSError):
strerror = 'error'
m_socket.getaddrinfo.return_value = [
(2, 1, 6, '', ('127.0.0.1', 10100))]
m_socket.getaddrinfo._is_coroutine = False
m_sock = m_socket.socket.return_value = mock.Mock()
m_sock.bind.side_effect = Err
fut = self.loop.create_server(MyProto, '0.0.0.0', 0)
self.assertRaises(OSError, self.loop.run_until_complete, fut)
self.assertTrue(m_sock.close.called)
@patch_socket
def test_create_datagram_endpoint_no_addrinfo(self, m_socket):
m_socket.getaddrinfo.return_value = []
m_socket.getaddrinfo._is_coroutine = False
coro = self.loop.create_datagram_endpoint(
MyDatagramProto, local_addr=('localhost', 0))
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
def test_create_datagram_endpoint_addr_error(self):
coro = self.loop.create_datagram_endpoint(
MyDatagramProto, local_addr='localhost')
self.assertRaises(
AssertionError, self.loop.run_until_complete, coro)
coro = self.loop.create_datagram_endpoint(
MyDatagramProto, local_addr=('localhost', 1, 2, 3))
self.assertRaises(
AssertionError, self.loop.run_until_complete, coro)
def test_create_datagram_endpoint_connect_err(self):
self.loop.sock_connect = mock.Mock()
self.loop.sock_connect.side_effect = OSError
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol, remote_addr=('127.0.0.1', 0))
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
@patch_socket
def test_create_datagram_endpoint_socket_err(self, m_socket):
m_socket.getaddrinfo = socket.getaddrinfo
m_socket.socket.side_effect = OSError
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol, family=socket.AF_INET)
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol, local_addr=('127.0.0.1', 0))
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 not supported or enabled')
def test_create_datagram_endpoint_no_matching_family(self):
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol,
remote_addr=('127.0.0.1', 0), local_addr=('::1', 0))
self.assertRaises(
ValueError, self.loop.run_until_complete, coro)
@patch_socket
def test_create_datagram_endpoint_setblk_err(self, m_socket):
m_socket.socket.return_value.setblocking.side_effect = OSError
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol, family=socket.AF_INET)
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
self.assertTrue(
m_socket.socket.return_value.close.called)
def test_create_datagram_endpoint_noaddr_nofamily(self):
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol)
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
@patch_socket
def test_create_datagram_endpoint_cant_bind(self, m_socket):
class Err(OSError):
pass
m_socket.getaddrinfo = socket.getaddrinfo
m_sock = m_socket.socket.return_value = mock.Mock()
m_sock.bind.side_effect = Err
fut = self.loop.create_datagram_endpoint(
MyDatagramProto,
local_addr=('127.0.0.1', 0), family=socket.AF_INET)
self.assertRaises(Err, self.loop.run_until_complete, fut)
self.assertTrue(m_sock.close.called)
def test_create_datagram_endpoint_sock(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(('127.0.0.1', 0))
fut = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(create_future=True, loop=self.loop),
sock=sock)
transport, protocol = self.loop.run_until_complete(fut)
transport.close()
self.loop.run_until_complete(protocol.done)
self.assertEqual('CLOSED', protocol.state)
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_datagram_endpoint_sock_unix(self):
fut = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(create_future=True, loop=self.loop),
family=socket.AF_UNIX)
transport, protocol = self.loop.run_until_complete(fut)
assert transport._sock.family == socket.AF_UNIX
transport.close()
self.loop.run_until_complete(protocol.done)
self.assertEqual('CLOSED', protocol.state)
def test_create_datagram_endpoint_sock_sockopts(self):
class FakeSock:
type = socket.SOCK_DGRAM
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, local_addr=('127.0.0.1', 0), sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, remote_addr=('127.0.0.1', 0), sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, family=1, sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, proto=1, sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, flags=1, sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, reuse_address=True, sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, reuse_port=True, sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, allow_broadcast=True, sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
def test_create_datagram_endpoint_sockopts(self):
# Socket options should not be applied unless asked for.
# SO_REUSEADDR defaults to on for UNIX.
# SO_REUSEPORT is not available on all platforms.
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(create_future=True, loop=self.loop),
local_addr=('127.0.0.1', 0))
transport, protocol = self.loop.run_until_complete(coro)
sock = transport.get_extra_info('socket')
reuse_address_default_on = (
os.name == 'posix' and sys.platform != 'cygwin')
reuseport_supported = hasattr(socket, 'SO_REUSEPORT')
if reuse_address_default_on:
self.assertTrue(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEADDR))
else:
self.assertFalse(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEADDR))
if reuseport_supported:
self.assertFalse(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEPORT))
self.assertFalse(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_BROADCAST))
transport.close()
self.loop.run_until_complete(protocol.done)
self.assertEqual('CLOSED', protocol.state)
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(create_future=True, loop=self.loop),
local_addr=('127.0.0.1', 0),
reuse_address=True,
reuse_port=reuseport_supported,
allow_broadcast=True)
transport, protocol = self.loop.run_until_complete(coro)
sock = transport.get_extra_info('socket')
self.assertTrue(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEADDR))
if reuseport_supported:
self.assertTrue(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEPORT))
self.assertTrue(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_BROADCAST))
transport.close()
self.loop.run_until_complete(protocol.done)
self.assertEqual('CLOSED', protocol.state)
@patch_socket
def test_create_datagram_endpoint_nosoreuseport(self, m_socket):
del m_socket.SO_REUSEPORT
m_socket.socket.return_value = mock.Mock()
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(loop=self.loop),
local_addr=('127.0.0.1', 0),
reuse_address=False,
reuse_port=True)
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
@patch_socket
def test_create_datagram_endpoint_ip_addr(self, m_socket):
def getaddrinfo(*args, **kw):
self.fail('should not have called getaddrinfo')
m_socket.getaddrinfo = getaddrinfo
m_socket.socket.return_value.bind = bind = mock.Mock()
self.loop._add_reader = mock.Mock()
self.loop._add_reader._is_coroutine = False
reuseport_supported = hasattr(socket, 'SO_REUSEPORT')
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(loop=self.loop),
local_addr=('1.2.3.4', 0),
reuse_address=False,
reuse_port=reuseport_supported)
t, p = self.loop.run_until_complete(coro)
try:
bind.assert_called_with(('1.2.3.4', 0))
m_socket.socket.assert_called_with(family=m_socket.AF_INET,
proto=m_socket.IPPROTO_UDP,
type=m_socket.SOCK_DGRAM)
finally:
t.close()
test_utils.run_briefly(self.loop) # allow transport to close
def test_accept_connection_retry(self):
sock = mock.Mock()
sock.accept.side_effect = BlockingIOError()
self.loop._accept_connection(MyProto, sock)
self.assertFalse(sock.close.called)
@mock.patch('asyncio.base_events.logger')
def test_accept_connection_exception(self, m_log):
sock = mock.Mock()
sock.fileno.return_value = 10
sock.accept.side_effect = OSError(errno.EMFILE, 'Too many open files')
self.loop._remove_reader = mock.Mock()
self.loop.call_later = mock.Mock()
self.loop._accept_connection(MyProto, sock)
self.assertTrue(m_log.error.called)
self.assertFalse(sock.close.called)
self.loop._remove_reader.assert_called_with(10)
self.loop.call_later.assert_called_with(constants.ACCEPT_RETRY_DELAY,
# self.loop._start_serving
mock.ANY,
MyProto, sock, None, None, mock.ANY)
def test_call_coroutine(self):
@asyncio.coroutine
def simple_coroutine():
pass
self.loop.set_debug(True)
coro_func = simple_coroutine
coro_obj = coro_func()
self.addCleanup(coro_obj.close)
for func in (coro_func, coro_obj):
with self.assertRaises(TypeError):
self.loop.call_soon(func)
with self.assertRaises(TypeError):
self.loop.call_soon_threadsafe(func)
with self.assertRaises(TypeError):
self.loop.call_later(60, func)
with self.assertRaises(TypeError):
self.loop.call_at(self.loop.time() + 60, func)
with self.assertRaises(TypeError):
self.loop.run_in_executor(None, func)
@mock.patch('asyncio.base_events.logger')
def test_log_slow_callbacks(self, m_logger):
def stop_loop_cb(loop):
loop.stop()
@asyncio.coroutine
def stop_loop_coro(loop):
yield from ()
loop.stop()
asyncio.set_event_loop(self.loop)
self.loop.set_debug(True)
self.loop.slow_callback_duration = 0.0
# slow callback
self.loop.call_soon(stop_loop_cb, self.loop)
self.loop.run_forever()
fmt, *args = m_logger.warning.call_args[0]
self.assertRegex(fmt % tuple(args),
"^Executing <Handle.*stop_loop_cb.*> "
"took .* seconds$")
# slow task
asyncio.ensure_future(stop_loop_coro(self.loop), loop=self.loop)
self.loop.run_forever()
fmt, *args = m_logger.warning.call_args[0]
self.assertRegex(fmt % tuple(args),
"^Executing <Task.*stop_loop_coro.*> "
"took .* seconds$")
class RunningLoopTests(unittest.TestCase):
def test_running_loop_within_a_loop(self):
@asyncio.coroutine
def runner(loop):
loop.run_forever()
loop = asyncio.new_event_loop()
outer_loop = asyncio.new_event_loop()
try:
with self.assertRaisesRegex(RuntimeError,
'while another loop is running'):
outer_loop.run_until_complete(runner(loop))
finally:
loop.close()
outer_loop.close()
if __name__ == '__main__':
unittest.main()
|
scrape_bittrex.py
|
# core
import os
import gc
import re
import time
from datetime import datetime
from threading import Thread
# installed
import pandas as pd
import requests
import psycopg2 as pg
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
# for writing to sql with pandas
from sqlalchemy import create_engine
PG_UNAME = os.environ.get('postgres_uname')
PG_PASS = os.environ.get('postgres_pass')
TH_DB = 'bittrex'
# create db if not already there
# check if db exists
def create_db_conn():
try:
conn = pg.connect(dbname=TH_DB, user=PG_UNAME, password=PG_PASS)
except pg.OperationalError:
conn = pg.connect(dbname='postgres', user=PG_UNAME, password=PG_PASS)
conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
cur = conn.cursor()
cur.execute('CREATE DATABASE ' + TH_DB)
cur.close()
conn.close()
        conn = pg.connect(dbname=TH_DB, user=PG_UNAME, password=PG_PASS)
return conn
def get_all_tables(conn):
    # gets a list of all tables from the supplied psycopg2 connection
    cursor = conn.cursor()
cursor.execute("select relname from pg_class where relkind='r' and relname !~ '^(pg_|sql_)';")
return cursor.fetchall()
def get_home_dir():
cwd = os.getcwd()
cwd_list = cwd.split('/')
repo_position = [i for i, s in enumerate(cwd_list) if s == 'crypto_predict']
if len(repo_position) > 1:
print("error! more than one intance of repo name in path")
return None
home_dir = '/'.join(cwd_list[:repo_position[0] + 1]) + '/'
return home_dir
def get_all_currency_pairs(show_mkts=False):
while True: # in case of ssl error
try:
res = requests.get('https://bittrex.com/api/v1.1/public/getmarkets')
break
except Exception as e:
print(e)
time.sleep(10)
if res.json()['success']:
markets = res.json()['result']
market_names = []
for m in markets:
if show_mkts:
print(m['MarketName'])
market_names.append(m['MarketName'])
return sorted(market_names)
else:
print('error! ', res.json()['message'])
return None
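# The 'while True: try/except/sleep' retry loop above reappears in most of the
# fetchers below. A minimal sketch of a helper that could centralize it; the
# name, attempt limit, and delay are assumptions, not part of the original script.
def get_json_with_retry(url, attempts=5, delay=10):
    """Fetch a Bittrex public-API URL and return the parsed JSON, retrying on connection/SSL errors."""
    for _ in range(attempts):
        try:
            return requests.get(url).json()
        except Exception as e:
            print(e)
            time.sleep(delay)
    return None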
HOME_DIR = '/media/nate/data_lake/crytpo_predict/'  # get_home_dir()
MARKETS = get_all_currency_pairs()
def get_all_summaries():
while True: # just in case of SSL error
try:
res = requests.get('https://bittrex.com/api/v1.1/public/getmarketsummaries')
break
except Exception as e:
print(e)
time.sleep(10)
if res.json()['success']:
summary = res.json()['result']
return summary
else:
print('error! ', res.json()['message'])
return None
def get_all_tickers():
tickers = []
for m in MARKETS:
while True:
try:
res = requests.get('https://bittrex.com/api/v1.1/public/getticker?market=' + m)
break
except Exception as e:
print(e)
time.sleep(10)
if res.json()['success']:
t = res.json()['result']
if t is None:
print('error for', m + '!', 'result was None. Message:', res.json()['message'])
continue
t['MarketName'] = m
tickers.append(t)
else:
print('error for', m + '!', res.json()['message'])
df = pd.io.json.json_normalize(tickers)
df.set_index('MarketName', inplace=True)
return df
def get_trade_history(market):
tries = 0
while True: # sometimes an SSL connection error...just wait a few seconds and try again
tries += 1
if tries == 6:
return None
try:
res = requests.get('https://bittrex.com/api/v1.1/public/getmarkethistory?market=' + market)
break
except Exception as e:
print(e)
time.sleep(10)
try:
if res.json()['success']:
history = res.json()['result']
return history
else:
print('error! ', res.json()['message'])
return None
except Exception as e:
print('exception! error!')
print(e)
return None
def save_all_trade_history():
for m in MARKETS:
print('saving', m, 'trade history')
history = get_trade_history(m)
if history is None or len(history) == 0:
print('no history!')
continue
df = make_history_df(history)
filename = HOME_DIR + 'data/trade_history/' + re.sub('-', '_', m) + '.csv.gz'
if os.path.exists(filename):
old_df = pd.read_csv(filename, index_col='TimeStamp')
full_df = old_df.append(df)
full_df.drop_duplicates(inplace=True)
else:
full_df = df
full_df.to_csv(filename, compression='gzip')
        # free intermediate objects, then pause briefly to allow for a graceful shutdown
del history
del df
try:
del old_df
except NameError:
pass
del full_df
gc.collect()
        print('done saving; resting 2s')
time.sleep(2)
print('\n\ndone!\n\n')
def save_all_trade_history_old():
"""
saves data to CSVs...pretty inefficient
"""
for m in MARKETS:
print('saving', m, 'trade history')
history = get_trade_history(m)
if history is None or len(history) == 0:
print('no history!')
continue
df = make_history_df(history)
filename = HOME_DIR + 'data/trade_history/' + re.sub('-', '_', m) + '.csv.gz'
if os.path.exists(filename):
old_df = pd.read_csv(filename, index_col='TimeStamp')
full_df = old_df.append(df)
full_df.drop_duplicates(inplace=True)
else:
full_df = df
full_df.to_csv(filename, compression='gzip')
print('done!\n\n')
def save_all_trade_history_sql():
"""
    Intended to save data to SQL; currently this mirrors the CSV-based
    save_all_trade_history_old() above (an illustrative SQL version is
    sketched after this function).
"""
for m in MARKETS:
print('saving', m, 'trade history')
history = get_trade_history(m)
if history is None or len(history) == 0:
print('no history!')
continue
df = make_history_df(history)
filename = HOME_DIR + 'data/trade_history/' + re.sub('-', '_', m) + '.csv.gz'
if os.path.exists(filename):
old_df = pd.read_csv(filename, index_col='TimeStamp')
full_df = old_df.append(df)
full_df.drop_duplicates(inplace=True)
else:
full_df = df
full_df.to_csv(filename, compression='gzip')
print('done!\n\n')
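# A minimal sketch of what an actual SQL-backed version could look like, reusing
# the sqlalchemy create_engine already imported above. The connection URL, the
# one-table-per-market naming, and the 'append' semantics are assumptions;
# de-duplication of rows would still have to happen on the database side.
def save_all_trade_history_sql_sketch():
    engine = create_engine(
        'postgresql://{}:{}@localhost:5432/{}'.format(PG_UNAME, PG_PASS, TH_DB))
    for m in MARKETS:
        print('saving', m, 'trade history to SQL')
        history = get_trade_history(m)
        if history is None or len(history) == 0:
            print('no history!')
            continue
        df = make_history_df(history)
        # one table per market, e.g. "BTC_ETH"; appends new rows on each run
        df.to_sql(re.sub('-', '_', m), engine, if_exists='append')
    print('done!\n\n')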
def read_history_csv(market):
filename = HOME_DIR + 'data/trade_history/' + re.sub('-', '_', market) + '.csv.gz'
df = pd.read_csv(filename, index_col='TimeStamp')
return df
def convert_history_to_sql():
"""
WARNING: will replace all data in SQL tables
"""
    # resume partway through MARKETS (hardcoded restart point at 'BTC-SALT')
    idx = MARKETS.index('BTC-SALT')
    ms = MARKETS[idx:]
for m in ms:
print(m)
engine = create_engine("postgres://nate:{}@localhost:5432/postgres".format(PG_PASS))
engine.table_names()
conn = engine.connect()
conn.execute("commit")
table_name = '"' + m + '"'
# try to create db unless already there, then skip creation
try:
conn.execute("create database " + table_name + ';')
        except Exception as e:
            # the database most likely exists already; log and carry on
            print(e)
conn.execute("commit")
conn.close()
        engine = create_engine('postgresql://{}:{}@localhost:5432/{}'.format(PG_UNAME, PG_PASS, m))
df = read_history_csv(m)
df.to_sql(m, engine, if_exists='replace')
# cursor = conn.cursor()
# # all_tables = get_all_tables()
# was starting to do this with psycopg2 but forgot .to_sql in pandas...
# for m in MARKETS:
# table_name = '"' + m + '"'
# df = read_history_csv(m)
# # create table if doesn't exist
# if m not in all_tables:
# cursor.execute("""CREATE TABLE {} (
# TradeTime TIMESTAMP,
# FillType VARCHAR,
# Id INTEGER,
# OrderType VARCHAR,
# Price NUMERIC,
# Quantity NUMERIC,
# Total NUMERIC
# );""".format(table_name))
# times = pd.to_datetime(df.index).tz_localize('UTC')
# row = df.iloc[0]
# tup = (m,
# times[0],
# row['FillType'],
# int(row['Id']),
# row['OrderType'],
# row['Price'],
# row['Quantity'],
# row['Total'])
# cursor.execute("""INSERT INTO %s
# (TradeTime, FillType, Id, OrderType, Price, Quantity, Total)
# VALUES (%s, %s, %s, %s, %s, %s, %s);""", tup)
def get_order_book(market):
try:
while True: # in case of SSL error, keep trying
try:
res = requests.get('https://bittrex.com/api/v1.1/public/getorderbook?market=' + market + '&type=both&depth=50000')
break
except Exception as e:
print(e)
time.sleep(10)
timestamp = pd.to_datetime(datetime.now())
if res.json()['success']:
orders = res.json()['result']
if orders['buy'] is None and orders['sell'] is None:
print('error! both buy and sell orders are none')
return None, None
return orders, timestamp
else:
print('error!', res.json()['message'])
return None, None
except Exception as e:
print(e)
print('exception! error!')
return None, None
def make_orderbook_df(orders, timestamp, time_idx=True):
buy_df = pd.io.json.json_normalize(orders['buy'])
buy_df['timestamp'] = timestamp
sell_df = pd.io.json.json_normalize(orders['sell'])
sell_df['timestamp'] = timestamp
if time_idx:
sell_df.set_index('timestamp', inplace=True)
buy_df.set_index('timestamp', inplace=True)
return buy_df, sell_df
def make_history_df(history, time_idx=True):
df = pd.io.json.json_normalize(history)
df['TimeStamp'] = pd.to_datetime(df['TimeStamp'])
if time_idx:
df.set_index('TimeStamp', inplace=True)
return df
def make_summary_df(summary):
keys = summary[0].keys()
data_dict = {}
for k in keys:
data_dict[k] = []
for s in summary:
for k in keys:
data_dict[k].append(s[k])
df = pd.DataFrame(data_dict)
df['TimeStamp'] = pd.to_datetime(df['TimeStamp'])
df['Created'] = pd.to_datetime(df['Created'])
df['24hr_chg'] = df['Last'] - df['PrevDay']
df['24hr_chg_pct'] = df['24hr_chg'] / df['PrevDay'] * 100
return df
def save_order_book(market):
orders, timestamp = get_order_book(market)
if orders is None and timestamp is None:
return
if len(orders['buy']) + len(orders['sell']) == 0:
print('no orders, skipping')
return
buy_df, sell_df = make_orderbook_df(orders, timestamp)
key = re.sub('-', '_', market)
buy_file = HOME_DIR + 'data/order_books/buy_orders_' + key + '.csv.gz'
sell_file = HOME_DIR + 'data/order_books/sell_orders_' + key + '.csv.gz'
if os.path.exists(buy_file):
buy_df.to_csv(buy_file, compression='gzip', mode='a', header=False)
sell_df.to_csv(sell_file, compression='gzip', mode='a', header=False)
else:
buy_df.to_csv(buy_file, compression='gzip')
sell_df.to_csv(sell_file, compression='gzip')
del orders
del timestamp
del buy_df
del sell_df
gc.collect()
def save_all_order_books():
for m in MARKETS:
print('saving', m, '...')
save_order_book(m)
print('sleeping 5s...')
time.sleep(5)
def read_order_book(market):
fileend = re.sub('-', '_', market + '.csv.gz')
buy_df = pd.read_csv(HOME_DIR + 'data/order_books/buy_orders_' + fileend, index_col='timestamp')
sell_df = pd.read_csv(HOME_DIR + 'data/order_books/sell_orders_' + fileend, index_col='timestamp')
return buy_df, sell_df
def continuously_save_order_books(interval=600):
"""
Saves all order books every 'interval' seconds.
"""
def keep_saving():
while True:
save_all_order_books()
print("\n\ndone.")
time.sleep(interval)
thread = Thread(target=keep_saving)
thread.start()
def continuously_save_trade_history(interval=300):
"""
Saves all trade history every 'interval' seconds.
"""
def keep_saving():
while True:
save_all_trade_history()
time.sleep(interval)
thread = Thread(target=keep_saving)
thread.start()
def continuously_save_summaries(interval=300):
"""
    Saves all market summaries every 'interval' seconds (save_all_summaries() is sketched below).
"""
def keep_saving():
while True:
save_all_summaries()
time.sleep(interval)
thread = Thread(target=keep_saving)
thread.start()
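# save_all_summaries() is called by continuously_save_summaries() above but is
# not defined in this file. A minimal sketch, assuming a single appended gzip
# CSV under HOME_DIR; the filename is an assumption.
def save_all_summaries():
    summary = get_all_summaries()
    if summary is None:
        print('no summaries!')
        return
    df = make_summary_df(summary)
    filename = HOME_DIR + 'data/market_summaries.csv.gz'  # assumed location
    if os.path.exists(filename):
        df.to_csv(filename, compression='gzip', mode='a', header=False)
    else:
        df.to_csv(filename, compression='gzip')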
def get_total_buy_sell_orders():
"""
    Calculates, per market, the total buy and sell order volume (in each market's
    quote currency) from the most recently saved order books.
"""
books = {}
for m in MARKETS:
print(m)
fileend = re.sub('-', '_', m + '.csv.gz')
if os.path.exists(HOME_DIR + 'data/order_books/buy_orders_' + fileend):
books[m] = {}
books[m]['buy'], books[m]['sell'] = read_order_book(m)
tickers = get_all_tickers()
ticker_markets = set(tickers.index)
total_sells = {}
total_buys = {}
sells_minus_buys = {}
cur_pairs = list(books.keys())
for cur_pair in books.keys():
if cur_pair not in ticker_markets:
print('market for', cur_pair, 'not in ticker data')
continue
print(cur_pair)
b = books[cur_pair]
latest_sell_time = b['sell'].index.unique().max()
latest_buy_time = b['buy'].index.unique().max()
latest_sells = b['sell'].loc[latest_sell_time]
latest_buys = b['buy'].loc[latest_buy_time]
total_sell = (latest_sells['Quantity'] * tickers.loc[cur_pair]['Last']).sum()
total_buy = (latest_buys['Quantity'] * latest_buys['Rate']).sum()
total_sells[cur_pair] = total_sell
total_buys[cur_pair] = total_buy
sells_minus_buys[cur_pair] = total_sell - total_buy
return total_sells, total_buys, sells_minus_buys
def make_buy_sell_df(total_sells, total_buys, sells_minus_buys):
sells = []
buys = []
minus = []
marks = list(total_sells.keys())
for m in marks:
sells.append(total_sells[m])
buys.append(total_buys[m])
minus.append(sells_minus_buys[m])
df = pd.DataFrame({'total_sells':sells,
'total_buys':buys,
'sells_minus_buys':minus,
'MarketName':marks})
df.set_index('MarketName', inplace=True)
return df
def get_buy_sell_df():
total_sells, total_buys, sells_minus_buys = get_total_buy_sell_orders()
df = make_buy_sell_df(total_sells, total_buys, sells_minus_buys)
return df
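# Minimal usage sketch (added for illustration, not part of the original module):
# start the background savers defined above and let their non-daemon threads run.
# It assumes HOME_DIR points at a writable directory and MARKETS is already populated.
if __name__ == '__main__':
    continuously_save_order_books(interval=600)    # snapshot order books every 10 minutes
    continuously_save_trade_history(interval=300)  # append new trades every 5 minutes
    continuously_save_summaries(interval=300)      # append market summaries every 5 minutes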
|
603_Python_multiprocessingUsingLockAndSharedMemory.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 7 19:21:34 2019
@author: yangyutu123
"""
# a good source: https://pymotw.com/2/multiprocessing/basics.html
# Here we show how to use a Lock and shared memory (mp.Value) to coordinate updates from multiple processes.
import multiprocessing as mp
import time
# here d is the type code for double, 0 is the initial value.
# For the complete list of type codes, see https://docs.python.org/3/library/array.html
# Without a lock, the updates of the two processes interleave, i.e., there is a data race.
def workerNoLock(sum, num):
for i in range(10):
time.sleep(0.1)
sum.value = sum.value + num
print(sum.value)
# With the lock, the two processes update the shared sum one after the other, so the summation is correct.
def workerWithLock(lock, sum, num):
lock.acquire()
for i in range(10):
time.sleep(0.1)
sum.value = sum.value + num
print(sum.value)
lock.release()
if __name__ == '__main__':
sharedSum = mp.Value('d', 0)
p1 = mp.Process(target=workerNoLock, args=(sharedSum, 1))
p2 = mp.Process(target=workerNoLock, args=(sharedSum, 10))
p1.start()
p2.start()
p1.join()
p2.join()
print("use lock")
lock = mp.Lock()
p1 = mp.Process(target=workerWithLock, args=(lock, sharedSum, 1))
p2 = mp.Process(target=workerWithLock, args=(lock, sharedSum, 10))
p1.start()
p2.start()
p1.join()
p2.join()
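# Alternative sketch (not in the original example): an mp.Value carries its own lock,
# so the same synchronisation can be written with get_lock() instead of a separate
# mp.Lock. Assumes the same two-process setup as above.
def workerWithValueLock(sum, num):
    for i in range(10):
        time.sleep(0.1)
        with sum.get_lock():  # acquire the Value's built-in lock for each update
            sum.value = sum.value + num
            print(sum.value)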
|
detect_video.py
|
import time
import json
from absl import app, flags, logging
from absl.flags import FLAGS
import cv2
import tensorflow as tf
from yolov3_tf2.models import (
YoloV3, YoloV3Tiny
)
from yolov3_tf2.dataset import transform_images
from yolov3_tf2.utils import draw_outputs
from centroidtracker import CentroidTracker
from performance import TimeMeasure
import threading
from queue import Queue
from concurrent.futures import ThreadPoolExecutor
flags.DEFINE_string('classes', './data/labels/coco.names', 'path to classes file')
flags.DEFINE_string('weights', './weights/yolov3.tf',
'path to weights file')
flags.DEFINE_boolean('tiny', False, 'yolov3 or yolov3-tiny')
flags.DEFINE_integer('size', 416, 'resize images to')
flags.DEFINE_string('video', './data/video/paris.mp4',
'path to video file or number for webcam')
flags.DEFINE_string('output', None, 'path to output video')
flags.DEFINE_string('output_format', 'XVID', 'codec used in VideoWriter when saving video to file')
flags.DEFINE_integer('num_classes', 80, 'number of classes in the model')
flags.DEFINE_string('logs', './detections/report2.json', 'path to result logs')
def img_read_wrapper(vid, out_queue: Queue, out_queue2: Queue):
print("img_read_wrapper: {}".format(threading.current_thread()))
global stop_threads
count = 0
frame_count = 0
while True:
_, img = vid.read()
if img is None or stop_threads:
logging.warning("Empty Frame:" + str(frame_count))
time.sleep(0.1)
count += 1
if count < 3:
continue
else:
print("Stopeed")
out_queue.put(None)
out_queue2.put(None)
break
else:
frame_count += 1
img_in = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img_in = tf.expand_dims(img_in, 0)
img_in = transform_images(img_in, FLAGS.size)
out_queue.put(img_in)
out_queue2.put(img)
def predict_wrapper(yolo, in_queue: Queue, out_queue: Queue):
print("prediction_wrapper: {}".format(threading.current_thread()))
global stop_threads
fps = 0.0
while True:
img_in = in_queue.get()
if img_in is None or stop_threads:
out_queue.put(None)
break
t1 = time.time()
with TimeMeasure('Prediction'):
boxes, scores, classes, nums = yolo.predict(img_in)
fps = (fps + (1. / (time.time() - t1))) / 2
output = {'boxes': boxes, 'scores': scores, 'classes': classes, 'nums': nums, 'fps': fps}
out_queue.put(output)
def display_wrapper(out, FLAGS, in_queue: Queue, in2_queue: Queue):
print("display_wrapper: {}".format(threading.current_thread()))
global stop_threads
class_names = [c.strip() for c in open(FLAGS.classes).readlines()]
data_log = {}
frame_count = 0
ct = CentroidTracker()
while True:
data = in_queue.get()
img = in2_queue.get()
if data is None or img is None:
break
boxes, scores, classes, nums, fps = data['boxes'], data['scores'], data['classes'], data['nums'], data['fps']
with TimeMeasure('Display frame:' + str(frame_count)):
img, rects, log = draw_outputs(img, (boxes, scores, classes, nums), class_names)
img = cv2.putText(img, "FPS: {:.2f}".format(fps), (0, 30),
cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2)
objects = ct.update(rects)
if FLAGS.output:
out.write(img)
data_log['frame{}'.format(str(frame_count))] = log
frame_count += 1
cv2.imshow('output', img)
if cv2.waitKey(1) == ord('q'):
stop_threads = True
break
with open(FLAGS.logs, 'w') as f:
json.dump(data_log, f)
cv2.destroyAllWindows()
processed_img_queue = Queue()
raw_img_queue = Queue()
yolo_result_queue = Queue()
stop_threads = False
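# Minimal sketch (illustrative only, not part of the original script) of the
# producer/consumer layout used by the wrappers above: one thread fills a Queue,
# a consumer drains it, and a None sentinel marks the end of the stream. The
# function is never called here, so it has no effect when the script runs.
def _pipeline_sketch(n_items=5):
    q = Queue()

    def producer():
        for i in range(n_items):
            q.put(i)          # analogous to img_read_wrapper pushing frames
        q.put(None)           # sentinel, like the None pushed when the video ends

    def consumer():
        while True:
            item = q.get()
            if item is None:  # stop on the sentinel, like predict/display_wrapper
                break
            print('processed', item)

    t = threading.Thread(target=producer)
    t.start()
    consumer()
    t.join()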
def main(_argv):
print("Start")
start_time = time.time()
physical_devices = tf.config.experimental.list_physical_devices('GPU')
if len(physical_devices) > 0:
tf.config.experimental.set_memory_growth(physical_devices[0], True)
if FLAGS.tiny:
yolo = YoloV3Tiny(classes=FLAGS.num_classes)
else:
yolo = YoloV3(classes=FLAGS.num_classes)
yolo.load_weights(FLAGS.weights)
logging.info('weights loaded')
logging.info('classes loaded')
times = []
try:
vid = cv2.VideoCapture(int(FLAGS.video))
except ValueError:
vid = cv2.VideoCapture(FLAGS.video)
out = None
if FLAGS.output:
# by default VideoCapture returns float instead of int
width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(vid.get(cv2.CAP_PROP_FPS))
codec = cv2.VideoWriter_fourcc(*FLAGS.output_format)
out = cv2.VideoWriter(FLAGS.output, codec, fps, (width, height))
# two workers so the frame reader and the predictor run concurrently while the main thread displays
with ThreadPoolExecutor(max_workers=2) as executor:
executor.submit(img_read_wrapper, vid, processed_img_queue, raw_img_queue)
executor.submit(predict_wrapper, yolo, processed_img_queue, yolo_result_queue)
display_wrapper(out, FLAGS, yolo_result_queue, raw_img_queue)
# read_thread = threading.Thread(target=img_read_wrapper, args=(vid, processed_img_queue, raw_img_queue))
# predict_thread = threading.Thread(target=predict_wrapper, args=(yolo, processed_img_queue, yolo_result_queue))
# display_thread = threading.Thread(target=display_wrapper, args=(out, FLAGS, yolo_result_queue, raw_img_queue))
# threads = [read_thread, predict_thread, display_thread]
# for t in threads:
# t.start()
# for t in threads:
# t.join()
print("FInish", time.time() - start_time)
if __name__ == '__main__':
try:
app.run(main)
except SystemExit:
pass
|
remote.py
|
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import copy
import time
import torch
import torch.multiprocessing as mp
from salina import Agent
from salina.workspace import Workspace, _SplitSharedWorkspace
def f(agent, in_queue, out_queue, seed):
out_queue.put("ok")
running = True
old_workspace = None
print("Seeding remote agent with ", seed)
agent.seed(seed)
while running:
command = in_queue.get()
if command[0] == "go_new_workspace":
_, workspace, args = command
old_workspace = workspace
agent(workspace, **args)
out_queue.put("ok")
elif command[0] == "go_reuse_workspace":
_, _, args = command
agent(old_workspace, **args)
out_queue.put("ok")
elif command[0] == "exit":
out_queue.put("ok")
return
class RemoteAgent(Agent):
def __init__(self, agent, name=None):
super().__init__(name=name)
self.agent = agent
self._is_running = False
self.process = None
self.last_workspace = None
def get_by_name(self, n):
if self._name == n:
return [self] + self.agent.get_by_name(n)
else:
return self.agent.get_by_name(n)
def forward(self, **args):
raise NotImplementedError
def _create_process(self):
print("[RemoteAgent] starting process...")
self.i_queue = mp.Queue()
self.o_queue = mp.Queue()
self.i_queue.cancel_join_thread()
self.o_queue.cancel_join_thread()
self.process = mp.Process(
target=f, args=(self.agent, self.i_queue, self.o_queue, self._seed)
)
self.process.daemon = False
self.process.start()
self.o_queue.get()  # wait for the "ok" handshake from the worker process
def __call__(self, workspace, **args):
with torch.no_grad():
assert (
workspace.is_shared
), "You must use a shared workspace when using a Remote Agent"
if self.process is None:
self._create_process()
if not workspace == self.last_workspace:
self.i_queue.put(("go_new_workspace", workspace, args))
self.last_workspace = workspace
r = self.o_queue.get()
assert r == "ok"
else:
self.i_queue.put(("go_reuse_workspace", workspace, args))
r = self.o_queue.get()
assert r == "ok"
def _asynchronous_call(self, workspace, **args):
with torch.no_grad():
self._is_running = True
assert (
workspace.is_shared
), "You must use a shared workspace when using a Remote Agent"
if self.process is None:
self._create_process()
if not workspace == self.last_workspace:
self.i_queue.put(("go_new_workspace", workspace, args))
self.last_workspace = workspace
else:
self.i_queue.put(("go_reuse_workspace", workspace, args))
def seed(self, _seed):
self._seed = _seed
def _running_queue(self):
return self.o_queue
def is_running(self):
if self._is_running:
try:
r = self.o_queue.get(False)
assert r == "ok"
self._is_running = False
except:
pass
return self._is_running
def close(self):
if self.process is None:
return
print("[RemoteAgent] closing process")
self.i_queue.put(("exit",))
self.o_queue.get()
time.sleep(0.1)
self.process.terminate()
self.process.join()
self.i_queue.close()
self.o_queue.close()
time.sleep(0.1)
del self.i_queue
del self.o_queue
self.process = None
def __del__(self):
self.close()
class NRemoteAgent(Agent):
"""A set of multiple agents"""
def __init__(self, agents, batch_dims):
super().__init__()
self.agents = agents
self.batch_dims = batch_dims
def get_by_name(self, name):
r = []
if self._name == name:
r = [self]
for a in self.agents:
r = r + a.get_by_name(name)
return r
def create(agent, num_processes=0, time_size=None, **extra_args):
agent.seed(0)
if num_processes == 0:
workspace = Workspace()
_agent = copy.deepcopy(agent)
agent(workspace, **extra_args)
shared_workspace = workspace._convert_to_shared_workspace(
n_repeat=1, time_size=time_size
)
return _agent, shared_workspace
workspace = Workspace()
agents = [copy.deepcopy(agent) for t in range(num_processes)]
agent(workspace, **extra_args)
b = workspace.batch_size()
batch_dims = [(k * b, k * b + b) for k, a in enumerate(agents)]
shared_workspace = workspace._convert_to_shared_workspace(
n_repeat=num_processes
)
agents = [RemoteAgent(a) for a in agents]
return NRemoteAgent(agents, batch_dims), shared_workspace
def __call__(self, workspace, **args):
assert workspace.is_shared
for k in range(len(self.agents)):
_workspace = _SplitSharedWorkspace(workspace, self.batch_dims[k])
self.agents[k]._asynchronous_call(_workspace, **args)
for a in self.agents:
ok = a._running_queue().get()
assert ok == "ok"
def seed(self, seed, inc=1):
s = seed
for a in self.agents:
a.seed(s)
s += inc
def _asynchronous_call(self, workspace, **args):
assert workspace.is_shared
for k in range(len(self.agents)):
_workspace = _SplitSharedWorkspace(workspace, self.batch_dims[k])
self.agents[k]._asynchronous_call(_workspace, **args)
def is_running(self):
for a in self.agents:
if a.is_running():
return True
return False
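# Minimal sketch (illustrative, not part of salina) of the command-queue protocol
# implemented by f() above: the parent pushes ("command", ...) tuples into an input
# queue and waits for an "ok" acknowledgement on an output queue; an "exit" command
# shuts the worker down. The worker below is a toy stand-in for a remote Agent.
import multiprocessing as _std_mp

def _toy_command_worker(in_queue, out_queue):
    out_queue.put("ok")              # handshake, like f() signalling readiness
    while True:
        command = in_queue.get()
        if command[0] == "work":
            out_queue.put("ok")      # acknowledge, like "go_new_workspace"
        elif command[0] == "exit":
            out_queue.put("ok")
            return

if __name__ == "__main__":
    i_q, o_q = _std_mp.Queue(), _std_mp.Queue()
    p = _std_mp.Process(target=_toy_command_worker, args=(i_q, o_q))
    p.start()
    assert o_q.get() == "ok"         # wait for the handshake
    i_q.put(("work", None))
    assert o_q.get() == "ok"
    i_q.put(("exit",))
    assert o_q.get() == "ok"
    p.join()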
|
a-lot-of-parallel-tasks.py
|
#!/usr/bin/env python
"""
More complex demonstration of what's possible with the progress bar.
"""
from __future__ import unicode_literals
import random
import threading
import time
from prompt_toolkit2 import HTML
from prompt_toolkit2.shortcuts import ProgressBar
def main():
with ProgressBar(
title=HTML('<b>Example of many parallel tasks.</b>'),
bottom_toolbar=HTML('<b>[Control-L]</b> clear <b>[Control-C]</b> abort')) as pb:
def run_task(label, total, sleep_time):
for i in pb(range(total), label=label):
time.sleep(sleep_time)
threads = []
for i in range(160):
label = 'Task %i' % i
total = random.randrange(50, 200)
sleep_time = random.randrange(5, 20) / 100.
threads.append(threading.Thread(target=run_task, args=(label, total, sleep_time)))
for t in threads:
t.daemon = True
t.start()
# Wait for the threads to finish. We use a timeout for the join() call,
# because on Windows, join cannot be interrupted by Control-C or any other
# signal.
for t in threads:
while t.is_alive():
t.join(timeout=.5)
if __name__ == '__main__':
main()
|
test_issue_701.py
|
import asyncio
import collections
import logging
import os
import threading
import time
import unittest
import pytest
from integration_tests.env_variable_names import SLACK_SDK_TEST_CLASSIC_APP_BOT_TOKEN
from integration_tests.helpers import async_test, is_not_specified
from slack_sdk.rtm import RTMClient
from slack_sdk.web import WebClient
class TestRTMClient(unittest.TestCase):
"""Runs integration tests with real Slack API
https://github.com/slackapi/python-slack-sdk/issues/701
"""
def setUp(self):
self.logger = logging.getLogger(__name__)
self.bot_token = os.environ[SLACK_SDK_TEST_CLASSIC_APP_BOT_TOKEN]
def tearDown(self):
# Reset the decorators by @RTMClient.run_on
RTMClient._callbacks = collections.defaultdict(list)
# @pytest.mark.skipif(condition=is_not_specified(), reason="to avoid rate_limited errors")
@pytest.mark.skip()
def test_receiving_all_messages(self):
self.rtm_client = RTMClient(token=self.bot_token, loop=asyncio.new_event_loop())
self.web_client = WebClient(token=self.bot_token)
self.call_count = 0
@RTMClient.run_on(event="message")
def send_reply(**payload):
self.logger.debug(payload)
web_client, data = payload["web_client"], payload["data"]
web_client.reactions_add(
channel=data["channel"], timestamp=data["ts"], name="eyes"
)
self.call_count += 1
def connect():
self.logger.debug("Starting RTM Client...")
self.rtm_client.start()
rtm = threading.Thread(target=connect)
rtm.daemon = True
rtm.start()
time.sleep(3)
total_num = 10
sender_completion = []
def sent_bulk_message():
for i in range(total_num):
text = f"Sent by <https://slack.dev/python-slackclient/|python-slackclient>! ({i})"
self.web_client.chat_postMessage(channel="#random", text=text)
time.sleep(0.1)
sender_completion.append(True)
num_of_senders = 3
senders = []
for sender_num in range(num_of_senders):
sender = threading.Thread(target=sent_bulk_message)
sender.daemon = True
sender.start()
senders.append(sender)
while len(sender_completion) < num_of_senders:
time.sleep(1)
expected_call_count = total_num * num_of_senders
wait_seconds = 0
max_wait = 20
while self.call_count < expected_call_count and wait_seconds < max_wait:
time.sleep(1)
wait_seconds += 1
self.assertEqual(
total_num * num_of_senders, self.call_count, "The RTM handler failed"
)
@pytest.mark.skipif(
condition=is_not_specified(), reason="to avoid rate_limited errors"
)
@async_test
async def test_receiving_all_messages_async(self):
self.rtm_client = RTMClient(token=self.bot_token, run_async=True)
self.web_client = WebClient(token=self.bot_token, run_async=False)
self.call_count = 0
@RTMClient.run_on(event="message")
async def send_reply(**payload):
self.logger.debug(payload)
web_client, data = payload["web_client"], payload["data"]
await web_client.reactions_add(
channel=data["channel"], timestamp=data["ts"], name="eyes"
)
self.call_count += 1
# intentionally not waiting here
self.rtm_client.start()
await asyncio.sleep(3)
total_num = 10
sender_completion = []
def sent_bulk_message():
for i in range(total_num):
text = f"Sent by <https://slack.dev/python-slackclient/|python-slackclient>! ({i})"
self.web_client.chat_postMessage(channel="#random", text=text)
time.sleep(0.1)
sender_completion.append(True)
num_of_senders = 3
senders = []
for sender_num in range(num_of_senders):
sender = threading.Thread(target=sent_bulk_message)
sender.daemon = True
sender.start()
senders.append(sender)
while len(sender_completion) < num_of_senders:
await asyncio.sleep(1)
expected_call_count = total_num * num_of_senders
wait_seconds = 0
max_wait = 20
while self.call_count < expected_call_count and wait_seconds < max_wait:
await asyncio.sleep(1)
wait_seconds += 1
self.assertEqual(
total_num * num_of_senders, self.call_count, "The RTM handler failed"
)
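# Sketch of the polling pattern used twice in the tests above (wait for a condition
# or give up after max_wait seconds); added for illustration only, not part of the
# original test file.
def wait_until(condition, max_wait=20, interval=1):
    waited = 0
    while not condition() and waited < max_wait:
        time.sleep(interval)
        waited += interval
    return condition()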
|