main.py
from kivy.uix.widget import Widget
from kivy.uix.boxlayout import BoxLayout
from kivy.properties import (
ObjectProperty, StringProperty
)
from kivy.config import Config
Config.set('graphics', 'width', '800')
Config.set('graphics', 'height', '600')
Config.set('input', 'mouse', 'mouse,disable_multitouch')
from kivy.uix.label import Label
from kivy.uix.spinner import Spinner
from kivymd.uix.picker import MDDatePicker
from kivymd.app import MDApp
from plyer import filechooser # Do this during packaging: https://github.com/kivy/plyer/issues/613
from kivy.uix.popup import Popup
from kivy.uix.button import Button
from kivy.storage.jsonstore import JsonStore
from kivy.clock import Clock
from base import Hologram
from datetime import datetime, timedelta
from requests.exceptions import RequestException
import requests
import threading
import functools
import os
import tempfile
import json
import sys
credentials_path = os.path.join(tempfile.gettempdir(), 'credentials.json')
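# timezones.json is assumed to be a list of objects each carrying a 'text' display name
# and a numeric 'offset' (later passed to save_records as timeDelta); only those two
# keys are read here.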
with open('timezones.json') as f:
    timezones = json.load(f)
tz_offset = {
    tz['text']: tz['offset']
    for tz in timezones
}
args = {n: arg for n, arg in enumerate(sys.argv)}
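# sys.argv exposed as a position-indexed dict; running `python main.py test` makes
# args.get(1) == 'test', which enables the testing shortcut in Root.__init__ below.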
# Label used for section headers
class SectionLabel(Label):
def __init__(self, **kwargs):
super().__init__(**kwargs)
# Transparent spacer widget used to add vertical padding between widgets
class BlankSpace(Widget):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.size_hint = (1, 0.01)
# Spinner for selecting the timezone (populated from timezones.json)
class PickHour(Spinner):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.text = list(tz_offset.keys())[0]
self.values = list(tz_offset.keys())
class ShellCommand(Label):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.multiline = True
self.text = ''
about_us_text = '''
[b]Manuel A. Andrade[/b]*
Assistant Professor of Water and Irrigation Management
andradea@unr.edu
[b]Diego A. Quintero[/b]*
Graduate research assistant
dquintero@nevada.unr.edu
[b]Uriel Cholula[/b]*
Graduate research assistant
ucholula@nevada.unr.edu
*University of Nevada, Reno
Dept. of Agriculture, Veterinary and Rangeland Sciences
Water and Irrigation Management Lab
The development of this software is based upon work that is
supported by the Nevada System of Higher Education (NSHE).
'''
class AboutUSLabel(Label):
def __init__(self, **kwargs):
super().__init__(**kwargs)
        self.text = about_us_text
        self.size_hint = (1, 0.80)
        self.markup = True
        self.halign = 'left'
class WarningLabel(Label):
def __init__(self, **kwargs):
super().__init__(**kwargs)
        self.size_hint = (1, 0.75)
        self.markup = True
        self.halign = 'left'
class WarningPopup(Popup):
def __init__(self, popup_text, title='Warning', **kwargs):
super().__init__(**kwargs)
self.title = title
self.size = (400, 170)
self.size_hint = (None, None)
self.content = BoxLayout(orientation = 'vertical')
warn_text = WarningLabel()
warn_text.text = popup_text
self.content.add_widget(warn_text)
self.content.add_widget(
Button(
text='Close',
on_release=self.dismiss,
size_hint=(0.5, 0.25),
pos_hint={'center_x': 0.5},
padding_y=50
)
)
class AboutUS(Popup):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.title = 'PyHOLA_GUI Version 0.1.2111a'
self.size = (450, 450)
self.size_hint = (None, None)
self.content = BoxLayout(orientation = 'vertical')
self.content.add_widget(
AboutUSLabel()
)
self.content.add_widget(BlankSpace(size_hint=(1, 0.05)))
self.content.add_widget(
Button(
text='Close',
on_release=self.dismiss,
size_hint=(0.5, 0.10),
pos_hint={'center_x': 0.5},
padding_y=50
)
)
self.content.add_widget(BlankSpace(size_hint=(1, 0.05)))
# Root widget
class Root(BoxLayout):
# Retrieve credentials
store = JsonStore(credentials_path)
try:
deviceid = store.get('credentials')['deviceid']
orgid = store.get('credentials')['orgid']
apikey = store.get('credentials')['apikey']
timezone = store.get('parameters')['timezone']
except KeyError:
deviceid = ''
orgid = ''
apikey = ''
        timezone = list(tz_offset.keys())[0]
download_progress = 0
Hol = Hologram(
deviceID=deviceid,
apiKey=apikey,
startTime=None,
endTime=None,
orgID=orgid,
)
date_from = None
date_to = None
def __init__(self, **kwargs):
        super(Root, self).__init__(orientation='vertical', **kwargs)
Clock.schedule_interval(self.terminate_download, 0.5)
        # This is used during testing to set parameters
if args.get(1) == 'test':
base_date = datetime(2021, 10, 31)
self.date_range = [base_date - timedelta(days=x) for x in range(100)]
self.date_range = self.date_range[::-1]
self.path = ['C:\\Users\\dandres\\Desktop\\test.csv']
self.ids.file_label.text = str(self.path[0])
ddrange = f'Download from {self.date_range[0]} to {self.date_range[-1]}'
self.ids.date_label.text = f'{ddrange}'
try:
os.remove(self.path[0])
except FileNotFoundError:
pass
# Save date
def on_save(self, instance, value, date_range):
ddrange = f'Download from {date_range[0]} to {date_range[-1]}'
self.ids.date_label.text = f'{ddrange}'
self.date_range = date_range
    # Cancel date
def on_cancel(self, instance, value):
self.ids.date_label.text = 'Select a date'
def print_msg(self, msg='Downloading'):
self.ids.console_prompt.text += str(msg)+'\n'
def show_date_picker(self):
date_dialog = MDDatePicker(mode='range')
date_dialog.bind(on_save=self.on_save, on_cancel=self.on_cancel)
date_dialog.open()
def open_file(self):
self.path = filechooser.open_file(
title="Pick a CSV file..",
filters=[("Comma-separated Values", "*.csv")]
)
try:
self.ids.file_label.text = str(self.path[0])
except IndexError:
pass
def open_aboutus(self):
popup = AboutUS()
popup.open()
def open_warn(self, message, title='Warning'):
popup = WarningPopup(message, title)
popup.open()
def update_progress(self, dt):
# stuff that must be done on the main thread
self.ids.progressbar.value += 1
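    # Kivy widgets may only be touched from the main thread, so the download itself runs
    # in a worker thread (below) and UI updates are pushed back through Clock callbacks.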
def download_main_thread(self):
        download_thread = threading.Thread(target=self.download_trigger)
        download_thread.start()
def download_trigger(self):
if args.get(1) == 'test':
try:
os.remove(self.path[0])
except FileNotFoundError:
pass
if not hasattr(self, 'date_range'):
self.open_warn('You must define a date range')
return None
try:
self.path
except AttributeError:
self.open_warn('You must select a file to download the records')
return None
if os.path.exists(self.path[0]) and not self.ids.append.active:
self.open_warn('File already exists, you must create a new one')
return None
if not os.path.exists(self.path[0]) and self.ids.append.active:
            self.open_warn('File does not exist, you must select an existing file to append records to')
return None
try:
_ = requests.get('http://www.google.com', timeout=3)
except (requests.ConnectionError, requests.Timeout):
self.open_warn('No internet connection')
return None
self.store.put(
'credentials',
deviceid=self.ids.deviceID.text,
orgid=self.ids.OrganizationID.text,
apikey=self.ids.APIKey.text
)
self.store.put(
'parameters',
timezone=self.ids.timeDelta.text
)
self.download_init()
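        # The requested range is downloaded in one-day windows: startTime/endTime on the
        # Hologram client always span a single day, so the progress bar can advance once
        # per downloaded day.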
self.date_from = datetime.combine(self.date_range[0], datetime.min.time())
self.date_from -= timedelta(days=1)
self.date_to = datetime.combine(self.date_range[-1], datetime.min.time()) + timedelta(days=1)
if self.date_to > datetime.now():
self.date_to = datetime.now() + timedelta(days=1)
self.ids.progressbar.max = (self.date_to - self.date_from).days
endDate = self.date_to
self.date_to = self.date_from + timedelta(days=1)
while self.date_from < endDate:
self.Hol.startTime = self.date_from
self.Hol.endTime = self.date_to
self.download()
self.date_from += timedelta(days=1)
self.date_to += timedelta(days=1)
print()
def terminate_download(self, dt):
if self.ids.progressbar.value == self.ids.progressbar.max:
if len(self.Hol.records) < 1:
self.open_warn('No records for the requested period')
self.Hol = Hologram(
deviceID=self.deviceid,
apiKey=self.apikey,
startTime=None,
endTime=None,
orgID=self.orgid,
)
self.ids.progressbar.max = 1e10
return
save_thread = threading.Thread(target=self.save_records)
save_thread.start()
self.ids.progressbar.value = 0
def download_init(self):
self.Hol.apiKey = self.ids.APIKey.text.strip()
self.Hol.orgID = self.ids.OrganizationID.text.strip()
if self.ids.oneDevice.active:
self.Hol.deviceID = self.ids.deviceID.text.strip()
else:
self.Hol.deviceID = None
self.ids.download_button.disabled = True
self.ids.download_button.text = '[b][color=303030]Downloading...[/b][/color]'
def download(self):
try:
self.Hol.startTime = self.date_from
self.Hol.endTime = self.date_to
try:
self.Hol.retrieve()
if self.ids.progressbar.value < self.ids.progressbar.max:
Clock.schedule_once(functools.partial(self.update_progress))
except RequestException:
self.ids.progressbar.max -= 1
return None
except RequestException:
self.open_warn('Something went wrong with the request')
self.ids.progressbar.max -= 1
def save_records(self):
self.ids.download_button.text = '[b][color=252525]Saving records...[/b][/color]'
self.Hol.save_records(
filepath=str(self.path[0]),
sep='\t',
append=self.ids.append.active,
timeDelta=tz_offset[self.ids.timeDelta.text],
absStartDate=self.date_range[0]
)
self.open_warn(f'{len(self.Hol.records)} records written to {self.path[0]}', 'Successful download')
self.Hol = Hologram(
deviceID=self.deviceid,
apiKey=self.apikey,
startTime=None,
endTime=None,
orgID=self.orgid,
)
self.ids.download_button.text = '[b]Download[/b]'
self.ids.download_button.disabled = False
self.ids.progressbar.value = 0
class PyHOLAApp(MDApp):
path = ObjectProperty(None)
out = StringProperty('')
def build(self):
return Root()
if __name__ == '__main__':
PyHOLAApp().run()
MultiThreadMaskRCNN.py
import numpy as np
import cv2, sys, queue, threading
from samples import coco
from mrcnn import utils
from mrcnn import model as modellib
import os
ROOT_DIR = os.getcwd()
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
if not os.path.exists(COCO_MODEL_PATH):
utils.download_trained_weights(COCO_MODEL_PATH)
class InferenceConfig(coco.CocoConfig):
GPU_COUNT = 1
IMAGES_PER_GPU = 1
config = InferenceConfig()
config.display()
model = modellib.MaskRCNN(
mode="inference", model_dir=MODEL_DIR, config=config
)
model.load_weights(COCO_MODEL_PATH, by_name=True)
class_names = [
'BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
'bus', 'train', 'truck', 'boat', 'traffic light',
'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird',
'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',
'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',
'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
'kite', 'baseball bat', 'baseball glove', 'skateboard',
'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',
'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',
'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
'keyboard', 'cell phone', 'microwave', 'oven', 'toaster',
'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',
'teddy bear', 'hair drier', 'toothbrush'
]
def random_colors(N):
np.random.seed(1)
colors = [tuple(255 * np.random.rand(3)) for _ in range(N)]
return colors
colors = random_colors(len(class_names))
class_dict = {
name: color for name, color in zip(class_names, colors)
}
def apply_mask(image, mask, color, alpha=0.5):
"""apply mask to image"""
for n, c in enumerate(color):
image[:, :, n] = np.where(
mask == 1,
image[:, :, n] * (1 - alpha) + alpha * c,
image[:, :, n]
)
return image
def display_instances(image, boxes, masks, ids, names, scores):
"""
take the image and results and apply the mask, box, and Label
"""
n_instances = boxes.shape[0]
if not n_instances:
print('NO INSTANCES TO DISPLAY')
else:
assert boxes.shape[0] == masks.shape[-1] == ids.shape[0]
for i in range(n_instances):
if not np.any(boxes[i]):
continue
y1, x1, y2, x2 = boxes[i]
label = names[ids[i]]
color = class_dict[label]
score = scores[i] if scores is not None else None
caption = '{} {:.2f}'.format(label, score) if score else label
mask = masks[:, :, i]
image = apply_mask(image, mask, color)
image = cv2.rectangle(image, (x1, y1), (x2, y2), color, 2)
image = cv2.putText(
image, caption, (x1, y1), cv2.FONT_HERSHEY_COMPLEX, 0.7, color, 2
)
return image
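# Producer thread: continuously grab frames from the camera; the blocking put()
# throttles capture whenever the bounded (5-slot) image queue is full.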
def get_image(cap, image_queue):
while 1:
_, img = cap.read()
image_queue.put(img,True)
image_queue = queue.Queue(5)
cap = cv2.VideoCapture(0)
video_thread = threading.Thread(target = get_image, args=(cap,image_queue))
video_thread.start()
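# One detection up front on the main thread (result `y` is unused), presumably to force
# model/graph initialization before the inference thread starts.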
y = model.detect([image_queue.get(True)],verbose=0)
def get_result(model,image_queue,result_queue):
while 1:
frame = image_queue.get(True)
maskresult = model.detect([frame],verbose=0)
r = maskresult[0]
#frame = display_instances(
# frame, r['rois'], r['masks'], r['class_ids'], class_names, r['scores']
#) # retrive the result
result = [frame,r]
#cv2.imshow('', frame);
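        # Publish the current (frame, result) pair, then keep re-publishing the same
        # detection on any fresher frames already queued (non-blocking), so the display
        # does not lag behind the slow model; stop as soon as either queue would block.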
while 1:
try:
result_queue.put(result.copy(),False)
result[0] = image_queue.get(False)
            except (queue.Empty, queue.Full):
break
result_queue = queue.Queue(5)
hs_thread = threading.Thread(target = get_result, args=(model,image_queue,result_queue))
hs_thread.start()
while 1:
frame,r = result_queue.get(True)
frame = display_instances(
frame, r['rois'], r['masks'], r['class_ids'], class_names, r['scores']
    )  # retrieve the result
cv2.imshow(' ',frame)
cv2.waitKey(30)
cap.release()
cv2.destroyAllWindows()
gdal2tiles.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ******************************************************************************
# $Id: gdal2tiles.py ac171f5b0fe544a6c1ff7400249f6ce362e9ace0 2018-04-16 06:32:36 +1000 Ben Elliston $
#
# Project: Google Summer of Code 2007, 2008 (http://code.google.com/soc/)
# Support: BRGM (http://www.brgm.fr)
# Purpose: Convert a raster into TMS (Tile Map Service) tiles in a directory.
# - generate Google Earth metadata (KML SuperOverlay)
# - generate simple HTML viewer based on Google Maps and OpenLayers
# - support of global tiles (Spherical Mercator) for compatibility
# with interactive web maps a la Google Maps
# Author: Klokan Petr Pridal, klokan at klokan dot cz
# Web: http://www.klokan.cz/projects/gdal2tiles/
# GUI: http://www.maptiler.org/
#
###############################################################################
# Copyright (c) 2008, Klokan Petr Pridal
# Copyright (c) 2010-2013, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# ******************************************************************************
from __future__ import print_function, division
import math
from multiprocessing import Pipe, Pool, Process, Manager
import os
import tempfile
import shutil
import sys
from uuid import uuid4
from xml.etree import ElementTree
from osgeo import gdal
from osgeo import osr
try:
from PIL import Image
import numpy
import osgeo.gdal_array as gdalarray
except Exception:
# 'antialias' resampling is not available
pass
__version__ = "$Id: gdal2tiles.py ac171f5b0fe544a6c1ff7400249f6ce362e9ace0 2018-04-16 06:32:36 +1000 Ben Elliston $"
resampling_list = ('average', 'near', 'bilinear', 'cubic', 'cubicspline', 'lanczos', 'antialias')
profile_list = ('mercator', 'geodetic', 'raster')
webviewer_list = ('all', 'google', 'openlayers', 'leaflet', 'none')
# =============================================================================
# =============================================================================
# =============================================================================
__doc__globalmaptiles = """
globalmaptiles.py
Global Map Tiles as defined in Tile Map Service (TMS) Profiles
==============================================================
Functions necessary for generation of global tiles used on the web.
It contains classes implementing coordinate conversions for:
- GlobalMercator (based on EPSG:3857)
for Google Maps, Yahoo Maps, Bing Maps compatible tiles
- GlobalGeodetic (based on EPSG:4326)
for OpenLayers Base Map and Google Earth compatible tiles
More info at:
http://wiki.osgeo.org/wiki/Tile_Map_Service_Specification
http://wiki.osgeo.org/wiki/WMS_Tiling_Client_Recommendation
http://msdn.microsoft.com/en-us/library/bb259689.aspx
http://code.google.com/apis/maps/documentation/overlays.html#Google_Maps_Coordinates
Created by Klokan Petr Pridal on 2008-07-03.
Google Summer of Code 2008, project GDAL2Tiles for OSGEO.
In case you use this class in your product, translate it to another language
or find it useful for your project please let me know.
My email: klokan at klokan dot cz.
I would like to know where it was used.
Class is available under the open-source GDAL license (www.gdal.org).
"""
MAXZOOMLEVEL = 32
class GlobalMercator(object):
r"""
TMS Global Mercator Profile
---------------------------
Functions necessary for generation of tiles in Spherical Mercator projection,
EPSG:3857.
Such tiles are compatible with Google Maps, Bing Maps, Yahoo Maps,
UK Ordnance Survey OpenSpace API, ...
and you can overlay them on top of base maps of those web mapping applications.
Pixel and tile coordinates are in TMS notation (origin [0,0] in bottom-left).
What coordinate conversions do we need for TMS Global Mercator tiles::
        LatLon      <->       Meters      <->     Pixels    <->       Tile

    WGS84 coordinates   Spherical Mercator   Pixels in pyramid   Tiles in pyramid
        lat/lon            XY in meters      XY pixels, Z zoom    XYZ from TMS
       EPSG:4326            EPSG:3857
        .----.              ---------               --                TMS
       /      \     <->     |       |     <->     /----/    <->      Google
       \      /             |       |           /--------/          QuadTree
        -----               ---------         /------------/
      KML, public         WebMapService         Web Clients      TileMapService
What is the coordinate extent of Earth in EPSG:3857?
[-20037508.342789244, -20037508.342789244, 20037508.342789244, 20037508.342789244]
Constant 20037508.342789244 comes from the circumference of the Earth in meters,
which is 40 thousand kilometers, the coordinate origin is in the middle of extent.
In fact you can calculate the constant as: 2 * math.pi * 6378137 / 2.0
$ echo 180 85 | gdaltransform -s_srs EPSG:4326 -t_srs EPSG:3857
    Polar areas with abs(latitude) bigger than 85.05112878 are clipped off.
What are zoom level constants (pixels/meter) for pyramid with EPSG:3857?
whole region is on top of pyramid (zoom=0) covered by 256x256 pixels tile,
every lower zoom level resolution is always divided by two
initialResolution = 20037508.342789244 * 2 / 256 = 156543.03392804062
What is the difference between TMS and Google Maps/QuadTree tile name convention?
The tile raster itself is the same (equal extent, projection, pixel size),
there is just different identification of the same raster tile.
Tiles in TMS are counted from [0,0] in the bottom-left corner, id is XYZ.
Google placed the origin [0,0] to the top-left corner, reference is XYZ.
Microsoft is referencing tiles by a QuadTree name, defined on the website:
http://msdn2.microsoft.com/en-us/library/bb259689.aspx
The lat/lon coordinates are using WGS84 datum, yes?
Yes, all lat/lon we are mentioning should use WGS84 Geodetic Datum.
Well, the web clients like Google Maps are projecting those coordinates by
Spherical Mercator, so in fact lat/lon coordinates on sphere are treated as if
    they were on the WGS84 ellipsoid.
From MSDN documentation:
To simplify the calculations, we use the spherical form of projection, not
the ellipsoidal form. Since the projection is used only for map display,
and not for displaying numeric coordinates, we don't need the extra precision
of an ellipsoidal projection. The spherical projection causes approximately
0.33 percent scale distortion in the Y direction, which is not visually
noticeable.
How do I create a raster in EPSG:3857 and convert coordinates with PROJ.4?
You can use standard GIS tools like gdalwarp, cs2cs or gdaltransform.
    All of these tools support -t_srs 'epsg:3857'.
For other GIS programs check the exact definition of the projection:
More info at http://spatialreference.org/ref/user/google-projection/
The same projection is designated as EPSG:3857. WKT definition is in the
official EPSG database.
Proj4 Text:
+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0
+k=1.0 +units=m +nadgrids=@null +no_defs
Human readable WKT format of EPSG:3857:
PROJCS["Google Maps Global Mercator",
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0],
UNIT["degree",0.0174532925199433],
AUTHORITY["EPSG","4326"]],
PROJECTION["Mercator_1SP"],
PARAMETER["central_meridian",0],
PARAMETER["scale_factor",1],
PARAMETER["false_easting",0],
PARAMETER["false_northing",0],
UNIT["metre",1,
AUTHORITY["EPSG","9001"]]]
"""
def __init__(self, tileSize=256):
"Initialize the TMS Global Mercator pyramid"
self.tileSize = tileSize
self.initialResolution = 2 * math.pi * 6378137 / self.tileSize
# 156543.03392804062 for tileSize 256 pixels
self.originShift = 2 * math.pi * 6378137 / 2.0
# 20037508.342789244
def LatLonToMeters(self, lat, lon):
"Converts given lat/lon in WGS84 Datum to XY in Spherical Mercator EPSG:3857"
mx = lon * self.originShift / 180.0
my = math.log(math.tan((90 + lat) * math.pi / 360.0)) / (math.pi / 180.0)
my = my * self.originShift / 180.0
return mx, my
def MetersToLatLon(self, mx, my):
"Converts XY point from Spherical Mercator EPSG:3857 to lat/lon in WGS84 Datum"
lon = (mx / self.originShift) * 180.0
lat = (my / self.originShift) * 180.0
lat = 180 / math.pi * (2 * math.atan(math.exp(lat * math.pi / 180.0)) - math.pi / 2.0)
return lat, lon
def PixelsToMeters(self, px, py, zoom):
"Converts pixel coordinates in given zoom level of pyramid to EPSG:3857"
res = self.Resolution(zoom)
mx = px * res - self.originShift
my = py * res - self.originShift
return mx, my
def MetersToPixels(self, mx, my, zoom):
"Converts EPSG:3857 to pyramid pixel coordinates in given zoom level"
res = self.Resolution(zoom)
px = (mx + self.originShift) / res
py = (my + self.originShift) / res
return px, py
def PixelsToTile(self, px, py):
"Returns a tile covering region in given pixel coordinates"
tx = int(math.ceil(px / float(self.tileSize)) - 1)
ty = int(math.ceil(py / float(self.tileSize)) - 1)
return tx, ty
def PixelsToRaster(self, px, py, zoom):
"Move the origin of pixel coordinates to top-left corner"
mapSize = self.tileSize << zoom
return px, mapSize - py
def MetersToTile(self, mx, my, zoom):
"Returns tile for given mercator coordinates"
px, py = self.MetersToPixels(mx, my, zoom)
return self.PixelsToTile(px, py)
def TileBounds(self, tx, ty, zoom):
"Returns bounds of the given tile in EPSG:3857 coordinates"
minx, miny = self.PixelsToMeters(tx * self.tileSize, ty * self.tileSize, zoom)
maxx, maxy = self.PixelsToMeters((tx + 1) * self.tileSize, (ty + 1) * self.tileSize, zoom)
return (minx, miny, maxx, maxy)
def TileLatLonBounds(self, tx, ty, zoom):
"Returns bounds of the given tile in latitude/longitude using WGS84 datum"
bounds = self.TileBounds(tx, ty, zoom)
minLat, minLon = self.MetersToLatLon(bounds[0], bounds[1])
maxLat, maxLon = self.MetersToLatLon(bounds[2], bounds[3])
return (minLat, minLon, maxLat, maxLon)
def Resolution(self, zoom):
"Resolution (meters/pixel) for given zoom level (measured at Equator)"
# return (2 * math.pi * 6378137) / (self.tileSize * 2**zoom)
return self.initialResolution / (2**zoom)
def ZoomForPixelSize(self, pixelSize):
"Maximal scaledown zoom of the pyramid closest to the pixelSize."
for i in range(MAXZOOMLEVEL):
if pixelSize > self.Resolution(i):
                if i != 0:
return i - 1
else:
return 0 # We don't want to scale up
def GoogleTile(self, tx, ty, zoom):
"Converts TMS tile coordinates to Google Tile coordinates"
# coordinate origin is moved from bottom-left to top-left corner of the extent
return tx, (2**zoom - 1) - ty
def QuadTree(self, tx, ty, zoom):
"Converts TMS tile coordinates to Microsoft QuadTree"
quadKey = ""
ty = (2**zoom - 1) - ty
for i in range(zoom, 0, -1):
digit = 0
mask = 1 << (i - 1)
if (tx & mask) != 0:
digit += 1
if (ty & mask) != 0:
digit += 2
quadKey += str(digit)
return quadKey
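# A minimal usage sketch of the tile-naming conversions above (values follow directly
# from the formulas):
#   gm = GlobalMercator()
#   gm.LatLonToMeters(0.0, 90.0)   # -> (10018754.171394622, 0.0)
#   gm.GoogleTile(3, 5, 3)         # -> (3, 2): TMS y flipped to a top-left origin
#   gm.QuadTree(3, 5, 3)           # -> "031": Bing/Microsoft QuadKey of the same tile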
class GlobalGeodetic(object):
r"""
TMS Global Geodetic Profile
---------------------------
Functions necessary for generation of global tiles in Plate Carre projection,
EPSG:4326, "unprojected profile".
Such tiles are compatible with Google Earth (as any other EPSG:4326 rasters)
and you can overlay the tiles on top of OpenLayers base map.
Pixel and tile coordinates are in TMS notation (origin [0,0] in bottom-left).
What coordinate conversions do we need for TMS Global Geodetic tiles?
Global Geodetic tiles are using geodetic coordinates (latitude,longitude)
directly as planar coordinates XY (it is also called Unprojected or Plate
Carre). We need only scaling to pixel pyramid and cutting to tiles.
Pyramid has on top level two tiles, so it is not square but rectangle.
Area [-180,-90,180,90] is scaled to 512x256 pixels.
TMS has coordinate origin (for pixels and tiles) in bottom-left corner.
Rasters are in EPSG:4326 and therefore are compatible with Google Earth.
        LatLon      <->      Pixels      <->     Tiles

    WGS84 coordinates   Pixels in pyramid   Tiles in pyramid
        lat/lon         XY pixels, Z zoom     XYZ from TMS
       EPSG:4326
        .----.                ----
       /      \     <->    /--------/    <->      TMS
       \      /         /--------------/
        -----        /--------------------/
      WMS, KML    Web Clients, Google Earth    TileMapService
"""
def __init__(self, tmscompatible, tileSize=256):
self.tileSize = tileSize
if tmscompatible is not None:
# Defaults the resolution factor to 0.703125 (2 tiles @ level 0)
            # Adheres to the OSGeo TMS spec
# http://wiki.osgeo.org/wiki/Tile_Map_Service_Specification#global-geodetic
self.resFact = 180.0 / self.tileSize
else:
# Defaults the resolution factor to 1.40625 (1 tile @ level 0)
            # Adheres to the OpenLayers, MapProxy, etc. default resolution for WMTS
self.resFact = 360.0 / self.tileSize
def LonLatToPixels(self, lon, lat, zoom):
"Converts lon/lat to pixel coordinates in given zoom of the EPSG:4326 pyramid"
res = self.resFact / 2**zoom
px = (180 + lon) / res
py = (90 + lat) / res
return px, py
def PixelsToTile(self, px, py):
"Returns coordinates of the tile covering region in pixel coordinates"
tx = int(math.ceil(px / float(self.tileSize)) - 1)
ty = int(math.ceil(py / float(self.tileSize)) - 1)
return tx, ty
def LonLatToTile(self, lon, lat, zoom):
"Returns the tile for zoom which covers given lon/lat coordinates"
px, py = self.LonLatToPixels(lon, lat, zoom)
return self.PixelsToTile(px, py)
def Resolution(self, zoom):
"Resolution (arc/pixel) for given zoom level (measured at Equator)"
return self.resFact / 2**zoom
def ZoomForPixelSize(self, pixelSize):
"Maximal scaledown zoom of the pyramid closest to the pixelSize."
for i in range(MAXZOOMLEVEL):
if pixelSize > self.Resolution(i):
if i != 0:
return i - 1
else:
return 0 # We don't want to scale up
def TileBounds(self, tx, ty, zoom):
"Returns bounds of the given tile"
res = self.resFact / 2**zoom
return (
tx * self.tileSize * res - 180,
ty * self.tileSize * res - 90,
(tx + 1) * self.tileSize * res - 180,
(ty + 1) * self.tileSize * res - 90
)
def TileLatLonBounds(self, tx, ty, zoom):
"Returns bounds of the given tile in the SWNE form"
b = self.TileBounds(tx, ty, zoom)
return (b[1], b[0], b[3], b[2])
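# Sketch: with the TMS-compatible factor (two tiles at zoom 0) the world splits into a
# western and an eastern tile, matching the description above:
#   gg = GlobalGeodetic(tmscompatible=True)
#   gg.TileBounds(0, 0, 0)   # -> (-180.0, -90.0, 0.0, 90.0)
#   gg.TileBounds(1, 0, 0)   # -> (0.0, -90.0, 180.0, 90.0)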
class Zoomify(object):
"""
Tiles compatible with the Zoomify viewer
----------------------------------------
"""
def __init__(self, width, height, tilesize=256, tileformat='jpg'):
"""Initialization of the Zoomify tile tree"""
self.tilesize = tilesize
self.tileformat = tileformat
imagesize = (width, height)
tiles = (math.ceil(width / tilesize), math.ceil(height / tilesize))
# Size (in tiles) for each tier of pyramid.
self.tierSizeInTiles = []
self.tierSizeInTiles.append(tiles)
        # Image size in pixels for each pyramid tier
self.tierImageSize = []
self.tierImageSize.append(imagesize)
while (imagesize[0] > tilesize or imagesize[1] > tilesize):
imagesize = (math.floor(imagesize[0] / 2), math.floor(imagesize[1] / 2))
tiles = (math.ceil(imagesize[0] / tilesize), math.ceil(imagesize[1] / tilesize))
self.tierSizeInTiles.append(tiles)
self.tierImageSize.append(imagesize)
self.tierSizeInTiles.reverse()
self.tierImageSize.reverse()
# Depth of the Zoomify pyramid, number of tiers (zoom levels)
self.numberOfTiers = len(self.tierSizeInTiles)
# Number of tiles up to the given tier of pyramid.
        self.tileCountUpToTier = [0]
for i in range(1, self.numberOfTiers + 1):
self.tileCountUpToTier.append(
self.tierSizeInTiles[i - 1][0] * self.tierSizeInTiles[i - 1][1] +
self.tileCountUpToTier[i - 1]
)
def tilefilename(self, x, y, z):
"""Returns filename for tile with given coordinates"""
tileIndex = x + y * self.tierSizeInTiles[z][0] + self.tileCountUpToTier[z]
return os.path.join("TileGroup%.0f" % math.floor(tileIndex / 256),
"%s-%s-%s.%s" % (z, x, y, self.tileformat))
class GDALError(Exception):
pass
def exit_with_error(message, details=""):
# Message printing and exit code kept from the way it worked using the OptionParser (in case
# someone parses the error output)
sys.stderr.write("Usage: gdal2tiles.py [options] input_file [output]\n\n")
sys.stderr.write("gdal2tiles.py: error: %s\n" % message)
if details:
sys.stderr.write("\n\n%s\n" % details)
sys.exit(2)
def generate_kml(tx, ty, tz, tileext, tilesize, tileswne, options, children=None, **args):
"""
Template for the KML. Returns filled string.
"""
if not children:
children = []
args['tx'], args['ty'], args['tz'] = tx, ty, tz
args['tileformat'] = tileext
if 'tilesize' not in args:
args['tilesize'] = tilesize
if 'minlodpixels' not in args:
args['minlodpixels'] = int(args['tilesize'] / 2)
if 'maxlodpixels' not in args:
args['maxlodpixels'] = int(args['tilesize'] * 8)
if children == []:
args['maxlodpixels'] = -1
if tx is None:
tilekml = False
args['title'] = options.title
else:
tilekml = True
args['title'] = "%d/%d/%d.kml" % (tz, tx, ty)
args['south'], args['west'], args['north'], args['east'] = tileswne(tx, ty, tz)
if tx == 0:
args['drawOrder'] = 2 * tz + 1
elif tx is not None:
args['drawOrder'] = 2 * tz
else:
args['drawOrder'] = 0
url = options.url
if not url:
if tilekml:
url = "../../"
else:
url = ""
s = """<?xml version="1.0" encoding="utf-8"?>
<kml xmlns="http://www.opengis.net/kml/2.2">
<Document>
<name>%(title)s</name>
<description></description>
<Style>
<ListStyle id="hideChildren">
<listItemType>checkHideChildren</listItemType>
</ListStyle>
</Style>""" % args
if tilekml:
s += """
<Region>
<LatLonAltBox>
<north>%(north).14f</north>
<south>%(south).14f</south>
<east>%(east).14f</east>
<west>%(west).14f</west>
</LatLonAltBox>
<Lod>
<minLodPixels>%(minlodpixels)d</minLodPixels>
<maxLodPixels>%(maxlodpixels)d</maxLodPixels>
</Lod>
</Region>
<GroundOverlay>
<drawOrder>%(drawOrder)d</drawOrder>
<Icon>
<href>%(ty)d.%(tileformat)s</href>
</Icon>
<LatLonBox>
<north>%(north).14f</north>
<south>%(south).14f</south>
<east>%(east).14f</east>
<west>%(west).14f</west>
</LatLonBox>
</GroundOverlay>
""" % args
for cx, cy, cz in children:
csouth, cwest, cnorth, ceast = tileswne(cx, cy, cz)
s += """
<NetworkLink>
<name>%d/%d/%d.%s</name>
<Region>
<LatLonAltBox>
<north>%.14f</north>
<south>%.14f</south>
<east>%.14f</east>
<west>%.14f</west>
</LatLonAltBox>
<Lod>
<minLodPixels>%d</minLodPixels>
<maxLodPixels>-1</maxLodPixels>
</Lod>
</Region>
<Link>
<href>%s%d/%d/%d.kml</href>
<viewRefreshMode>onRegion</viewRefreshMode>
<viewFormat/>
</Link>
</NetworkLink>
""" % (cz, cx, cy, args['tileformat'], cnorth, csouth, ceast, cwest,
args['minlodpixels'], url, cz, cx, cy)
s += """ </Document>
</kml>
"""
return s
def scale_query_to_tile(dsquery, dstile, tiledriver, options, tilefilename=''):
"""Scales down query dataset to the tile dataset"""
querysize = dsquery.RasterXSize
tilesize = dstile.RasterXSize
tilebands = dstile.RasterCount
if options.resampling == 'average':
# Function: gdal.RegenerateOverview()
for i in range(1, tilebands + 1):
# Black border around NODATA
res = gdal.RegenerateOverview(dsquery.GetRasterBand(i), dstile.GetRasterBand(i),
'average')
if res != 0:
exit_with_error("RegenerateOverview() failed on %s, error %d" % (
tilefilename, res))
elif options.resampling == 'antialias':
# Scaling by PIL (Python Imaging Library) - improved Lanczos
array = numpy.zeros((querysize, querysize, tilebands), numpy.uint8)
for i in range(tilebands):
array[:, :, i] = gdalarray.BandReadAsArray(dsquery.GetRasterBand(i + 1),
0, 0, querysize, querysize)
im = Image.fromarray(array, 'RGBA') # Always four bands
im1 = im.resize((tilesize, tilesize), Image.ANTIALIAS)
if os.path.exists(tilefilename):
im0 = Image.open(tilefilename)
im1 = Image.composite(im1, im0, im1)
im1.save(tilefilename, tiledriver)
else:
if options.resampling == 'near':
gdal_resampling = gdal.GRA_NearestNeighbour
elif options.resampling == 'bilinear':
gdal_resampling = gdal.GRA_Bilinear
elif options.resampling == 'cubic':
gdal_resampling = gdal.GRA_Cubic
elif options.resampling == 'cubicspline':
gdal_resampling = gdal.GRA_CubicSpline
elif options.resampling == 'lanczos':
gdal_resampling = gdal.GRA_Lanczos
# Other algorithms are implemented by gdal.ReprojectImage().
dsquery.SetGeoTransform((0.0, tilesize / float(querysize), 0.0, 0.0, 0.0,
tilesize / float(querysize)))
dstile.SetGeoTransform((0.0, 1.0, 0.0, 0.0, 0.0, 1.0))
res = gdal.ReprojectImage(dsquery, dstile, None, None, gdal_resampling)
if res != 0:
exit_with_error("ReprojectImage() failed on %s, error %d" % (tilefilename, res))
def setup_no_data_values(input_dataset, options):
"""
Extract the NODATA values from the dataset or use the passed arguments as override if any
"""
in_nodata = []
if options.srcnodata:
nds = list(map(float, options.srcnodata.split(',')))
if len(nds) < input_dataset.RasterCount:
in_nodata = (nds * input_dataset.RasterCount)[:input_dataset.RasterCount]
else:
in_nodata = nds
else:
for i in range(1, input_dataset.RasterCount + 1):
raster_no_data = input_dataset.GetRasterBand(i).GetNoDataValue()
if raster_no_data is not None:
in_nodata.append(raster_no_data)
if options.verbose:
print("NODATA: %s" % in_nodata)
return in_nodata
def setup_input_srs(input_dataset, options):
"""
Determines and returns the Input Spatial Reference System (SRS) as an osr object and as a
WKT representation
Uses in priority the one passed in the command line arguments. If None, tries to extract them
from the input dataset
"""
input_srs = None
input_srs_wkt = None
if options.s_srs:
input_srs = osr.SpatialReference()
input_srs.SetFromUserInput(options.s_srs)
input_srs_wkt = input_srs.ExportToWkt()
else:
input_srs_wkt = input_dataset.GetProjection()
if not input_srs_wkt and input_dataset.GetGCPCount() != 0:
input_srs_wkt = input_dataset.GetGCPProjection()
if input_srs_wkt:
input_srs = osr.SpatialReference()
input_srs.ImportFromWkt(input_srs_wkt)
return input_srs, input_srs_wkt
def setup_output_srs(input_srs, options):
"""
Setup the desired SRS (based on options)
"""
output_srs = osr.SpatialReference()
if options.profile == 'mercator':
output_srs.ImportFromEPSG(3857)
elif options.profile == 'geodetic':
output_srs.ImportFromEPSG(4326)
else:
output_srs = input_srs
return output_srs
def has_georeference(dataset):
return (dataset.GetGeoTransform() != (0.0, 1.0, 0.0, 0.0, 0.0, 1.0) or
dataset.GetGCPCount() != 0)
def reproject_dataset(from_dataset, from_srs, to_srs, options=None):
"""
Returns the input dataset in the expected "destination" SRS.
If the dataset is already in the correct SRS, returns it unmodified
"""
if not from_srs or not to_srs:
raise GDALError("from and to SRS must be defined to reproject the dataset")
if (from_srs.ExportToProj4() != to_srs.ExportToProj4()) or (from_dataset.GetGCPCount() != 0):
to_dataset = gdal.AutoCreateWarpedVRT(from_dataset,
from_srs.ExportToWkt(), to_srs.ExportToWkt())
if options and options.verbose:
print("Warping of the raster by AutoCreateWarpedVRT (result saved into 'tiles.vrt')")
to_dataset.GetDriver().CreateCopy("tiles.vrt", to_dataset)
return to_dataset
else:
return from_dataset
def add_gdal_warp_options_to_string(vrt_string, warp_options):
if not warp_options:
return vrt_string
vrt_root = ElementTree.fromstring(vrt_string)
options = vrt_root.find("GDALWarpOptions")
if options is None:
return vrt_string
for key, value in warp_options.items():
tb = ElementTree.TreeBuilder()
tb.start("Option", {"name": key})
tb.data(value)
tb.end("Option")
elem = tb.close()
options.insert(0, elem)
return ElementTree.tostring(vrt_root).decode()
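# The injected options end up as children of the <GDALWarpOptions> node in the VRT,
# e.g. <Option name="INIT_DEST">NO_DATA</Option> (see update_no_data_values below).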
def update_no_data_values(warped_vrt_dataset, nodata_values, options=None):
"""
Takes an array of NODATA values and forces them on the WarpedVRT file dataset passed
"""
# TODO: gbataille - Seems that I forgot tests there
if nodata_values != []:
temp_file = gettempfilename('-gdal2tiles.vrt')
warped_vrt_dataset.GetDriver().CreateCopy(temp_file, warped_vrt_dataset)
with open(temp_file, 'r') as f:
vrt_string = f.read()
vrt_string = add_gdal_warp_options_to_string(
vrt_string, {"INIT_DEST": "NO_DATA", "UNIFIED_SRC_NODATA": "YES"})
# TODO: gbataille - check the need for this replacement. Seems to work without
# # replace BandMapping tag for NODATA bands....
# for i in range(len(nodata_values)):
# s = s.replace(
# '<BandMapping src="%i" dst="%i"/>' % ((i+1), (i+1)),
# """
# <BandMapping src="%i" dst="%i">
# <SrcNoDataReal>%i</SrcNoDataReal>
# <SrcNoDataImag>0</SrcNoDataImag>
# <DstNoDataReal>%i</DstNoDataReal>
# <DstNoDataImag>0</DstNoDataImag>
# </BandMapping>
# """ % ((i+1), (i+1), nodata_values[i], nodata_values[i]))
# save the corrected VRT
with open(temp_file, 'w') as f:
f.write(vrt_string)
corrected_dataset = gdal.Open(temp_file)
os.unlink(temp_file)
# set NODATA_VALUE metadata
corrected_dataset.SetMetadataItem(
'NODATA_VALUES', ' '.join([str(i) for i in nodata_values]))
if options and options.verbose:
print("Modified warping result saved into 'tiles1.vrt'")
            # TODO: gbataille - test replacing that with a gdal write of the dataset (more
            # accurately what's used, even if it should be the same)
with open("tiles1.vrt", "w") as f:
f.write(vrt_string)
return corrected_dataset
def add_alpha_band_to_string_vrt(vrt_string):
# TODO: gbataille - Old code speak of this being equivalent to gdalwarp -dstalpha
# To be checked
vrt_root = ElementTree.fromstring(vrt_string)
index = 0
nb_bands = 0
for subelem in list(vrt_root):
if subelem.tag == "VRTRasterBand":
nb_bands += 1
color_node = subelem.find("./ColorInterp")
if color_node is not None and color_node.text == "Alpha":
raise Exception("Alpha band already present")
else:
if nb_bands:
# This means that we are one element after the Band definitions
break
index += 1
tb = ElementTree.TreeBuilder()
tb.start("VRTRasterBand",
{'dataType': "Byte", "band": str(nb_bands + 1), "subClass": "VRTWarpedRasterBand"})
tb.start("ColorInterp", {})
tb.data("Alpha")
tb.end("ColorInterp")
tb.end("VRTRasterBand")
elem = tb.close()
vrt_root.insert(index, elem)
warp_options = vrt_root.find(".//GDALWarpOptions")
tb = ElementTree.TreeBuilder()
tb.start("DstAlphaBand", {})
tb.data(str(nb_bands + 1))
tb.end("DstAlphaBand")
elem = tb.close()
warp_options.append(elem)
# TODO: gbataille - this is a GDALWarpOptions. Why put it in a specific place?
tb = ElementTree.TreeBuilder()
tb.start("Option", {"name": "INIT_DEST"})
tb.data("0")
tb.end("Option")
elem = tb.close()
warp_options.append(elem)
return ElementTree.tostring(vrt_root).decode()
def update_alpha_value_for_non_alpha_inputs(warped_vrt_dataset, options=None):
"""
Handles dataset with 1 or 3 bands, i.e. without alpha channel, in the case the nodata value has
not been forced by options
"""
if warped_vrt_dataset.RasterCount in [1, 3]:
tempfilename = gettempfilename('-gdal2tiles.vrt')
warped_vrt_dataset.GetDriver().CreateCopy(tempfilename, warped_vrt_dataset)
with open(tempfilename) as f:
orig_data = f.read()
alpha_data = add_alpha_band_to_string_vrt(orig_data)
with open(tempfilename, 'w') as f:
f.write(alpha_data)
warped_vrt_dataset = gdal.Open(tempfilename)
os.unlink(tempfilename)
if options and options.verbose:
print("Modified -dstalpha warping result saved into 'tiles1.vrt'")
            # TODO: gbataille - test replacing that with a gdal write of the dataset (more
            # accurately what's used, even if it should be the same)
with open("tiles1.vrt", "w") as f:
f.write(alpha_data)
return warped_vrt_dataset
def nb_data_bands(dataset):
"""
Return the number of data (non-alpha) bands of a gdal dataset
"""
alphaband = dataset.GetRasterBand(1).GetMaskBand()
if ((alphaband.GetMaskFlags() & gdal.GMF_ALPHA) or
dataset.RasterCount == 4 or
dataset.RasterCount == 2):
return dataset.RasterCount - 1
else:
return dataset.RasterCount
def gettempfilename(suffix):
"""Returns a temporary filename"""
if '_' in os.environ:
# tempfile.mktemp() crashes on some Wine versions (the one of Ubuntu 12.04 particularly)
if os.environ['_'].find('wine') >= 0:
tmpdir = '.'
if 'TMP' in os.environ:
tmpdir = os.environ['TMP']
import time
import random
random.seed(time.time())
random_part = 'file%d' % random.randint(0, 1000000000)
return os.path.join(tmpdir, random_part + suffix)
return tempfile.mktemp(suffix)
def create_base_tile(tile_job_info, tile_detail, queue=None):
gdal.AllRegister()
dataBandsCount = tile_job_info.nb_data_bands
output = tile_job_info.output_file_path
tileext = tile_job_info.tile_extension
tilesize = tile_job_info.tile_size
options = tile_job_info.options
tilebands = dataBandsCount + 1
ds = gdal.Open(tile_job_info.src_file, gdal.GA_ReadOnly)
mem_drv = gdal.GetDriverByName('MEM')
out_drv = gdal.GetDriverByName(tile_job_info.tile_driver)
alphaband = ds.GetRasterBand(1).GetMaskBand()
tx = tile_detail.tx
ty = tile_detail.ty
tz = tile_detail.tz
rx = tile_detail.rx
ry = tile_detail.ry
rxsize = tile_detail.rxsize
rysize = tile_detail.rysize
wx = tile_detail.wx
wy = tile_detail.wy
wxsize = tile_detail.wxsize
wysize = tile_detail.wysize
querysize = tile_detail.querysize
# Tile dataset in memory
tilefilename = os.path.join(
output, str(tz), str(tx), "%s.%s" % (ty, tileext))
dstile = mem_drv.Create('', tilesize, tilesize, tilebands)
data = alpha = None
if options.verbose:
print("\tReadRaster Extent: ",
(rx, ry, rxsize, rysize), (wx, wy, wxsize, wysize))
    # The query is done in 'nearest neighbour' mode but can be bigger than the tilesize;
    # we scale the query down to the tilesize with the selected algorithm.
if rxsize != 0 and rysize != 0 and wxsize != 0 and wysize != 0:
data = ds.ReadRaster(rx, ry, rxsize, rysize, wxsize, wysize,
band_list=list(range(1, dataBandsCount + 1)))
alpha = alphaband.ReadRaster(rx, ry, rxsize, rysize, wxsize, wysize)
# The tile in memory is a transparent file by default. Write pixel values into it if
# any
if data:
if tilesize == querysize:
# Use the ReadRaster result directly in tiles ('nearest neighbour' query)
dstile.WriteRaster(wx, wy, wxsize, wysize, data,
band_list=list(range(1, dataBandsCount + 1)))
dstile.WriteRaster(wx, wy, wxsize, wysize, alpha, band_list=[tilebands])
# Note: For source drivers based on WaveLet compression (JPEG2000, ECW,
# MrSID) the ReadRaster function returns high-quality raster (not ugly
# nearest neighbour)
# TODO: Use directly 'near' for WaveLet files
else:
# Big ReadRaster query in memory scaled to the tilesize - all but 'near'
# algo
dsquery = mem_drv.Create('', querysize, querysize, tilebands)
# TODO: fill the null value in case a tile without alpha is produced (now
# only png tiles are supported)
dsquery.WriteRaster(wx, wy, wxsize, wysize, data,
band_list=list(range(1, dataBandsCount + 1)))
dsquery.WriteRaster(wx, wy, wxsize, wysize, alpha, band_list=[tilebands])
scale_query_to_tile(dsquery, dstile, tile_job_info.tile_driver, options,
tilefilename=tilefilename)
del dsquery
# Force freeing the memory to make sure the C++ destructor is called and the memory as well as
# the file locks are released
del ds
del data
if options.resampling != 'antialias':
# Write a copy of tile to png/jpg
out_drv.CreateCopy(tilefilename, dstile, strict=0)
del dstile
# Create a KML file for this tile.
if tile_job_info.kml:
kmlfilename = os.path.join(output, str(tz), str(tx), '%d.kml' % ty)
if not options.resume or not os.path.exists(kmlfilename):
with open(kmlfilename, 'wb') as f:
f.write(generate_kml(
tx, ty, tz, tile_job_info.tile_extension, tile_job_info.tile_size,
tile_job_info.tile_swne, tile_job_info.options
).encode('utf-8'))
if queue:
queue.put("tile %s %s %s" % (tx, ty, tz))
def create_overview_tiles(tile_job_info, output_folder, options):
"""Generation of the overview tiles (higher in the pyramid) based on existing tiles"""
mem_driver = gdal.GetDriverByName('MEM')
tile_driver = tile_job_info.tile_driver
out_driver = gdal.GetDriverByName(tile_driver)
tilebands = tile_job_info.nb_data_bands + 1
# Usage of existing tiles: from 4 underlying tiles generate one as overview.
tcount = 0
for tz in range(tile_job_info.tmaxz - 1, tile_job_info.tminz - 1, -1):
tminx, tminy, tmaxx, tmaxy = tile_job_info.tminmax[tz]
tcount += (1 + abs(tmaxx - tminx)) * (1 + abs(tmaxy - tminy))
ti = 0
if tcount == 0:
return
if not options.quiet:
print("Generating Overview Tiles:")
progress_bar = ProgressBar(tcount)
progress_bar.start()
for tz in range(tile_job_info.tmaxz - 1, tile_job_info.tminz - 1, -1):
tminx, tminy, tmaxx, tmaxy = tile_job_info.tminmax[tz]
for ty in range(tmaxy, tminy - 1, -1):
for tx in range(tminx, tmaxx + 1):
ti += 1
tilefilename = os.path.join(output_folder,
str(tz),
str(tx),
"%s.%s" % (ty, tile_job_info.tile_extension))
if options.verbose:
print(ti, '/', tcount, tilefilename)
if options.resume and os.path.exists(tilefilename):
if options.verbose:
print("Tile generation skipped because of --resume")
else:
progress_bar.log_progress()
continue
# Create directories for the tile
if not os.path.exists(os.path.dirname(tilefilename)):
os.makedirs(os.path.dirname(tilefilename))
dsquery = mem_driver.Create('', 2 * tile_job_info.tile_size,
2 * tile_job_info.tile_size, tilebands)
# TODO: fill the null value
dstile = mem_driver.Create('', tile_job_info.tile_size, tile_job_info.tile_size,
tilebands)
# TODO: Implement more clever walking on the tiles with cache functionality
# probably walk should start with reading of four tiles from top left corner
# Hilbert curve
children = []
# Read the tiles and write them to query window
for y in range(2 * ty, 2 * ty + 2):
for x in range(2 * tx, 2 * tx + 2):
minx, miny, maxx, maxy = tile_job_info.tminmax[tz + 1]
if x >= minx and x <= maxx and y >= miny and y <= maxy:
dsquerytile = gdal.Open(
os.path.join(output_folder, str(tz + 1), str(x),
"%s.%s" % (y, tile_job_info.tile_extension)),
gdal.GA_ReadOnly)
if (ty == 0 and y == 1) or (ty != 0 and (y % (2 * ty)) != 0):
tileposy = 0
else:
tileposy = tile_job_info.tile_size
if tx:
tileposx = x % (2 * tx) * tile_job_info.tile_size
elif tx == 0 and x == 1:
tileposx = tile_job_info.tile_size
else:
tileposx = 0
dsquery.WriteRaster(
tileposx, tileposy, tile_job_info.tile_size,
tile_job_info.tile_size,
dsquerytile.ReadRaster(0, 0,
tile_job_info.tile_size,
tile_job_info.tile_size),
band_list=list(range(1, tilebands + 1)))
children.append([x, y, tz + 1])
scale_query_to_tile(dsquery, dstile, tile_driver, options,
tilefilename=tilefilename)
# Write a copy of tile to png/jpg
if options.resampling != 'antialias':
# Write a copy of tile to png/jpg
out_driver.CreateCopy(tilefilename, dstile, strict=0)
if options.verbose:
print("\tbuild from zoom", tz + 1,
" tiles:", (2 * tx, 2 * ty), (2 * tx + 1, 2 * ty),
(2 * tx, 2 * ty + 1), (2 * tx + 1, 2 * ty + 1))
# Create a KML file for this tile.
if tile_job_info.kml:
with open(os.path.join(
output_folder,
'%d/%d/%d.kml' % (tz, tx, ty)
), 'wb') as f:
f.write(generate_kml(
tx, ty, tz, tile_job_info.tile_extension, tile_job_info.tile_size,
get_tile_swne(tile_job_info, options), options, children
).encode('utf-8'))
if not options.verbose and not options.quiet:
progress_bar.log_progress()
def optparse_init():
"""Prepare the option parser for input (argv)"""
from optparse import OptionParser, OptionGroup
usage = "Usage: %prog [options] input_file [output]"
p = OptionParser(usage, version="%prog " + __version__)
p.add_option("-p", "--profile", dest='profile',
type='choice', choices=profile_list,
help=("Tile cutting profile (%s) - default 'mercator' "
"(Google Maps compatible)" % ",".join(profile_list)))
p.add_option("-r", "--resampling", dest="resampling",
type='choice', choices=resampling_list,
help="Resampling method (%s) - default 'average'" % ",".join(resampling_list))
p.add_option('-s', '--s_srs', dest="s_srs", metavar="SRS",
help="The spatial reference system used for the source input data")
p.add_option('-z', '--zoom', dest="zoom",
help="Zoom levels to render (format:'2-5' or '10').")
p.add_option('-e', '--resume', dest="resume", action="store_true",
help="Resume mode. Generate only missing files.")
p.add_option('-a', '--srcnodata', dest="srcnodata", metavar="NODATA",
help="NODATA transparency value to assign to the input data")
p.add_option('-d', '--tmscompatible', dest="tmscompatible", action="store_true",
help=("When using the geodetic profile, specifies the base resolution "
"as 0.703125 or 2 tiles at zoom level 0."))
p.add_option("-v", "--verbose",
action="store_true", dest="verbose",
help="Print status messages to stdout")
p.add_option("-q", "--quiet",
action="store_true", dest="quiet",
help="Disable messages and status to stdout")
p.add_option("--processes",
dest="nb_processes",
type='int',
help="Number of processes to use for tiling")
# KML options
g = OptionGroup(p, "KML (Google Earth) options",
"Options for generated Google Earth SuperOverlay metadata")
g.add_option("-k", "--force-kml", dest='kml', action="store_true",
help=("Generate KML for Google Earth - default for 'geodetic' profile and "
"'raster' in EPSG:4326. For a dataset with different projection use "
"with caution!"))
g.add_option("-n", "--no-kml", dest='kml', action="store_false",
help="Avoid automatic generation of KML files for EPSG:4326")
g.add_option("-u", "--url", dest='url',
help="URL address where the generated tiles are going to be published")
p.add_option_group(g)
# HTML options
g = OptionGroup(p, "Web viewer options",
"Options for generated HTML viewers a la Google Maps")
g.add_option("-w", "--webviewer", dest='webviewer', type='choice', choices=webviewer_list,
help="Web viewer to generate (%s) - default 'all'" % ",".join(webviewer_list))
g.add_option("-t", "--title", dest='title',
help="Title of the map")
g.add_option("-c", "--copyright", dest='copyright',
help="Copyright for the map")
g.add_option("-g", "--googlekey", dest='googlekey',
help="Google Maps API key from http://code.google.com/apis/maps/signup.html")
g.add_option("-b", "--bingkey", dest='bingkey',
help="Bing Maps API key from https://www.bingmapsportal.com/")
p.add_option_group(g)
p.set_defaults(verbose=False, profile="mercator", kml=False, url='',
webviewer='all', copyright='', resampling='average', resume=False,
googlekey='INSERT_YOUR_KEY_HERE', bingkey='INSERT_YOUR_KEY_HERE',
processes=1)
return p
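# Typical invocation (a sketch using the options defined above; input.tif and
# output_tiles/ are placeholder paths):
#   gdal2tiles.py -p mercator -z 2-5 -w openlayers input.tif output_tiles/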
def process_args(argv):
parser = optparse_init()
options, args = parser.parse_args(args=argv)
# Args should be either an input file OR an input file and an output folder
if (len(args) == 0):
exit_with_error("You need to specify at least an input file as argument to the script")
if (len(args) > 2):
exit_with_error("Processing of several input files is not supported.",
"Please first use a tool like gdal_vrtmerge.py or gdal_merge.py on the "
"files: gdal_vrtmerge.py -o merged.vrt %s" % " ".join(args))
input_file = args[0]
if not os.path.isfile(input_file):
exit_with_error("The provided input file %s does not exist or is not a file" % input_file)
if len(args) == 2:
output_folder = args[1]
else:
output_folder = os.path.basename(input_file)
options = options_post_processing(options, input_file, output_folder)
return input_file, output_folder, options
def options_post_processing(options, input_file, output_folder):
if not options.title:
options.title = os.path.basename(input_file)
if options.url and not options.url.endswith('/'):
options.url += '/'
if options.url:
out_path = output_folder
if out_path.endswith("/"):
out_path = out_path[:-1]
options.url += os.path.basename(out_path) + '/'
# Supported options
if options.resampling == 'average':
try:
if gdal.RegenerateOverview:
pass
except Exception:
exit_with_error("'average' resampling algorithm is not available.",
"Please use -r 'near' argument or upgrade to newer version of GDAL.")
elif options.resampling == 'antialias':
try:
if numpy: # pylint:disable=W0125
pass
except Exception:
exit_with_error("'antialias' resampling algorithm is not available.",
"Install PIL (Python Imaging Library) and numpy.")
try:
os.path.basename(input_file).encode('ascii')
except UnicodeEncodeError:
full_ascii = False
else:
full_ascii = True
# LC_CTYPE check
if not full_ascii and 'UTF-8' not in os.environ.get("LC_CTYPE", ""):
if not options.quiet:
print("\nWARNING: "
"You are running gdal2tiles.py with a LC_CTYPE environment variable that is "
"not UTF-8 compatible, and your input file contains non-ascii characters. "
"The generated sample googlemaps, openlayers or "
"leaflet files might contain some invalid characters as a result\n")
# Output the results
if options.verbose:
print("Options:", options)
print("Input:", input_file)
print("Output:", output_folder)
print("Cache: %s MB" % (gdal.GetCacheMax() / 1024 / 1024))
print('')
return options
class TileDetail(object):
tx = 0
ty = 0
tz = 0
rx = 0
ry = 0
rxsize = 0
rysize = 0
wx = 0
wy = 0
wxsize = 0
wysize = 0
querysize = 0
def __init__(self, **kwargs):
for key in kwargs:
if hasattr(self, key):
setattr(self, key, kwargs[key])
def __unicode__(self):
return "TileDetail %s\n%s\n%s\n" % (self.tx, self.ty, self.tz)
def __str__(self):
return "TileDetail %s\n%s\n%s\n" % (self.tx, self.ty, self.tz)
def __repr__(self):
return "TileDetail %s\n%s\n%s\n" % (self.tx, self.ty, self.tz)
class TileJobInfo(object):
"""
Plain object to hold tile job configuration for a dataset
"""
src_file = ""
nb_data_bands = 0
output_file_path = ""
tile_extension = ""
tile_size = 0
tile_driver = None
kml = False
tminmax = []
tminz = 0
tmaxz = 0
in_srs_wkt = 0
out_geo_trans = []
ominy = 0
is_epsg_4326 = False
options = None
def __init__(self, **kwargs):
for key in kwargs:
if hasattr(self, key):
setattr(self, key, kwargs[key])
def __unicode__(self):
return "TileJobInfo %s\n" % (self.src_file)
def __str__(self):
return "TileJobInfo %s\n" % (self.src_file)
def __repr__(self):
return "TileJobInfo %s\n" % (self.src_file)
class Gdal2TilesError(Exception):
pass
class GDAL2Tiles(object):
def __init__(self, input_file, output_folder, options):
"""Constructor function - initialization"""
self.out_drv = None
self.mem_drv = None
self.warped_input_dataset = None
self.out_srs = None
self.nativezoom = None
self.tminmax = None
self.tsize = None
self.mercator = None
self.geodetic = None
self.alphaband = None
self.dataBandsCount = None
self.out_gt = None
self.tileswne = None
self.swne = None
self.ominx = None
self.omaxx = None
self.omaxy = None
self.ominy = None
self.input_file = None
self.output_folder = None
# Tile format
self.tilesize = 256
self.tiledriver = 'PNG'
self.tileext = 'png'
self.tmp_dir = tempfile.mkdtemp()
self.tmp_vrt_filename = os.path.join(self.tmp_dir, str(uuid4()) + '.vrt')
# Should we read bigger window of the input raster and scale it down?
# Note: Modified later by open_input()
# Not for 'near' resampling
# Not for Wavelet based drivers (JPEG2000, ECW, MrSID)
# Not for 'raster' profile
self.scaledquery = True
        # How big should the query window be for scaling down
        # Later on reset according to the chosen resampling algorithm
self.querysize = 4 * self.tilesize
# Should we use Read on the input file for generating overview tiles?
# Note: Modified later by open_input()
# Otherwise the overview tiles are generated from existing underlying tiles
self.overviewquery = False
self.input_file = input_file
self.output_folder = output_folder
self.options = options
if self.options.resampling == 'near':
self.querysize = self.tilesize
elif self.options.resampling == 'bilinear':
self.querysize = self.tilesize * 2
# User specified zoom levels
self.tminz = None
self.tmaxz = None
if self.options.zoom:
minmax = self.options.zoom.split('-', 1)
minmax.extend([''])
zoom_min, zoom_max = minmax[:2]
self.tminz = int(zoom_min)
if zoom_max:
self.tmaxz = int(zoom_max)
else:
self.tmaxz = int(zoom_min)
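# e.g. "--zoom 5-12" gives tminz=5 and tmaxz=12, while a single value such as
# "--zoom 7" sets both tminz and tmaxz to 7.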
# KML generation
self.kml = self.options.kml
# -------------------------------------------------------------------------
def open_input(self):
"""Initialization of the input raster, reprojection if necessary"""
gdal.AllRegister()
self.out_drv = gdal.GetDriverByName(self.tiledriver)
self.mem_drv = gdal.GetDriverByName('MEM')
if not self.out_drv:
raise Exception("The '%s' driver was not found, is it available in this GDAL build?",
self.tiledriver)
if not self.mem_drv:
raise Exception("The 'MEM' driver was not found, is it available in this GDAL build?")
# Open the input file
if self.input_file:
input_dataset = gdal.Open(self.input_file, gdal.GA_ReadOnly)
else:
raise Exception("No input file was specified")
if not input_dataset:
# Note: GDAL prints the ERROR message too
exit_with_error("It is not possible to open the input file '%s'." % self.input_file)
if self.options.verbose:
print("Input file:",
"( %sP x %sL - %s bands)" % (input_dataset.RasterXSize,
input_dataset.RasterYSize,
input_dataset.RasterCount))
# Read metadata from the input file
if input_dataset.RasterCount == 0:
exit_with_error("Input file '%s' has no raster band" % self.input_file)
if input_dataset.GetRasterBand(1).GetRasterColorTable():
exit_with_error(
"Please convert this file to RGB/RGBA and run gdal2tiles on the result.",
"From paletted file you can create RGBA file (temp.vrt) by:\n"
"gdal_translate -of vrt -expand rgba %s temp.vrt\n"
"then run:\n"
"gdal2tiles temp.vrt" % self.input_file
)
in_nodata = setup_no_data_values(input_dataset, self.options)
if self.options.verbose:
print("Preprocessed file:",
"( %sP x %sL - %s bands)" % (input_dataset.RasterXSize,
input_dataset.RasterYSize,
input_dataset.RasterCount))
in_srs, self.in_srs_wkt = setup_input_srs(input_dataset, self.options)
self.out_srs = setup_output_srs(in_srs, self.options)
# If input and output reference systems are different, we reproject the input dataset into
# the output reference system for easier manipulation
self.warped_input_dataset = None
if self.options.profile in ('mercator', 'geodetic'):
if not in_srs:
exit_with_error(
"Input file has unknown SRS.",
"Use --s_srs ESPG:xyz (or similar) to provide source reference system.")
if not has_georeference(input_dataset):
exit_with_error(
"There is no georeference - neither affine transformation (worldfile) "
"nor GCPs. You can generate only 'raster' profile tiles.",
"Either gdal2tiles with parameter -p 'raster' or use another GIS "
"software for georeference e.g. gdal_transform -gcp / -a_ullr / -a_srs"
)
if ((in_srs.ExportToProj4() != self.out_srs.ExportToProj4()) or
(input_dataset.GetGCPCount() != 0)):
self.warped_input_dataset = reproject_dataset(
input_dataset, in_srs, self.out_srs)
if in_nodata:
self.warped_input_dataset = update_no_data_values(
self.warped_input_dataset, in_nodata, options=self.options)
else:
self.warped_input_dataset = update_alpha_value_for_non_alpha_inputs(
self.warped_input_dataset, options=self.options)
if self.warped_input_dataset and self.options.verbose:
print("Projected file:", "tiles.vrt", "( %sP x %sL - %s bands)" % (
self.warped_input_dataset.RasterXSize,
self.warped_input_dataset.RasterYSize,
self.warped_input_dataset.RasterCount))
if not self.warped_input_dataset:
self.warped_input_dataset = input_dataset
self.warped_input_dataset.GetDriver().CreateCopy(self.tmp_vrt_filename,
self.warped_input_dataset)
# Get alpha band (either directly or from NODATA value)
self.alphaband = self.warped_input_dataset.GetRasterBand(1).GetMaskBand()
self.dataBandsCount = nb_data_bands(self.warped_input_dataset)
# KML test
self.isepsg4326 = False
srs4326 = osr.SpatialReference()
srs4326.ImportFromEPSG(4326)
if self.out_srs and srs4326.ExportToProj4() == self.out_srs.ExportToProj4():
self.kml = True
self.isepsg4326 = True
if self.options.verbose:
print("KML autotest OK!")
# Read the georeference
self.out_gt = self.warped_input_dataset.GetGeoTransform()
# Test the size of the pixel
# Report error in case rotation/skew is in geotransform (possible only in 'raster' profile)
if (self.out_gt[2], self.out_gt[4]) != (0, 0):
exit_with_error("Georeference of the raster contains rotation or skew. "
"Such raster is not supported. Please use gdalwarp first.")
# Here we expect: pixel is square, no rotation on the raster
# Output Bounds - coordinates in the output SRS
self.ominx = self.out_gt[0]
self.omaxx = self.out_gt[0] + self.warped_input_dataset.RasterXSize * self.out_gt[1]
self.omaxy = self.out_gt[3]
self.ominy = self.out_gt[3] - self.warped_input_dataset.RasterYSize * self.out_gt[1]
# Note: maybe round(x, 14) to avoid the gdal_translate behaviour, when 0 becomes -1e-15
if self.options.verbose:
print("Bounds (output srs):", round(self.ominx, 13), self.ominy, self.omaxx, self.omaxy)
# Calculating ranges for tiles in different zoom levels
if self.options.profile == 'mercator':
self.mercator = GlobalMercator()
# Function which generates SWNE in LatLong for given tile
self.tileswne = self.mercator.TileLatLonBounds
# Generate table with min max tile coordinates for all zoomlevels
self.tminmax = list(range(0, 32))
for tz in range(0, 32):
tminx, tminy = self.mercator.MetersToTile(self.ominx, self.ominy, tz)
tmaxx, tmaxy = self.mercator.MetersToTile(self.omaxx, self.omaxy, tz)
# crop tiles extending world limits (+-180,+-90)
tminx, tminy = max(0, tminx), max(0, tminy)
tmaxx, tmaxy = min(2**tz - 1, tmaxx), min(2**tz - 1, tmaxy)
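# At zoom level tz the global mercator grid is 2**tz x 2**tz tiles, so tile
# indices are clamped to the range 0 .. 2**tz - 1.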
self.tminmax[tz] = (tminx, tminy, tmaxx, tmaxy)
# TODO: Maps crossing 180E (Alaska?)
# Get the minimal zoom level (map covers area equivalent to one tile)
if self.tminz is None:
self.tminz = self.mercator.ZoomForPixelSize(
self.out_gt[1] *
max(self.warped_input_dataset.RasterXSize,
self.warped_input_dataset.RasterYSize) /
float(self.tilesize))
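# i.e. the zoom level at which the raster's larger dimension, at its native
# pixel size, roughly fits into a single tile of self.tilesize pixels.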
# Get the maximal zoom level
# (closest possible zoom level up on the resolution of raster)
if self.tmaxz is None:
self.tmaxz = self.mercator.ZoomForPixelSize(self.out_gt[1])
if self.options.verbose:
print("Bounds (latlong):",
self.mercator.MetersToLatLon(self.ominx, self.ominy),
self.mercator.MetersToLatLon(self.omaxx, self.omaxy))
print('MinZoomLevel:', self.tminz)
print("MaxZoomLevel:",
self.tmaxz,
"(",
self.mercator.Resolution(self.tmaxz),
")")
if self.options.profile == 'geodetic':
self.geodetic = GlobalGeodetic(self.options.tmscompatible)
# Function which generates SWNE in LatLong for given tile
self.tileswne = self.geodetic.TileLatLonBounds
# Generate table with min max tile coordinates for all zoomlevels
self.tminmax = list(range(0, 32))
for tz in range(0, 32):
tminx, tminy = self.geodetic.LonLatToTile(self.ominx, self.ominy, tz)
tmaxx, tmaxy = self.geodetic.LonLatToTile(self.omaxx, self.omaxy, tz)
# crop tiles extending world limits (+-180,+-90)
tminx, tminy = max(0, tminx), max(0, tminy)
tmaxx, tmaxy = min(2**(tz + 1) - 1, tmaxx), min(2**tz - 1, tmaxy)
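# The geodetic grid is two tiles wide at zoom 0 (2**(tz+1) columns by 2**tz
# rows), hence the asymmetric clamp on the x and y indices.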
self.tminmax[tz] = (tminx, tminy, tmaxx, tmaxy)
# TODO: Maps crossing 180E (Alaska?)
# Get the minimal zoom level (map covers area equivalent to one tile)
if self.tminz is None:
self.tminz = self.geodetic.ZoomForPixelSize(
self.out_gt[1] *
max(self.warped_input_dataset.RasterXSize,
self.warped_input_dataset.RasterYSize) /
float(self.tilesize))
# Get the maximal zoom level
# (closest possible zoom level up on the resolution of raster)
if self.tmaxz is None:
self.tmaxz = self.geodetic.ZoomForPixelSize(self.out_gt[1])
if self.options.verbose:
print("Bounds (latlong):", self.ominx, self.ominy, self.omaxx, self.omaxy)
if self.options.profile == 'raster':
def log2(x):
return math.log10(x) / math.log10(2)
self.nativezoom = int(
max(math.ceil(log2(self.warped_input_dataset.RasterXSize / float(self.tilesize))),
math.ceil(log2(self.warped_input_dataset.RasterYSize / float(self.tilesize)))))
if self.options.verbose:
print("Native zoom of the raster:", self.nativezoom)
# Get the minimal zoom level (whole raster in one tile)
if self.tminz is None:
self.tminz = 0
# Get the maximal zoom level (native resolution of the raster)
if self.tmaxz is None:
self.tmaxz = self.nativezoom
# Generate table with min max tile coordinates for all zoomlevels
self.tminmax = list(range(0, self.tmaxz + 1))
self.tsize = list(range(0, self.tmaxz + 1))
for tz in range(0, self.tmaxz + 1):
tsize = 2.0**(self.nativezoom - tz) * self.tilesize
tminx, tminy = 0, 0
tmaxx = int(math.ceil(self.warped_input_dataset.RasterXSize / tsize)) - 1
tmaxy = int(math.ceil(self.warped_input_dataset.RasterYSize / tsize)) - 1
self.tsize[tz] = math.ceil(tsize)
self.tminmax[tz] = (tminx, tminy, tmaxx, tmaxy)
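# Example: with nativezoom=3 and 256 px tiles, each tile covers tsize=2048 px of
# the source raster at tz=0, halving per zoom step down to 256 px at tz=3.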
# Function which generates SWNE in LatLong for given tile
if self.kml and self.in_srs_wkt:
ct = osr.CoordinateTransformation(in_srs, srs4326)
def rastertileswne(x, y, z):
pixelsizex = (2**(self.tmaxz - z) * self.out_gt[1]) # X-pixel size in level
west = self.out_gt[0] + x * self.tilesize * pixelsizex
east = west + self.tilesize * pixelsizex
south = self.ominy + y * self.tilesize * pixelsizex
north = south + self.tilesize * pixelsizex
if not self.isepsg4326:
# Transformation to EPSG:4326 (WGS84 datum)
west, south = ct.TransformPoint(west, south)[:2]
east, north = ct.TransformPoint(east, north)[:2]
return south, west, north, east
self.tileswne = rastertileswne
else:
self.tileswne = lambda x, y, z: (0, 0, 0, 0) # noqa
def generate_metadata(self):
"""
Generation of main metadata files and HTML viewers (metadata related to particular
tiles are generated during the tile processing).
"""
if not os.path.exists(self.output_folder):
os.makedirs(self.output_folder)
if self.options.profile == 'mercator':
south, west = self.mercator.MetersToLatLon(self.ominx, self.ominy)
north, east = self.mercator.MetersToLatLon(self.omaxx, self.omaxy)
south, west = max(-85.05112878, south), max(-180.0, west)
north, east = min(85.05112878, north), min(180.0, east)
self.swne = (south, west, north, east)
# Generate googlemaps.html
if self.options.webviewer in ('all', 'google') and self.options.profile == 'mercator':
if (not self.options.resume or not
os.path.exists(os.path.join(self.output_folder, 'googlemaps.html'))):
with open(os.path.join(self.output_folder, 'googlemaps.html'), 'wb') as f:
f.write(self.generate_googlemaps().encode('utf-8'))
# Generate openlayers.html
if self.options.webviewer in ('all', 'openlayers'):
if (not self.options.resume or not
os.path.exists(os.path.join(self.output_folder, 'openlayers.html'))):
with open(os.path.join(self.output_folder, 'openlayers.html'), 'wb') as f:
f.write(self.generate_openlayers().encode('utf-8'))
# Generate leaflet.html
if self.options.webviewer in ('all', 'leaflet'):
if (not self.options.resume or not
os.path.exists(os.path.join(self.output_folder, 'leaflet.html'))):
with open(os.path.join(self.output_folder, 'leaflet.html'), 'wb') as f:
f.write(self.generate_leaflet().encode('utf-8'))
elif self.options.profile == 'geodetic':
west, south = self.ominx, self.ominy
east, north = self.omaxx, self.omaxy
south, west = max(-90.0, south), max(-180.0, west)
north, east = min(90.0, north), min(180.0, east)
self.swne = (south, west, north, east)
# Generate openlayers.html
if self.options.webviewer in ('all', 'openlayers'):
if (not self.options.resume or not
os.path.exists(os.path.join(self.output_folder, 'openlayers.html'))):
with open(os.path.join(self.output_folder, 'openlayers.html'), 'wb') as f:
f.write(self.generate_openlayers().encode('utf-8'))
elif self.options.profile == 'raster':
west, south = self.ominx, self.ominy
east, north = self.omaxx, self.omaxy
self.swne = (south, west, north, east)
# Generate openlayers.html
if self.options.webviewer in ('all', 'openlayers'):
if (not self.options.resume or not
os.path.exists(os.path.join(self.output_folder, 'openlayers.html'))):
with open(os.path.join(self.output_folder, 'openlayers.html'), 'wb') as f:
f.write(self.generate_openlayers().encode('utf-8'))
# Generate tilemapresource.xml.
if not self.options.resume or not os.path.exists(os.path.join(self.output_folder, 'tilemapresource.xml')):
with open(os.path.join(self.output_folder, 'tilemapresource.xml'), 'wb') as f:
f.write(self.generate_tilemapresource().encode('utf-8'))
if self.kml:
# TODO: Maybe problem for not automatically generated tminz
# The root KML should contain links to all tiles in the tminz level
children = []
xmin, ymin, xmax, ymax = self.tminmax[self.tminz]
for x in range(xmin, xmax + 1):
for y in range(ymin, ymax + 1):
children.append([x, y, self.tminz])
# Generate Root KML
if self.kml:
if (not self.options.resume or not
os.path.exists(os.path.join(self.output_folder, 'doc.kml'))):
with open(os.path.join(self.output_folder, 'doc.kml'), 'wb') as f:
f.write(generate_kml(
None, None, None, self.tileext, self.tilesize, self.tileswne,
self.options, children
).encode('utf-8'))
def generate_base_tiles(self):
"""
Generation of the base tiles (the lowest in the pyramid) directly from the input raster
"""
if not self.options.quiet:
print("Generating Base Tiles:")
if self.options.verbose:
print('')
print("Tiles generated from the max zoom level:")
print("----------------------------------------")
print('')
# Set the bounds
tminx, tminy, tmaxx, tmaxy = self.tminmax[self.tmaxz]
ds = self.warped_input_dataset
tilebands = self.dataBandsCount + 1
querysize = self.querysize
if self.options.verbose:
print("dataBandsCount: ", self.dataBandsCount)
print("tilebands: ", tilebands)
tcount = (1 + abs(tmaxx - tminx)) * (1 + abs(tmaxy - tminy))
ti = 0
tile_details = []
tz = self.tmaxz
for ty in range(tmaxy, tminy - 1, -1):
for tx in range(tminx, tmaxx + 1):
ti += 1
tilefilename = os.path.join(
self.output_folder, str(tz), str(tx), "%s.%s" % (ty, self.tileext))
if self.options.verbose:
print(ti, '/', tcount, tilefilename)
if self.options.resume and os.path.exists(tilefilename):
if self.options.verbose:
print("Tile generation skipped because of --resume")
continue
# Create directories for the tile
if not os.path.exists(os.path.dirname(tilefilename)):
os.makedirs(os.path.dirname(tilefilename))
if self.options.profile == 'mercator':
# Tile bounds in EPSG:3857
b = self.mercator.TileBounds(tx, ty, tz)
elif self.options.profile == 'geodetic':
b = self.geodetic.TileBounds(tx, ty, tz)
# Don't scale up by nearest neighbour; instead change the querysize to the
# native resolution (and return a smaller query tile) for scaling
if self.options.profile in ('mercator', 'geodetic'):
rb, wb = self.geo_query(ds, b[0], b[3], b[2], b[1])
# Pixel size in the raster covering query geo extent
nativesize = wb[0] + wb[2]
if self.options.verbose:
print("\tNative Extent (querysize", nativesize, "): ", rb, wb)
# Tile bounds in raster coordinates for ReadRaster query
rb, wb = self.geo_query(ds, b[0], b[3], b[2], b[1], querysize=querysize)
rx, ry, rxsize, rysize = rb
wx, wy, wxsize, wysize = wb
else: # 'raster' profile:
tsize = int(self.tsize[tz]) # tilesize in raster coordinates for actual zoom
xsize = self.warped_input_dataset.RasterXSize # size of the raster in pixels
ysize = self.warped_input_dataset.RasterYSize
if tz >= self.nativezoom:
querysize = self.tilesize
rx = (tx) * tsize
rxsize = 0
if tx == tmaxx:
rxsize = xsize % tsize
if rxsize == 0:
rxsize = tsize
rysize = 0
if ty == tmaxy:
rysize = ysize % tsize
if rysize == 0:
rysize = tsize
ry = ysize - (ty * tsize) - rysize
wx, wy = 0, 0
wxsize = int(rxsize / float(tsize) * self.tilesize)
wysize = int(rysize / float(tsize) * self.tilesize)
if wysize != self.tilesize:
wy = self.tilesize - wysize
# Read the source raster only if something falls inside the tile, as determined
# by the computed geo_query
tile_details.append(
TileDetail(
tx=tx, ty=ty, tz=tz, rx=rx, ry=ry, rxsize=rxsize, rysize=rysize, wx=wx,
wy=wy, wxsize=wxsize, wysize=wysize, querysize=querysize,
)
)
conf = TileJobInfo(
src_file=self.tmp_vrt_filename,
nb_data_bands=self.dataBandsCount,
output_file_path=self.output_folder,
tile_extension=self.tileext,
tile_driver=self.tiledriver,
tile_size=self.tilesize,
kml=self.kml,
tminmax=self.tminmax,
tminz=self.tminz,
tmaxz=self.tmaxz,
in_srs_wkt=self.in_srs_wkt,
out_geo_trans=self.out_gt,
ominy=self.ominy,
is_epsg_4326=self.isepsg4326,
options=self.options,
)
return conf, tile_details
def geo_query(self, ds, ulx, uly, lrx, lry, querysize=0):
"""
For given dataset and query in cartographic coordinates returns parameters for ReadRaster()
in raster coordinates and x/y shifts (for border tiles). If the querysize is not given, the
extent is returned in the native resolution of dataset ds.
raises Gdal2TilesError if the dataset does not contain anything inside this geo_query
"""
geotran = ds.GetGeoTransform()
rx = int((ulx - geotran[0]) / geotran[1] + 0.001)
ry = int((uly - geotran[3]) / geotran[5] + 0.001)
rxsize = int((lrx - ulx) / geotran[1] + 0.5)
rysize = int((lry - uly) / geotran[5] + 0.5)
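# Illustrative example (hypothetical geotransform gt=(0, 10, 0, 1000, 0, -10),
# i.e. origin at (0, 1000) with 10 m pixels): a query of ulx=250, uly=750,
# lrx=500, lry=500 yields rx=25, ry=25, rxsize=25, rysize=25.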
if not querysize:
wxsize, wysize = rxsize, rysize
else:
wxsize, wysize = querysize, querysize
# Coordinates should not go out of the bounds of the raster
wx = 0
if rx < 0:
rxshift = abs(rx)
wx = int(wxsize * (float(rxshift) / rxsize))
wxsize = wxsize - wx
rxsize = rxsize - int(rxsize * (float(rxshift) / rxsize))
rx = 0
if rx + rxsize > ds.RasterXSize:
wxsize = int(wxsize * (float(ds.RasterXSize - rx) / rxsize))
rxsize = ds.RasterXSize - rx
wy = 0
if ry < 0:
ryshift = abs(ry)
wy = int(wysize * (float(ryshift) / rysize))
wysize = wysize - wy
rysize = rysize - int(rysize * (float(ryshift) / rysize))
ry = 0
if ry + rysize > ds.RasterYSize:
wysize = int(wysize * (float(ds.RasterYSize - ry) / rysize))
rysize = ds.RasterYSize - ry
return (rx, ry, rxsize, rysize), (wx, wy, wxsize, wysize)
def generate_tilemapresource(self):
"""
Template for tilemapresource.xml. Returns filled string. Expected variables:
title, north, south, east, west, isepsg4326, projection, publishurl,
zoompixels, tilesize, tileformat, profile
"""
args = {}
args['title'] = self.options.title
args['south'], args['west'], args['north'], args['east'] = self.swne
args['tilesize'] = self.tilesize
args['tileformat'] = self.tileext
args['publishurl'] = self.options.url
args['profile'] = self.options.profile
if self.options.profile == 'mercator':
args['srs'] = "EPSG:3857"
elif self.options.profile == 'geodetic':
args['srs'] = "EPSG:4326"
elif self.options.s_srs:
args['srs'] = self.options.s_srs
elif self.out_srs:
args['srs'] = self.out_srs.ExportToWkt()
else:
args['srs'] = ""
s = """<?xml version="1.0" encoding="utf-8"?>
<TileMap version="1.0.0" tilemapservice="http://tms.osgeo.org/1.0.0">
<Title>%(title)s</Title>
<Abstract></Abstract>
<SRS>%(srs)s</SRS>
<BoundingBox minx="%(west).14f" miny="%(south).14f" maxx="%(east).14f" maxy="%(north).14f"/>
<Origin x="%(west).14f" y="%(south).14f"/>
<TileFormat width="%(tilesize)d" height="%(tilesize)d" mime-type="image/%(tileformat)s" extension="%(tileformat)s"/>
<TileSets profile="%(profile)s">
""" % args # noqa
for z in range(self.tminz, self.tmaxz + 1):
if self.options.profile == 'raster':
s += """ <TileSet href="%s%d" units-per-pixel="%.14f" order="%d"/>\n""" % (
args['publishurl'], z, (2**(self.nativezoom - z) * self.out_gt[1]), z)
elif self.options.profile == 'mercator':
s += """ <TileSet href="%s%d" units-per-pixel="%.14f" order="%d"/>\n""" % (
args['publishurl'], z, 156543.0339 / 2**z, z)
elif self.options.profile == 'geodetic':
s += """ <TileSet href="%s%d" units-per-pixel="%.14f" order="%d"/>\n""" % (
args['publishurl'], z, 0.703125 / 2**z, z)
s += """ </TileSets>
</TileMap>
"""
return s
def generate_googlemaps(self):
"""
Template for googlemaps.html implementing Overlay of tiles for 'mercator' profile.
It returns filled string. Expected variables:
title, googlemapskey, north, south, east, west, minzoom, maxzoom, tilesize, tileformat,
publishurl
"""
args = {}
args['title'] = self.options.title
args['googlemapskey'] = self.options.googlekey
args['south'], args['west'], args['north'], args['east'] = self.swne
args['minzoom'] = self.tminz
args['maxzoom'] = self.tmaxz
args['tilesize'] = self.tilesize
args['tileformat'] = self.tileext
args['publishurl'] = self.options.url
args['copyright'] = self.options.copyright
s = r"""<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xmlns:v="urn:schemas-microsoft-com:vml">
<head>
<title>%(title)s</title>
<meta http-equiv="content-type" content="text/html; charset=utf-8"/>
<meta http-equiv='imagetoolbar' content='no'/>
<style type="text/css"> v\:* {behavior:url(#default#VML);}
html, body { overflow: hidden; padding: 0; height: 100%%; width: 100%%; font-family: 'Lucida Grande',Geneva,Arial,Verdana,sans-serif; }
body { margin: 10px; background: #fff; }
h1 { margin: 0; padding: 6px; border:0; font-size: 20pt; }
#header { height: 43px; padding: 0; background-color: #eee; border: 1px solid #888; }
#subheader { height: 12px; text-align: right; font-size: 10px; color: #555;}
#map { height: 95%%; border: 1px solid #888; }
</style>
<script src='http://maps.google.com/maps?file=api&v=2&key=%(googlemapskey)s'></script>
<script>
//<![CDATA[
/*
* Constants for given map
* TODO: read it from tilemapresource.xml
*/
var mapBounds = new GLatLngBounds(new GLatLng(%(south)s, %(west)s), new GLatLng(%(north)s, %(east)s));
var mapMinZoom = %(minzoom)s;
var mapMaxZoom = %(maxzoom)s;
var opacity = 0.75;
var map;
var hybridOverlay;
/*
* Create a Custom Opacity GControl
* http://www.maptiler.org/google-maps-overlay-opacity-control/
*/
var CTransparencyLENGTH = 58;
// maximum width that the knob can move (slide width minus knob width)
function CTransparencyControl( overlay ) {
this.overlay = overlay;
this.opacity = overlay.getTileLayer().getOpacity();
}
CTransparencyControl.prototype = new GControl();
// This function positions the slider to match the specified opacity
CTransparencyControl.prototype.setSlider = function(pos) {
var left = Math.round((CTransparencyLENGTH*pos));
this.slide.left = left;
this.knob.style.left = left+"px";
this.knob.style.top = "0px";
}
// This function reads the slider and sets the overlay opacity level
CTransparencyControl.prototype.setOpacity = function() {
// set the global variable
opacity = this.slide.left/CTransparencyLENGTH;
this.map.clearOverlays();
this.map.addOverlay(this.overlay, { zPriority: 0 });
if (this.map.getCurrentMapType() == G_HYBRID_MAP) {
this.map.addOverlay(hybridOverlay);
}
}
// This gets called by the API when addControl(new CTransparencyControl())
CTransparencyControl.prototype.initialize = function(map) {
var that=this;
this.map = map;
// Is this MSIE, if so we need to use AlphaImageLoader
var agent = navigator.userAgent.toLowerCase();
if ((agent.indexOf("msie") > -1) && (agent.indexOf("opera") < 1)){this.ie = true} else {this.ie = false}
// create the background graphic as a <div> containing an image
var container = document.createElement("div");
container.style.width="70px";
container.style.height="21px";
// Handle transparent PNG files in MSIE
if (this.ie) {
var loader = "filter:progid:DXImageTransform.Microsoft.AlphaImageLoader(src='http://www.maptiler.org/img/opacity-slider.png', sizingMethod='crop');";
container.innerHTML = '<div style="height:21px; width:70px; ' +loader+ '" ></div>';
} else {
container.innerHTML = '<div style="height:21px; width:70px; background-image: url(http://www.maptiler.org/img/opacity-slider.png)" ></div>';
}
// create the knob as a GDraggableObject
// Handle transparent PNG files in MSIE
if (this.ie) {
var loader = "progid:DXImageTransform.Microsoft.AlphaImageLoader(src='http://www.maptiler.org/img/opacity-slider.png', sizingMethod='crop');";
this.knob = document.createElement("div");
this.knob.style.height="21px";
this.knob.style.width="13px";
this.knob.style.overflow="hidden";
this.knob_img = document.createElement("div");
this.knob_img.style.height="21px";
this.knob_img.style.width="83px";
this.knob_img.style.filter=loader;
this.knob_img.style.position="relative";
this.knob_img.style.left="-70px";
this.knob.appendChild(this.knob_img);
} else {
this.knob = document.createElement("div");
this.knob.style.height="21px";
this.knob.style.width="13px";
this.knob.style.backgroundImage="url(http://www.maptiler.org/img/opacity-slider.png)";
this.knob.style.backgroundPosition="-70px 0px";
}
container.appendChild(this.knob);
this.slide=new GDraggableObject(this.knob, {container:container});
this.slide.setDraggableCursor('pointer');
this.slide.setDraggingCursor('pointer');
this.container = container;
// attach the control to the map
map.getContainer().appendChild(container);
// init slider
this.setSlider(this.opacity);
// Listen for the slider being moved and set the opacity
GEvent.addListener(this.slide, "dragend", function() {that.setOpacity()});
//GEvent.addListener(this.container, "click", function( x, y ) { alert(x, y) });
return container;
}
// Set the default position for the control
CTransparencyControl.prototype.getDefaultPosition = function() {
return new GControlPosition(G_ANCHOR_TOP_RIGHT, new GSize(7, 47));
}
/*
* Full-screen Window Resize
*/
function getWindowHeight() {
if (self.innerHeight) return self.innerHeight;
if (document.documentElement && document.documentElement.clientHeight)
return document.documentElement.clientHeight;
if (document.body) return document.body.clientHeight;
return 0;
}
function getWindowWidth() {
if (self.innerWidth) return self.innerWidth;
if (document.documentElement && document.documentElement.clientWidth)
return document.documentElement.clientWidth;
if (document.body) return document.body.clientWidth;
return 0;
}
function resize() {
var map = document.getElementById("map");
var header = document.getElementById("header");
var subheader = document.getElementById("subheader");
map.style.height = (getWindowHeight()-80) + "px";
map.style.width = (getWindowWidth()-20) + "px";
header.style.width = (getWindowWidth()-20) + "px";
subheader.style.width = (getWindowWidth()-20) + "px";
// map.checkResize();
}
/*
* Main load function:
*/
function load() {
if (GBrowserIsCompatible()) {
// Bug in the Google Maps: Copyright for Overlay is not correctly displayed
var gcr = GMapType.prototype.getCopyrights;
GMapType.prototype.getCopyrights = function(bounds,zoom) {
return ["%(copyright)s"].concat(gcr.call(this,bounds,zoom));
}
map = new GMap2( document.getElementById("map"), { backgroundColor: '#fff' } );
map.addMapType(G_PHYSICAL_MAP);
map.setMapType(G_PHYSICAL_MAP);
map.setCenter( mapBounds.getCenter(), map.getBoundsZoomLevel( mapBounds ));
hybridOverlay = new GTileLayerOverlay( G_HYBRID_MAP.getTileLayers()[1] );
GEvent.addListener(map, "maptypechanged", function() {
if (map.getCurrentMapType() == G_HYBRID_MAP) {
map.addOverlay(hybridOverlay);
} else {
map.removeOverlay(hybridOverlay);
}
} );
var tilelayer = new GTileLayer(GCopyrightCollection(''), mapMinZoom, mapMaxZoom);
var mercator = new GMercatorProjection(mapMaxZoom+1);
tilelayer.getTileUrl = function(tile,zoom) {
if ((zoom < mapMinZoom) || (zoom > mapMaxZoom)) {
return "http://www.maptiler.org/img/none.png";
}
var ymax = 1 << zoom;
var y = ymax - tile.y -1;
var tileBounds = new GLatLngBounds(
mercator.fromPixelToLatLng( new GPoint( (tile.x)*256, (tile.y+1)*256 ) , zoom ),
mercator.fromPixelToLatLng( new GPoint( (tile.x+1)*256, (tile.y)*256 ) , zoom )
);
if (mapBounds.intersects(tileBounds)) {
return zoom+"/"+tile.x+"/"+y+".png";
} else {
return "http://www.maptiler.org/img/none.png";
}
}
// IE 7-: support for PNG alpha channel
// Unfortunately, the opacity for whole overlay is then not changeable, either or...
tilelayer.isPng = function() { return true;};
tilelayer.getOpacity = function() { return opacity; }
overlay = new GTileLayerOverlay( tilelayer );
map.addOverlay(overlay);
map.addControl(new GLargeMapControl());
map.addControl(new GHierarchicalMapTypeControl());
map.addControl(new CTransparencyControl( overlay ));
""" % args # noqa
if self.kml:
s += """
map.addMapType(G_SATELLITE_3D_MAP);
map.getEarthInstance(getEarthInstanceCB);
"""
s += """
map.enableContinuousZoom();
map.enableScrollWheelZoom();
map.setMapType(G_HYBRID_MAP);
}
resize();
}
"""
if self.kml:
s += """
function getEarthInstanceCB(object) {
var ge = object;
if (ge) {
var url = document.location.toString();
url = url.substr(0,url.lastIndexOf('/'))+'/doc.kml';
var link = ge.createLink("");
if ("%(publishurl)s") { link.setHref("%(publishurl)s/doc.kml") }
else { link.setHref(url) };
var networkLink = ge.createNetworkLink("");
networkLink.setName("TMS Map Overlay");
networkLink.setFlyToView(true);
networkLink.setLink(link);
ge.getFeatures().appendChild(networkLink);
} else {
// alert("You should open a KML in Google Earth");
// add div with the link to generated KML... - maybe JavaScript redirect to the URL of KML?
}
}
""" % args # noqa
s += """
onresize=function(){ resize(); };
//]]>
</script>
</head>
<body onload="load()">
<div id="header"><h1>%(title)s</h1></div>
<div id="subheader">Generated by <a href="http://www.klokan.cz/projects/gdal2tiles/">GDAL2Tiles</a>, Copyright © 2008 <a href="http://www.klokan.cz/">Klokan Petr Pridal</a>, <a href="http://www.gdal.org/">GDAL</a> & <a href="http://www.osgeo.org/">OSGeo</a> <a href="http://code.google.com/soc/">GSoC</a>
<!-- PLEASE, LET THIS NOTE ABOUT AUTHOR AND PROJECT SOMEWHERE ON YOUR WEBSITE, OR AT LEAST IN THE COMMENT IN HTML. THANK YOU -->
</div>
<div id="map"></div>
</body>
</html>
""" % args # noqa
return s
def generate_leaflet(self):
"""
Template for leaflet.html implementing overlay of tiles for 'mercator' profile.
It returns filled string. Expected variables:
title, north, south, east, west, minzoom, maxzoom, tilesize, tileformat, publishurl
"""
args = {}
args['title'] = self.options.title.replace('"', '\\"')
args['htmltitle'] = self.options.title
args['south'], args['west'], args['north'], args['east'] = self.swne
args['centerlon'] = (args['north'] + args['south']) / 2.
args['centerlat'] = (args['west'] + args['east']) / 2.
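# Note: despite the names, 'centerlon' holds the latitude midpoint and
# 'centerlat' the longitude midpoint; this matches the [lat, lng] order that
# Leaflet expects for the map center in the template below.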
args['minzoom'] = self.tminz
args['maxzoom'] = self.tmaxz
args['beginzoom'] = self.tmaxz
args['tilesize'] = self.tilesize # not used
args['tileformat'] = self.tileext
args['publishurl'] = self.options.url # not used
args['copyright'] = self.options.copyright.replace('"', '\\"')
s = """<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name='viewport' content='width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no' />
<title>%(htmltitle)s</title>
<!-- Leaflet -->
<link rel="stylesheet" href="http://cdn.leafletjs.com/leaflet-0.7.5/leaflet.css" />
<script src="http://cdn.leafletjs.com/leaflet-0.7.5/leaflet.js"></script>
<style>
body { margin:0; padding:0; }
body, table, tr, td, th, div, h1, h2, input { font-family: "Calibri", "Trebuchet MS", "Ubuntu", Serif; font-size: 11pt; }
#map { position:absolute; top:0; bottom:0; width:100%%; } /* full size */
.ctl {
padding: 2px 10px 2px 10px;
background: white;
background: rgba(255,255,255,0.9);
box-shadow: 0 0 15px rgba(0,0,0,0.2);
border-radius: 5px;
text-align: right;
}
.title {
font-size: 18pt;
font-weight: bold;
}
.src {
font-size: 10pt;
}
</style>
</head>
<body>
<div id="map"></div>
<script>
/* **** Leaflet **** */
// Base layers
// .. OpenStreetMap
var osm = L.tileLayer('http://{s}.tile.osm.org/{z}/{x}/{y}.png', {attribution: '© <a href="http://osm.org/copyright">OpenStreetMap</a> contributors'});
// .. CartoDB Positron
var cartodb = L.tileLayer('http://{s}.basemaps.cartocdn.com/light_all/{z}/{x}/{y}.png', {attribution: '© <a href="http://www.openstreetmap.org/copyright">OpenStreetMap</a> contributors, © <a href="http://cartodb.com/attributions">CartoDB</a>'});
// .. OSM Toner
var toner = L.tileLayer('http://{s}.tile.stamen.com/toner/{z}/{x}/{y}.png', {attribution: 'Map tiles by <a href="http://stamen.com">Stamen Design</a>, under <a href="http://creativecommons.org/licenses/by/3.0">CC BY 3.0</a>. Data by <a href="http://openstreetmap.org">OpenStreetMap</a>, under <a href="http://www.openstreetmap.org/copyright">ODbL</a>.'});
// .. White background
var white = L.tileLayer("data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAQAAAAEAAQMAAABmvDolAAAAA1BMVEX///+nxBvIAAAAH0lEQVQYGe3BAQ0AAADCIPunfg43YAAAAAAAAAAA5wIhAAAB9aK9BAAAAABJRU5ErkJggg==");
// Overlay layers (TMS)
var lyr = L.tileLayer('./{z}/{x}/{y}.%(tileformat)s', {tms: true, opacity: 0.7, attribution: "%(copyright)s"});
// Map
var map = L.map('map', {
center: [%(centerlon)s, %(centerlat)s],
zoom: %(beginzoom)s,
minZoom: %(minzoom)s,
maxZoom: %(maxzoom)s,
layers: [osm]
});
var basemaps = {"OpenStreetMap": osm, "CartoDB Positron": cartodb, "Stamen Toner": toner, "Without background": white}
var overlaymaps = {"Layer": lyr}
// Title
var title = L.control();
title.onAdd = function(map) {
this._div = L.DomUtil.create('div', 'ctl title');
this.update();
return this._div;
};
title.update = function(props) {
this._div.innerHTML = "%(title)s";
};
title.addTo(map);
// Note
var src = 'Generated by <a href="http://www.klokan.cz/projects/gdal2tiles/">GDAL2Tiles</a>, Copyright © 2008 <a href="http://www.klokan.cz/">Klokan Petr Pridal</a>, <a href="http://www.gdal.org/">GDAL</a> & <a href="http://www.osgeo.org/">OSGeo</a> <a href="http://code.google.com/soc/">GSoC</a>';
var title = L.control({position: 'bottomleft'});
title.onAdd = function(map) {
this._div = L.DomUtil.create('div', 'ctl src');
this.update();
return this._div;
};
title.update = function(props) {
this._div.innerHTML = src;
};
title.addTo(map);
// Add base layers
L.control.layers(basemaps, overlaymaps, {collapsed: false}).addTo(map);
// Fit to overlay bounds (SW and NE points with (lat, lon))
map.fitBounds([[%(south)s, %(east)s], [%(north)s, %(west)s]]);
</script>
</body>
</html>
""" % args # noqa
return s
def generate_openlayers(self):
"""
Template for openlayers.html implementing overlay of available Spherical Mercator layers.
It returns filled string. Expected variables:
title, bingkey, north, south, east, west, minzoom, maxzoom, tilesize, tileformat, publishurl
"""
args = {}
args['title'] = self.options.title
args['bingkey'] = self.options.bingkey
args['south'], args['west'], args['north'], args['east'] = self.swne
args['minzoom'] = self.tminz
args['maxzoom'] = self.tmaxz
args['tilesize'] = self.tilesize
args['tileformat'] = self.tileext
args['publishurl'] = self.options.url
args['copyright'] = self.options.copyright
if self.options.tmscompatible:
args['tmsoffset'] = "-1"
else:
args['tmsoffset'] = ""
if self.options.profile == 'raster':
args['rasterzoomlevels'] = self.tmaxz + 1
args['rastermaxresolution'] = 2**(self.nativezoom) * self.out_gt[1]
s = r"""<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml"
<head>
<title>%(title)s</title>
<meta http-equiv='imagetoolbar' content='no'/>
<style type="text/css"> v\:* {behavior:url(#default#VML);}
html, body { overflow: hidden; padding: 0; height: 100%%; width: 100%%; font-family: 'Lucida Grande',Geneva,Arial,Verdana,sans-serif; }
body { margin: 10px; background: #fff; }
h1 { margin: 0; padding: 6px; border:0; font-size: 20pt; }
#header { height: 43px; padding: 0; background-color: #eee; border: 1px solid #888; }
#subheader { height: 12px; text-align: right; font-size: 10px; color: #555;}
#map { height: 95%%; border: 1px solid #888; }
.olImageLoadError { display: none; }
.olControlLayerSwitcher .layersDiv { border-radius: 10px 0 0 10px; }
</style>""" % args # noqa
if self.options.profile == 'mercator':
s += """
<script src='http://maps.google.com/maps/api/js?sensor=false&v=3.7'></script>
""" % args
s += """
<script src="http://www.openlayers.org/api/2.12/OpenLayers.js"></script>
<script>
var map;
var mapBounds = new OpenLayers.Bounds( %(west)s, %(south)s, %(east)s, %(north)s);
var mapMinZoom = %(minzoom)s;
var mapMaxZoom = %(maxzoom)s;
var emptyTileURL = "http://www.maptiler.org/img/none.png";
OpenLayers.IMAGE_RELOAD_ATTEMPTS = 3;
function init(){""" % args
if self.options.profile == 'mercator':
s += """
var options = {
div: "map",
controls: [],
projection: "EPSG:3857",
displayProjection: new OpenLayers.Projection("EPSG:4326"),
numZoomLevels: 20
};
map = new OpenLayers.Map(options);
// Create Google Mercator layers
var gmap = new OpenLayers.Layer.Google("Google Streets",
{
type: google.maps.MapTypeId.ROADMAP,
sphericalMercator: true
});
var gsat = new OpenLayers.Layer.Google("Google Satellite",
{
type: google.maps.MapTypeId.SATELLITE,
sphericalMercator: true
});
var ghyb = new OpenLayers.Layer.Google("Google Hybrid",
{
type: google.maps.MapTypeId.HYBRID,
sphericalMercator: true
});
var gter = new OpenLayers.Layer.Google("Google Terrain",
{
type: google.maps.MapTypeId.TERRAIN,
sphericalMercator: true
});
// Create Bing layers
var broad = new OpenLayers.Layer.Bing({
name: "Bing Roads",
key: "%(bingkey)s",
type: "Road",
sphericalMercator: true
});
var baer = new OpenLayers.Layer.Bing({
name: "Bing Aerial",
key: "%(bingkey)s",
type: "Aerial",
sphericalMercator: true
});
var bhyb = new OpenLayers.Layer.Bing({
name: "Bing Hybrid",
key: "%(bingkey)s",
type: "AerialWithLabels",
sphericalMercator: true
});
// Create OSM layer
var osm = new OpenLayers.Layer.OSM("OpenStreetMap");
// create TMS Overlay layer
var tmsoverlay = new OpenLayers.Layer.TMS("TMS Overlay", "",
{
serviceVersion: '.',
layername: '.',
alpha: true,
type: '%(tileformat)s',
isBaseLayer: false,
getURL: getURL
});
if (OpenLayers.Util.alphaHack() == false) {
tmsoverlay.setOpacity(0.7);
}
map.addLayers([gmap, gsat, ghyb, gter,
broad, baer, bhyb,
osm, tmsoverlay]);
var switcherControl = new OpenLayers.Control.LayerSwitcher();
map.addControl(switcherControl);
switcherControl.maximizeControl();
map.zoomToExtent(mapBounds.transform(map.displayProjection, map.projection));
""" % args # noqa
elif self.options.profile == 'geodetic':
s += """
var options = {
div: "map",
controls: [],
projection: "EPSG:4326"
};
map = new OpenLayers.Map(options);
var wms = new OpenLayers.Layer.WMS("VMap0",
"http://tilecache.osgeo.org/wms-c/Basic.py?",
{
layers: 'basic',
format: 'image/png'
}
);
var tmsoverlay = new OpenLayers.Layer.TMS("TMS Overlay", "",
{
serviceVersion: '.',
layername: '.',
alpha: true,
type: '%(tileformat)s',
isBaseLayer: false,
getURL: getURL
});
if (OpenLayers.Util.alphaHack() == false) {
tmsoverlay.setOpacity(0.7);
}
map.addLayers([wms,tmsoverlay]);
var switcherControl = new OpenLayers.Control.LayerSwitcher();
map.addControl(switcherControl);
switcherControl.maximizeControl();
map.zoomToExtent(mapBounds);
""" % args # noqa
elif self.options.profile == 'raster':
s += """
var options = {
div: "map",
controls: [],
maxExtent: new OpenLayers.Bounds(%(west)s, %(south)s, %(east)s, %(north)s),
maxResolution: %(rastermaxresolution)f,
numZoomLevels: %(rasterzoomlevels)d
};
map = new OpenLayers.Map(options);
var layer = new OpenLayers.Layer.TMS("TMS Layer", "",
{
serviceVersion: '.',
layername: '.',
alpha: true,
type: '%(tileformat)s',
getURL: getURL
});
map.addLayer(layer);
map.zoomToExtent(mapBounds);
""" % args # noqa
s += """
map.addControls([new OpenLayers.Control.PanZoomBar(),
new OpenLayers.Control.Navigation(),
new OpenLayers.Control.MousePosition(),
new OpenLayers.Control.ArgParser(),
new OpenLayers.Control.Attribution()]);
}
""" % args
if self.options.profile == 'mercator':
s += """
function getURL(bounds) {
bounds = this.adjustBounds(bounds);
var res = this.getServerResolution();
var x = Math.round((bounds.left - this.tileOrigin.lon) / (res * this.tileSize.w));
var y = Math.round((bounds.bottom - this.tileOrigin.lat) / (res * this.tileSize.h));
var z = this.getServerZoom();
if (this.map.baseLayer.CLASS_NAME === 'OpenLayers.Layer.Bing') {
z+=1;
}
var path = this.serviceVersion + "/" + this.layername + "/" + z + "/" + x + "/" + y + "." + this.type;
var url = this.url;
if (OpenLayers.Util.isArray(url)) {
url = this.selectUrl(path, url);
}
if (mapBounds.intersectsBounds(bounds) && (z >= mapMinZoom) && (z <= mapMaxZoom)) {
return url + path;
} else {
return emptyTileURL;
}
}
""" % args # noqa
elif self.options.profile == 'geodetic':
s += """
function getURL(bounds) {
bounds = this.adjustBounds(bounds);
var res = this.getServerResolution();
var x = Math.round((bounds.left - this.tileOrigin.lon) / (res * this.tileSize.w));
var y = Math.round((bounds.bottom - this.tileOrigin.lat) / (res * this.tileSize.h));
var z = this.getServerZoom()%(tmsoffset)s;
var path = this.serviceVersion + "/" + this.layername + "/" + z + "/" + x + "/" + y + "." + this.type;
var url = this.url;
if (OpenLayers.Util.isArray(url)) {
url = this.selectUrl(path, url);
}
if (mapBounds.intersectsBounds(bounds) && (z >= mapMinZoom) && (z <= mapMaxZoom)) {
return url + path;
} else {
return emptyTileURL;
}
}
""" % args # noqa
elif self.options.profile == 'raster':
s += """
function getURL(bounds) {
bounds = this.adjustBounds(bounds);
var res = this.getServerResolution();
var x = Math.round((bounds.left - this.tileOrigin.lon) / (res * this.tileSize.w));
var y = Math.round((bounds.bottom - this.tileOrigin.lat) / (res * this.tileSize.h));
var z = this.getServerZoom();
var path = this.serviceVersion + "/" + this.layername + "/" + z + "/" + x + "/" + y + "." + this.type;
var url = this.url;
if (OpenLayers.Util.isArray(url)) {
url = this.selectUrl(path, url);
}
if (mapBounds.intersectsBounds(bounds) && (z >= mapMinZoom) && (z <= mapMaxZoom)) {
return url + path;
} else {
return emptyTileURL;
}
}
""" % args # noqa
s += """
function getWindowHeight() {
if (self.innerHeight) return self.innerHeight;
if (document.documentElement && document.documentElement.clientHeight)
return document.documentElement.clientHeight;
if (document.body) return document.body.clientHeight;
return 0;
}
function getWindowWidth() {
if (self.innerWidth) return self.innerWidth;
if (document.documentElement && document.documentElement.clientWidth)
return document.documentElement.clientWidth;
if (document.body) return document.body.clientWidth;
return 0;
}
function resize() {
var map = document.getElementById("map");
var header = document.getElementById("header");
var subheader = document.getElementById("subheader");
map.style.height = (getWindowHeight()-80) + "px";
map.style.width = (getWindowWidth()-20) + "px";
header.style.width = (getWindowWidth()-20) + "px";
subheader.style.width = (getWindowWidth()-20) + "px";
if (map.updateSize) { map.updateSize(); };
}
onresize=function(){ resize(); };
</script>
</head>
<body onload="init()">
<div id="header"><h1>%(title)s</h1></div>
<div id="subheader">Generated by <a href="http://www.klokan.cz/projects/gdal2tiles/">GDAL2Tiles</a>, Copyright © 2008 <a href="http://www.klokan.cz/">Klokan Petr Pridal</a>, <a href="http://www.gdal.org/">GDAL</a> & <a href="http://www.osgeo.org/">OSGeo</a> <a href="http://code.google.com/soc/">GSoC</a>
<!-- PLEASE, LET THIS NOTE ABOUT AUTHOR AND PROJECT SOMEWHERE ON YOUR WEBSITE, OR AT LEAST IN THE COMMENT IN HTML. THANK YOU -->
</div>
<div id="map"></div>
<script type="text/javascript" >resize()</script>
</body>
</html>""" % args # noqa
return s
def worker_tile_details(input_file, output_folder, options, send_pipe=None):
try:
gdal2tiles = GDAL2Tiles(input_file, output_folder, options)
gdal2tiles.open_input()
gdal2tiles.generate_metadata()
tile_job_info, tile_details = gdal2tiles.generate_base_tiles()
return_data = (tile_job_info, tile_details)
if send_pipe:
send_pipe.send(return_data)
return return_data
except Exception as e:
print("worker_tile_details failed ", str(e))
raise
def progress_printer_thread(queue, nb_jobs):
pb = ProgressBar(nb_jobs)
pb.start()
for _ in range(nb_jobs):
queue.get()
pb.log_progress()
queue.task_done()
class ProgressBar(object):
def __init__(self, total_items):
self.total_items = total_items
self.nb_items_done = 0
self.current_progress = 0
self.STEP = 2.5
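# With STEP = 2.5 the bar advances in 2.5% increments, writing the percentage
# at every multiple of 10 and a dot otherwise, e.g. "0...10...20...".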
def start(self):
sys.stdout.write("0")
def log_progress(self, nb_items=1):
self.nb_items_done += nb_items
progress = float(self.nb_items_done) / self.total_items * 100
if progress >= self.current_progress + self.STEP:
done = False
while not done:
if self.current_progress + self.STEP <= progress:
self.current_progress += self.STEP
if self.current_progress % 10 == 0:
sys.stdout.write(str(int(self.current_progress)))
if self.current_progress == 100:
sys.stdout.write("\n")
else:
sys.stdout.write(".")
else:
done = True
sys.stdout.flush()
def get_tile_swne(tile_job_info, options):
if options.profile == 'mercator':
mercator = GlobalMercator()
tile_swne = mercator.TileLatLonBounds
elif options.profile == 'geodetic':
geodetic = GlobalGeodetic(options.tmscompatible)
tile_swne = geodetic.TileLatLonBounds
elif options.profile == 'raster':
srs4326 = osr.SpatialReference()
srs4326.ImportFromEPSG(4326)
if tile_job_info.kml and tile_job_info.in_srs_wkt:
in_srs = osr.SpatialReference()
in_srs.ImportFromWkt(tile_job_info.in_srs_wkt)
ct = osr.CoordinateTransformation(in_srs, srs4326)
def rastertileswne(x, y, z):
pixelsizex = (2 ** (tile_job_info.tmaxz - z) * tile_job_info.out_geo_trans[1])
west = tile_job_info.out_geo_trans[0] + x * tile_job_info.tile_size * pixelsizex
east = west + tile_job_info.tile_size * pixelsizex
south = tile_job_info.ominy + y * tile_job_info.tile_size * pixelsizex
north = south + tile_job_info.tile_size * pixelsizex
if not tile_job_info.is_epsg_4326:
# Transformation to EPSG:4326 (WGS84 datum)
west, south = ct.TransformPoint(west, south)[:2]
east, north = ct.TransformPoint(east, north)[:2]
return south, west, north, east
tile_swne = rastertileswne
else:
tile_swne = lambda x, y, z: (0, 0, 0, 0) # noqa
else:
tile_swne = lambda x, y, z: (0, 0, 0, 0) # noqa
return tile_swne
def single_threaded_tiling(input_file, output_folder, options):
"""
Keep a single threaded version that stays clear of multiprocessing, for platforms that would not
support it
"""
if options.verbose:
print("Begin tiles details calc")
conf, tile_details = worker_tile_details(input_file, output_folder, options)
if options.verbose:
print("Tiles details calc complete.")
if not options.verbose and not options.quiet:
progress_bar = ProgressBar(len(tile_details))
progress_bar.start()
for tile_detail in tile_details:
create_base_tile(conf, tile_detail)
if not options.verbose and not options.quiet:
progress_bar.log_progress()
create_overview_tiles(conf, output_folder, options)
shutil.rmtree(os.path.dirname(conf.src_file))
def multi_threaded_tiling(input_file, output_folder, options):
nb_processes = options.nb_processes or 1
(conf_receiver, conf_sender) = Pipe(False)
if options.verbose:
print("Begin tiles details calc")
p = Process(target=worker_tile_details,
args=[input_file, output_folder, options],
kwargs={"send_pipe": conf_sender})
p.start()
# Make sure to consume the pipe before joining. If the payload is too big, it won't be
# sent in one go, and the sending process would never finish, waiting for space in the
# pipe to send its data
conf, tile_details = conf_receiver.recv()
p.join()
if options.verbose:
print("Tiles details calc complete.")
# Have to create the Queue through a multiprocessing.Manager to get a Queue Proxy,
# otherwise you can't pass it as a param in the method invoked by the pool...
manager = Manager()
queue = manager.Queue()
pool = Pool(processes=nb_processes)
# TODO: gbataille - check the confs for which each element is an array... one useless level?
# TODO: gbataille - assign an ID to each job for print in verbose mode "ReadRaster Extent ..."
# TODO: gbataille - check memory footprint and time on big image. are they opened x times
for tile_detail in tile_details:
pool.apply_async(create_base_tile, (conf, tile_detail), {"queue": queue})
if not options.verbose and not options.quiet:
p = Process(target=progress_printer_thread, args=[queue, len(tile_details)])
p.start()
pool.close()
pool.join() # Jobs finished
if not options.verbose and not options.quiet:
p.join() # Traces done
create_overview_tiles(conf, output_folder, options)
shutil.rmtree(os.path.dirname(conf.src_file))
def main():
# TODO: gbataille - use mkdtemp to work in a temp directory
# TODO: gbataille - debug intermediate tiles.vrt not produced anymore?
# TODO: gbataille - Refactor generate overview tiles to not depend on self variables
argv = gdal.GeneralCmdLineProcessor(sys.argv)
input_file, output_folder, options = process_args(argv[1:])
nb_processes = options.nb_processes or 1
if nb_processes == 1:
single_threaded_tiling(input_file, output_folder, options)
else:
multi_threaded_tiling(input_file, output_folder, options)
if __name__ == '__main__':
main()
# vim: set tabstop=4 shiftwidth=4 expandtab:
|
debug.py
|
import os
import sys
import threading
import time
import traceback
from debug_toolbar.panels import DebugPanel
from django.template.loader import render_to_string
from redis_models import CanvasRedis
from canvas import util
class RedisPanel(DebugPanel):
name = 'Redis'
has_content = True
def __init__(self, *args, **kwargs):
DebugPanel.__init__(self, *args, **kwargs)
self._start = len(CanvasRedis.commands)
def queries(self):
return CanvasRedis.commands[self._start:]
def nav_title(self):
return 'Redis'
def nav_subtitle(self):
q = self.queries()
count = len(q)
total_time = sum(time for host, port, db, time, stacktrace, command, size in q)
total_bytes = sum(size for host, port, db, time, stacktrace, command, size in q) / 1024.0
return '%(count)s commands in %(total_time)0.02fms (%(total_bytes)0.02fkb)' % locals()
def title(self):
return 'Redis Commands'
def url(self):
return ''
def content(self):
context = {
'redis_commands': self.queries(),
}
return render_to_string('widget/debug_redis_panel.django.html', context)
class StackMonitor(object):
singleton = None
@classmethod
def ensure(cls):
if not cls.singleton:
cls.singleton = StackMonitor()
return cls.singleton
def __init__(self):
self.interval = 0.01
self.pid = os.getpid()
self.output = open('/var/canvas/website/run/sample.%s.log' % self.pid, 'a')
self.output.write('spawned\n')
self.thread = threading.Thread(target=self.run)
self.thread.daemon = True
self.thread.start()
def run(self):
self.output.write("StackMonitor running\n")
while True:
time.sleep(self.interval)
self.sample()
def sample(self):
t = time.time()
frames = sys._current_frames()
my_frame = sys._getframe()
for thread, frame in frames.items():
if frame == my_frame:
continue
if '/gunicorn/' in frame.f_code.co_filename:
continue
header = "Stack Monitor pid: %s time: %s thread: %s\n" % (self.pid, t, thread)
self.output.write(header + "".join(traceback.format_stack(frame)))
|
app.py
|
import json
import logging
import multiprocessing as mp
import os
import signal
import sys
import threading
from logging.handlers import QueueHandler
from typing import Dict, List
import traceback
import yaml
from peewee_migrate import Router
from playhouse.sqlite_ext import SqliteExtDatabase
from playhouse.sqliteq import SqliteQueueDatabase
from pydantic import ValidationError
from frigate.config import DetectorTypeEnum, FrigateConfig
from frigate.const import CACHE_DIR, CLIPS_DIR, RECORD_DIR
from frigate.edgetpu import EdgeTPUProcess
from frigate.events import EventCleanup, EventProcessor
from frigate.http import create_app
from frigate.log import log_process, root_configurer
from frigate.models import Event, Recordings
from frigate.mqtt import MqttSocketRelay, create_mqtt_client
from frigate.object_processing import TrackedObjectProcessor
from frigate.output import output_frames
from frigate.record import RecordingCleanup, RecordingMaintainer
from frigate.stats import StatsEmitter, stats_init
from frigate.version import VERSION
from frigate.video import capture_camera, track_camera
from frigate.watchdog import FrigateWatchdog
logger = logging.getLogger(__name__)
class FrigateApp:
def __init__(self):
self.stop_event = mp.Event()
self.base_config: FrigateConfig = None
self.config: FrigateConfig = None
self.detection_queue = mp.Queue()
self.detectors: Dict[str, EdgeTPUProcess] = {}
self.detection_out_events: Dict[str, mp.Event] = {}
self.detection_shms: List[mp.shared_memory.SharedMemory] = []
self.log_queue = mp.Queue()
self.camera_metrics = {}
def set_environment_vars(self):
for key, value in self.config.environment_vars.items():
os.environ[key] = value
def ensure_dirs(self):
for d in [RECORD_DIR, CLIPS_DIR, CACHE_DIR]:
if not os.path.exists(d) and not os.path.islink(d):
logger.info(f"Creating directory: {d}")
os.makedirs(d)
else:
logger.debug(f"Skipping directory: {d}")
def init_logger(self):
self.log_process = mp.Process(
target=log_process, args=(self.log_queue,), name="log_process"
)
self.log_process.daemon = True
self.log_process.start()
root_configurer(self.log_queue)
def init_config(self):
config_file = os.environ.get("CONFIG_FILE", "/config/config.yml")
# Check if we can use .yaml instead of .yml
config_file_yaml = config_file.replace(".yml", ".yaml")
if os.path.isfile(config_file_yaml):
config_file = config_file_yaml
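# e.g. with the default CONFIG_FILE of /config/config.yml, an existing
# /config/config.yaml next to it takes precedence.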
user_config = FrigateConfig.parse_file(config_file)
self.config = user_config.runtime_config
for camera_name in self.config.cameras.keys():
# create camera_metrics
self.camera_metrics[camera_name] = {
"camera_fps": mp.Value("d", 0.0),
"skipped_fps": mp.Value("d", 0.0),
"process_fps": mp.Value("d", 0.0),
"detection_enabled": mp.Value(
"i", self.config.cameras[camera_name].detect.enabled
),
"detection_fps": mp.Value("d", 0.0),
"detection_frame": mp.Value("d", 0.0),
"read_start": mp.Value("d", 0.0),
"ffmpeg_pid": mp.Value("i", 0),
"frame_queue": mp.Queue(maxsize=2),
}
def set_log_levels(self):
logging.getLogger().setLevel(self.config.logger.default.value.upper())
for log, level in self.config.logger.logs.items():
logging.getLogger(log).setLevel(level.value.upper())
if not "werkzeug" in self.config.logger.logs:
logging.getLogger("werkzeug").setLevel("ERROR")
def init_queues(self):
# Queues for clip processing
self.event_queue = mp.Queue()
self.event_processed_queue = mp.Queue()
self.video_output_queue = mp.Queue(maxsize=len(self.config.cameras.keys()) * 2)
# Queue for cameras to push tracked objects to
self.detected_frames_queue = mp.Queue(
maxsize=len(self.config.cameras.keys()) * 2
)
# Queue for recordings info
self.recordings_info_queue = mp.Queue()
def init_database(self):
# Migrate DB location
old_db_path = os.path.join(CLIPS_DIR, "frigate.db")
if not os.path.isfile(self.config.database.path) and os.path.isfile(
old_db_path
):
os.rename(old_db_path, self.config.database.path)
# Migrate DB schema
migrate_db = SqliteExtDatabase(self.config.database.path)
# Run migrations
del logging.getLogger("peewee_migrate").handlers[:]
router = Router(migrate_db)
router.run()
migrate_db.close()
self.db = SqliteQueueDatabase(self.config.database.path)
models = [Event, Recordings]
self.db.bind(models)
def init_stats(self):
self.stats_tracking = stats_init(self.camera_metrics, self.detectors)
def init_web_server(self):
self.flask_app = create_app(
self.config,
self.db,
self.stats_tracking,
self.detected_frames_processor,
)
def init_mqtt(self):
self.mqtt_client = create_mqtt_client(self.config, self.camera_metrics)
def start_mqtt_relay(self):
self.mqtt_relay = MqttSocketRelay(
self.mqtt_client, self.config.mqtt.topic_prefix
)
self.mqtt_relay.start()
def start_detectors(self):
model_path = self.config.model.path
model_shape = (self.config.model.height, self.config.model.width)
for name in self.config.cameras.keys():
self.detection_out_events[name] = mp.Event()
try:
shm_in = mp.shared_memory.SharedMemory(
name=name,
create=True,
size=self.config.model.height * self.config.model.width * 3,
)
except FileExistsError:
shm_in = mp.shared_memory.SharedMemory(name=name)
try:
shm_out = mp.shared_memory.SharedMemory(
name=f"out-{name}", create=True, size=20 * 6 * 4
)
except FileExistsError:
shm_out = mp.shared_memory.SharedMemory(name=f"out-{name}")
self.detection_shms.append(shm_in)
self.detection_shms.append(shm_out)
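# shm_in holds one raw model-sized frame (height * width * 3 bytes); shm_out's
# 20 * 6 * 4 bytes presumably corresponds to up to 20 detections of 6 float32
# values each (this interpretation is an assumption; only the byte math comes
# from the sizes above).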
for name, detector in self.config.detectors.items():
if detector.type == DetectorTypeEnum.cpu:
self.detectors[name] = EdgeTPUProcess(
name,
self.detection_queue,
self.detection_out_events,
model_path,
model_shape,
"cpu",
detector.num_threads,
)
if detector.type == DetectorTypeEnum.edgetpu:
self.detectors[name] = EdgeTPUProcess(
name,
self.detection_queue,
self.detection_out_events,
model_path,
model_shape,
detector.device,
detector.num_threads,
)
def start_detected_frames_processor(self):
self.detected_frames_processor = TrackedObjectProcessor(
self.config,
self.mqtt_client,
self.config.mqtt.topic_prefix,
self.detected_frames_queue,
self.event_queue,
self.event_processed_queue,
self.video_output_queue,
self.recordings_info_queue,
self.stop_event,
)
self.detected_frames_processor.start()
def start_video_output_processor(self):
output_processor = mp.Process(
target=output_frames,
name=f"output_processor",
args=(
self.config,
self.video_output_queue,
),
)
output_processor.daemon = True
self.output_processor = output_processor
output_processor.start()
logger.info(f"Output process started: {output_processor.pid}")
def start_camera_processors(self):
model_shape = (self.config.model.height, self.config.model.width)
for name, config in self.config.cameras.items():
camera_process = mp.Process(
target=track_camera,
name=f"camera_processor:{name}",
args=(
name,
config,
model_shape,
self.config.model.merged_labelmap,
self.detection_queue,
self.detection_out_events[name],
self.detected_frames_queue,
self.camera_metrics[name],
),
)
camera_process.daemon = True
self.camera_metrics[name]["process"] = camera_process
camera_process.start()
logger.info(f"Camera processor started for {name}: {camera_process.pid}")
def start_camera_capture_processes(self):
for name, config in self.config.cameras.items():
capture_process = mp.Process(
target=capture_camera,
name=f"camera_capture:{name}",
args=(name, config, self.camera_metrics[name]),
)
capture_process.daemon = True
self.camera_metrics[name]["capture_process"] = capture_process
capture_process.start()
logger.info(f"Capture process started for {name}: {capture_process.pid}")
def start_event_processor(self):
self.event_processor = EventProcessor(
self.config,
self.camera_metrics,
self.event_queue,
self.event_processed_queue,
self.stop_event,
)
self.event_processor.start()
def start_event_cleanup(self):
self.event_cleanup = EventCleanup(self.config, self.stop_event)
self.event_cleanup.start()
def start_recording_maintainer(self):
self.recording_maintainer = RecordingMaintainer(
self.config, self.recordings_info_queue, self.stop_event
)
self.recording_maintainer.start()
def start_recording_cleanup(self):
self.recording_cleanup = RecordingCleanup(self.config, self.stop_event)
self.recording_cleanup.start()
def start_stats_emitter(self):
self.stats_emitter = StatsEmitter(
self.config,
self.stats_tracking,
self.mqtt_client,
self.config.mqtt.topic_prefix,
self.stop_event,
)
self.stats_emitter.start()
def start_watchdog(self):
self.frigate_watchdog = FrigateWatchdog(self.detectors, self.stop_event)
self.frigate_watchdog.start()
def start(self):
self.init_logger()
logger.info(f"Starting Frigate ({VERSION})")
try:
try:
self.init_config()
except Exception as e:
print("*************************************************************")
print("*************************************************************")
print("*** Your config file is not valid! ***")
print("*** Please check the docs at ***")
print("*** https://docs.frigate.video/configuration/index ***")
print("*************************************************************")
print("*************************************************************")
print("*** Config Validation Errors ***")
print("*************************************************************")
print(e)
print(traceback.format_exc())
print("*************************************************************")
print("*** End Config Validation Errors ***")
print("*************************************************************")
self.log_process.terminate()
sys.exit(1)
self.set_environment_vars()
self.ensure_dirs()
self.set_log_levels()
self.init_queues()
self.init_database()
self.init_mqtt()
except Exception as e:
print(e)
self.log_process.terminate()
sys.exit(1)
self.start_detectors()
self.start_video_output_processor()
self.start_detected_frames_processor()
self.start_camera_processors()
self.start_camera_capture_processes()
self.init_stats()
self.init_web_server()
self.start_mqtt_relay()
self.start_event_processor()
self.start_event_cleanup()
self.start_recording_maintainer()
self.start_recording_cleanup()
self.start_stats_emitter()
self.start_watchdog()
# self.zeroconf = broadcast_zeroconf(self.config.mqtt.client_id)
def receiveSignal(signalNumber, frame):
self.stop()
sys.exit()
signal.signal(signal.SIGTERM, receiveSignal)
try:
self.flask_app.run(host="127.0.0.1", port=5001, debug=False)
except KeyboardInterrupt:
pass
self.stop()
def stop(self):
logger.info(f"Stopping...")
self.stop_event.set()
self.mqtt_relay.stop()
self.detected_frames_processor.join()
self.event_processor.join()
self.event_cleanup.join()
self.recording_maintainer.join()
self.recording_cleanup.join()
self.stats_emitter.join()
self.frigate_watchdog.join()
self.db.stop()
for detector in self.detectors.values():
detector.stop()
while len(self.detection_shms) > 0:
shm = self.detection_shms.pop()
shm.close()
shm.unlink()
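# Shutdown sketch (descriptive only): stop() runs either from the SIGTERM handler
# registered in start() or after the Flask server returns. It sets stop_event so the
# maintainer threads and worker processes can exit, joins them, stops the database
# queue and detectors, and finally closes and unlinks every SharedMemory segment
# created in start_detectors().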
|
e2e_throughput.py
|
#!/usr/bin/python3
from bigdl.serving.client import InputQueue, OutputQueue
from bigdl.common.encryption_utils import encrypt_with_AES_GCM
import os
import cv2
import json
import time
from optparse import OptionParser
import base64
from multiprocessing import Process
import redis
import yaml
import argparse
from numpy import maximum
RESULT_PREFIX = "cluster-serving_"
name = "serving_stream"
def main(args):
if args.image_num % args.proc_num != 0:
raise EOFError("Please make sure that image push number can be divided by multi-process number")
redis_args = {}
with open(args.config_path) as file:
config = yaml.full_load(file)
redis_url = config.get('redisUrl')
if redis_url:
host = redis_url.split(':')[0]
port = redis_url.split(':')[1]
redis_args = {'host': host, 'port': port}
if config.get('redisSecureEnabled'):
if not os.path.isdir(args.keydir):
raise EOFError("Please set secure key path")
redis_args['ssl'] = 'True'
redis_args['ssl_cert_reqs'] = 'none'
redis_args['ssl_certfile'] = redis_args['ssl_ca_certs'] = os.path.join(args.keydir, "server.crt")
redis_args['ssl_keyfile'] = os.path.join(args.keydir, "server.key")
encrypt = config.get('recordEncrypted')
DB = redis.StrictRedis(**redis_args)
redis_args.pop('ssl_cert_reqs', None)
try:
print("Entering initial dequeue")
output_api = OutputQueue(**redis_args)
start = time.time()
res = output_api.dequeue()
end = time.time()
print("Dequeued", len(res), "records in", end - start, "sec, dequeue fps:", len(res) / (end - start))
print("Initial dequeue completed")
except Exception:
print("Dequeue error encountered")
e2e_start = image_enqueue(redis_args, args.image_num, args.proc_num, args.image_path, encrypt)
e2e_end, dequeue_num, num_invalid = image_dequeue(DB, args.image_num)
num_valid = maximum(dequeue_num - num_invalid, 0)
duration = e2e_end - e2e_start
print("Served", num_valid, "images in", duration, "sec, e2e throughput is", num_valid / duration,
"images/sec, excluded", num_invalid, "invalid results")
def image_enqueue(redis_args, img_num, proc_num, path, encrypt):
print("Entering enqueue")
input_api = InputQueue(**redis_args)
img = cv2.imread(path)
img = cv2.resize(img, (224, 224))
data = cv2.imencode(".jpg", img)[1]
img_encoded = base64.b64encode(data).decode("utf-8")
if encrypt:
img_encoded = encrypt_with_AES_GCM(img_encoded, "secret", "salt")
print("Record encoded")
img_per_proc = int(img_num / proc_num)
procs = []
def push_image(image_num, index, proc_id):
print("Entering enqueue", proc_id)
for i in range(image_num):
input_api.enqueue("my-img-" + str(i + index), t={"b64": img_encoded})
start = time.time()
for i in range(proc_num):
proc = Process(target=push_image, args=(img_per_proc, i * img_per_proc, i,))
procs.append(proc)
proc.start()
for p in procs:
p.join()
end = time.time()
print(img_num, "images enqueued")
print("total enqueue time:", end - start)
fps = img_num / (end - start)
print("enqueue fps:", fps)
return start
def image_dequeue(DB, img_num):
print("Entering dequeue")
dequeue_num = 0
num_invalid = 0
start = time.time()
while dequeue_num < img_num:
pipe = DB.pipeline()
res_list = DB.keys(RESULT_PREFIX + name + ':*')
for res in res_list:
pipe.hgetall(res.decode('utf-8'))
res_dict_list = pipe.execute()
for res_dict in res_dict_list:
try:
res_val = res_dict[b'value'].decode('utf-8')
except Exception:
print("Irregular result dict:", res_dict)
num_invalid += 1
continue
if res_val == 'NaN':
num_invalid += 1
num_res = len(res_list)
if num_res > 0:
dequeue_num += num_res
print("Received", dequeue_num, "results, including", num_invalid, "invalid results")
DB.delete(*res_list)
print("Total dequeue time:", time.time() - start)
return time.time(), dequeue_num, num_invalid
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--config_path', '-c', help='path of cluster serving config.yaml', default='../../config.yaml')
parser.add_argument('--image_path', '-i', help='path of test image', default='ILSVRC2012_val_00000001.JPEG')
parser.add_argument('--image_num', '-n', type=int, help='number of iterations to push image', default=1000)
parser.add_argument('--proc_num', '-p', type=int, help='number of procs', default=10)
parser.add_argument('--keydir', '-k', help='key files directory path', default='../keys')
args = parser.parse_args()
main(args)
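# Example invocation (the paths below are just the script defaults, adjust as needed):
#   python3 e2e_throughput.py -c ../../config.yaml -i ILSVRC2012_val_00000001.JPEG -n 1000 -p 10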
|
server.py
|
#!/usr/bin/env python
import zmq
import time
import dill
import logging
import threading
import numpy as np
import ami.comm
from ami import LogConfig
from ami.export.nt import NTBytes, NTObject, NTGraph, NTStore
from p4p.nt import NTScalar, NTNDArray
from p4p.server import Server, StaticProvider
from p4p.server.thread import SharedPV
from p4p.rpc import rpc, NTURIDispatcher
from p4p.util import ThreadedWorkQueue
logger = logging.getLogger(LogConfig.get_package_name(__name__))
class PvaExportPutHandler:
def __init__(self, put=None, rpc=None):
self._put = put
self._rpc = rpc
def put(self, pv, op):
if self._put is not None:
self._put(pv, op)
def rpc(self, pv, op):
if self._rpc is not None:
self._rpc(pv, op)
class PvaExportRpcHandler:
def __init__(self, ctx, addr):
self.ctx = ctx
self.addr = addr
self.comms = {}
def _get_comm(self, graph):
if graph not in self.comms:
self.comms[graph] = ami.comm.GraphCommHandler(graph, self.addr, ctx=self.ctx)
return self.comms[graph]
@rpc(NTScalar('?'))
def create(self, graph):
return self._get_comm(graph).create()
@rpc(NTScalar('?'))
def destroy(self, graph):
return self._get_comm(graph).destroy()
@rpc(NTScalar('?'))
def clear(self, graph):
return self._get_comm(graph).clear()
@rpc(NTScalar('?'))
def reset(self, graph):
return self._get_comm(graph).reset()
@rpc(NTScalar('?'))
def post(self, graph, topic, payload):
return self._get_comm(graph)._post_dill(topic, dill.loads(payload.tobytes()))
@rpc(NTScalar('as'))
def names(self, graph):
return self._get_comm(graph).names
@rpc(NTScalar('?'))
def view(self, graph, name):
return self._get_comm(graph).view(name)
@rpc(NTScalar('?'))
def export(self, graph, name, alias):
return self._get_comm(graph).export(name, alias)
class PvaExportServer:
def __init__(self, name, comm_addr, export_addr, aggregate=False):
self.base = name
self.ctx = zmq.Context()
self.export = self.ctx.socket(zmq.SUB)
self.export.setsockopt_string(zmq.SUBSCRIBE, "")
self.export.connect(export_addr)
self.comm = self.ctx.socket(zmq.REQ)
self.comm.connect(comm_addr)
self.queue = ThreadedWorkQueue(maxsize=20, workers=1)
# pva server provider
self.provider = StaticProvider(name)
self.rpc_provider = NTURIDispatcher(self.queue,
target=PvaExportRpcHandler(self.ctx, comm_addr),
name="%s:cmd" % self.base,
prefix="%s:cmd:" % self.base)
self.server_thread = threading.Thread(target=self.server, name='pvaserv')
self.server_thread.daemon = True
self.aggregate = aggregate
self.pvs = {}
self.ignored = set()
self.graph_pvbase = "ana"
self.data_pvbase = "data"
self.info_pvbase = "info"
self.cmd_pvs = {'command'}
self.payload_cmd_pvs = {'add', 'set', 'del'}
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
self.ctx.destroy()
@staticmethod
def join_pv(*args):
return ":".join(args)
def graph_pvname(self, graph, name=None):
if name is not None:
return ":".join([self.graph_pvbase, graph, name])
else:
return ":".join([self.graph_pvbase, graph])
def data_pvname(self, graph, name):
return ":".join([self.graph_pvbase, graph, self.data_pvbase, name])
def info_pvname(self, name):
return ":".join([self.info_pvbase, name])
def find_graph_pvnames(self, graph, names):
return [name for name in names if name.startswith(self.graph_pvname(graph))]
def create_pv(self, name, nt, initial, func=None):
if func is not None:
pv = SharedPV(nt=nt, initial=initial, handler=PvaExportPutHandler(put=func))
else:
pv = SharedPV(nt=nt, initial=initial)
self.provider.add('%s:%s' % (self.base, name), pv)
self.pvs[name] = pv
def create_bytes_pv(self, name, initial, func=None):
self.create_pv(name, NTBytes(), initial, func=func)
def valid(self, name, group=None):
return not name.startswith('_')
def get_pv_type(self, data):
if isinstance(data, np.ndarray):
return NTNDArray()
elif isinstance(data, bool):
return NTScalar('?')
elif isinstance(data, int):
return NTScalar('l')
elif isinstance(data, float):
return NTScalar('d')
else:
return NTObject()
def update_graph(self, graph, data):
# add the unaggregated version of the pvs
for key, value in data.items():
if key in NTGraph.flat_schema:
name, nttype = NTGraph.flat_schema[key]
pvname = self.graph_pvname(graph, name)
if pvname not in self.pvs:
self.create_pv(pvname, nttype, value)
else:
self.pvs[pvname].post(value)
# add the aggregated graph pv if requested
if self.aggregate:
pvname = self.graph_pvname(graph)
if pvname not in self.pvs:
logger.debug("Creating pv for info on the graph")
self.create_pv(pvname, NTGraph(), data)
else:
self.pvs[pvname].post(data)
def update_store(self, graph, data):
# add the unaggregated version of the pvs
for key, value in data.items():
if key in NTStore.flat_schema:
name, nttype = NTStore.flat_schema[key]
pvname = self.graph_pvname(graph, name)
if pvname not in self.pvs:
self.create_pv(pvname, nttype, value)
else:
self.pvs[pvname].post(value)
# add the aggregated graph pv if requested
if self.aggregate:
pvname = self.graph_pvname(graph, 'store')
if pvname not in self.pvs:
logger.debug("Creating pv for info on the store")
self.create_pv(pvname, NTStore(), data)
else:
self.pvs[pvname].post(data)
def update_heartbeat(self, graph, heartbeat):
pvname = self.graph_pvname(graph, 'heartbeat')
if pvname not in self.pvs:
self.create_pv(pvname, NTScalar('d'), heartbeat.identity)
else:
self.pvs[pvname].post(heartbeat.identity)
def update_info(self, data):
# add the unaggregated version of the pvs
for key, value in data.items():
pvname = self.info_pvname(key)
if pvname not in self.pvs:
self.create_pv(pvname, NTScalar('as'), value)
else:
self.pvs[pvname].post(value)
def update_data(self, graph, name, data):
pvname = self.data_pvname(graph, name)
if pvname not in self.ignored:
if pvname not in self.pvs:
pv_type = self.get_pv_type(data)
if pv_type is not None:
logger.debug("Creating new pv named %s for graph %s", name, graph)
self.create_pv(pvname, pv_type, data)
else:
logger.warn("Cannot map type of '%s' from graph '%s' to PV: %s", name, graph, type(data))
self.ignored.add(pvname)
else:
self.pvs[pvname].post(data)
def update_destroy(self, graph):
# close all the pvs associated with the purged graph
for name in self.find_graph_pvnames(graph, self.pvs):
logger.debug("Removing pv named %s for graph %s", name, graph)
self.provider.remove('%s:%s' % (self.base, name))
del self.pvs[name]
# remove any ignored pvs associated with the purged graph
for name in self.find_graph_pvnames(graph, self.ignored):
self.ignored.remove(name)
def server(self):
server = Server(providers=[self.provider, self.rpc_provider])
with server, self.queue:
try:
while True:
time.sleep(100)
except KeyboardInterrupt:
pass
def run(self):
# start the pva server thread
self.server_thread.start()
logger.info("Starting PVA data export server")
while True:
topic = self.export.recv_string()
graph = self.export.recv_string()
exports = self.export.recv_pyobj()
if topic == 'data':
for name, data in exports.items():
# ignore names starting with '_' - these are private
if self.valid(name):
self.update_data(graph, name, data)
elif topic == 'graph':
self.update_graph(graph, exports)
elif topic == 'store':
self.update_store(graph, exports)
elif topic == 'heartbeat':
self.update_heartbeat(graph, exports)
elif topic == 'info':
self.update_info(exports)
elif topic == 'destroy':
self.update_destroy(graph)
else:
logger.warn("No handler for topic: %s", topic)
|
mockradio.py
|
#!/usr/bin/python
#Temporary mock radio test layer
import socket
import serial
import SocketServer
import threading
import Queue
import binascii
import struct
msgQueue = Queue.Queue()
class ConnectHandler(SocketServer.BaseRequestHandler):
def handle(self):
try:
self.request.settimeout(30)
self.data = self.request.recv(1024)
print("RECEIVED: %d" % len(self.data))
msgQueue.put(self.data)
except socket.timeout:
self.request.close()
return
class Server(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
daemon_threads = True
allow_reuse_address = True
pass
def checksum(msg):
checksum = 0
for byte in msg:
checksum += ord(byte)
return struct.pack(">B", checksum % 256)
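# Framing sketch (illustrative values, not from a real radio): the main loop below pads
# each message to 86 bytes, appends a modulo-256 byte sum, and hex-encodes both parts:
#   payload = "\x01\x02" + "\x00" * 84
#   checksum(payload) == struct.pack(">B", 3)
#   frame = "VP:" + binascii.hexlify(payload) + "," + binascii.hexlify(checksum(payload))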
if __name__ == "__main__":
server = Server( ("localhost", 8000), ConnectHandler )
t = threading.Thread(target=server.serve_forever)
t.setDaemon( True ) # don't hang on exit
t.start()
ser = serial.Serial("/dev/ttyUSB1", 115200, timeout=0, parity=serial.PARITY_NONE, rtscts=0)
try:
ser.open()
if not ser.isOpen():
print "Failed to open serial console"
exit()
except Exception, e:
print "Serial Error : " + str(e)
exit()
while ( True ):
if not (msgQueue.empty()):
msg = msgQueue.get()
if (len(msg)< 86):
msg += "\x00" *((86 - len(msg)))
cs = checksum(msg)
msg = binascii.hexlify(msg)
msg = "VP:" + msg + "," + binascii.hexlify(cs)
print "Sending: " + msg + "\n"
ser.write(msg)
msgQueue.task_done()
# Print output if present
print ser.read(1024)
server.socket.close()
|
__init__.py
|
# -*- coding: UTF-8 -*-
#virtualBuffers/__init__.py
#A part of NonVisual Desktop Access (NVDA)
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
#Copyright (C) 2007-2017 NV Access Limited, Peter Vágner
import time
import threading
import ctypes
import collections
import itertools
import weakref
import wx
import review
import NVDAHelper
import XMLFormatting
import scriptHandler
from scriptHandler import isScriptWaiting, willSayAllResume
import speech
import NVDAObjects
import api
import sayAllHandler
import controlTypes
import textInfos.offsets
import config
import cursorManager
import browseMode
import gui
import eventHandler
import braille
import queueHandler
from logHandler import log
import ui
import aria
import nvwave
import treeInterceptorHandler
import watchdog
from abc import abstractmethod
VBufStorage_findDirection_forward=0
VBufStorage_findDirection_back=1
VBufStorage_findDirection_up=2
VBufRemote_nodeHandle_t=ctypes.c_ulonglong
class VBufStorage_findMatch_word(str):
pass
VBufStorage_findMatch_notEmpty = object()
FINDBYATTRIBS_ESCAPE_TABLE = {
# Symbols that are escaped in the attributes string.
ord(u":"): r"\\:",
ord(u";"): r"\\;",
ord(u"\\"): u"\\\\\\\\",
}
# Symbols that must be escaped for a regular expression.
FINDBYATTRIBS_ESCAPE_TABLE.update({(ord(s), u"\\" + s) for s in u"^$.*+?()[]{}|"})
def _prepareForFindByAttributes(attribs):
# A lambda that coerces a value to a string and escapes characters suitable for a regular expression.
escape = lambda val: str(val).translate(FINDBYATTRIBS_ESCAPE_TABLE)
reqAttrs = []
regexp = []
if isinstance(attribs, dict):
# Single option.
attribs = (attribs,)
# All options will match against all requested attributes,
# so first build the list of requested attributes.
for option in attribs:
for name in option:
reqAttrs.append(name)
# Now build the regular expression.
for option in attribs:
optRegexp = []
for name in reqAttrs:
optRegexp.append("%s:" % escape(name))
values = option.get(name)
if not values:
# The value isn't tested for this attribute, so match any (or no) value.
optRegexp.append(r"(?:\\;|[^;])*;")
elif values[0] is VBufStorage_findMatch_notEmpty:
# There must be a value for this attribute.
optRegexp.append(r"(?:\\;|[^;])+;")
elif isinstance(values[0], VBufStorage_findMatch_word):
# Assume all are word matches.
optRegexp.append(r"(?:\\;|[^;])*\b(?:")
optRegexp.append("|".join(escape(val) for val in values))
optRegexp.append(r")\b(?:\\;|[^;])*;")
else:
# Assume all are exact matches or None (must not exist).
optRegexp.append("(?:" )
optRegexp.append("|".join((escape(val)+u';') if val is not None else u';' for val in values))
optRegexp.append(")")
regexp.append("".join(optRegexp))
return u" ".join(reqAttrs), u"|".join(regexp)
class VirtualBufferQuickNavItem(browseMode.TextInfoQuickNavItem):
def __init__(self,itemType,document,vbufNode,startOffset,endOffset):
textInfo=document.makeTextInfo(textInfos.offsets.Offsets(startOffset,endOffset))
super(VirtualBufferQuickNavItem,self).__init__(itemType,document,textInfo)
docHandle=ctypes.c_int()
ID=ctypes.c_int()
NVDAHelper.localLib.VBuf_getIdentifierFromControlFieldNode(document.VBufHandle, vbufNode, ctypes.byref(docHandle), ctypes.byref(ID))
self.vbufFieldIdentifier=(docHandle.value,ID.value)
self.vbufNode=vbufNode
@property
def obj(self):
return self.document.getNVDAObjectFromIdentifier(*self.vbufFieldIdentifier)
@property
def label(self):
attrs = {}
def propertyGetter(prop):
if not attrs:
# Lazily fetch the attributes the first time they're needed.
# We do this because we don't want to do this if they're not needed at all.
attrs.update(self.textInfo._getControlFieldAttribs(self.vbufFieldIdentifier[0], self.vbufFieldIdentifier[1]))
return attrs.get(prop)
return self._getLabelForProperties(propertyGetter)
def isChild(self,parent):
if self.itemType == "heading":
try:
if (int(self.textInfo._getControlFieldAttribs(self.vbufFieldIdentifier[0], self.vbufFieldIdentifier[1])["level"])
> int(parent.textInfo._getControlFieldAttribs(parent.vbufFieldIdentifier[0], parent.vbufFieldIdentifier[1])["level"])):
return True
except (KeyError, ValueError, TypeError):
return False
return super(VirtualBufferQuickNavItem,self).isChild(parent)
class VirtualBufferTextInfo(browseMode.BrowseModeDocumentTextInfo,textInfos.offsets.OffsetsTextInfo):
allowMoveToOffsetPastEnd=False #: no need for end insertion point as vbuf is not editable.
def _getControlFieldAttribs(self, docHandle, id):
info = self.copy()
info.expand(textInfos.UNIT_CHARACTER)
for field in reversed(info.getTextWithFields()):
if not (isinstance(field, textInfos.FieldCommand) and field.command == "controlStart"):
# Not a control field.
continue
attrs = field.field
if int(attrs["controlIdentifier_docHandle"]) == docHandle and int(attrs["controlIdentifier_ID"]) == id:
return attrs
raise LookupError
def _getFieldIdentifierFromOffset(self, offset):
startOffset = ctypes.c_int()
endOffset = ctypes.c_int()
docHandle = ctypes.c_int()
ID = ctypes.c_int()
node=VBufRemote_nodeHandle_t()
NVDAHelper.localLib.VBuf_locateControlFieldNodeAtOffset(self.obj.VBufHandle, offset, ctypes.byref(startOffset), ctypes.byref(endOffset), ctypes.byref(docHandle), ctypes.byref(ID),ctypes.byref(node))
if not any((docHandle.value, ID.value)):
raise LookupError("Neither docHandle nor ID found for offset %d" % offset)
return docHandle.value, ID.value
def _getOffsetsFromFieldIdentifier(self, docHandle, ID):
node=VBufRemote_nodeHandle_t()
NVDAHelper.localLib.VBuf_getControlFieldNodeWithIdentifier(self.obj.VBufHandle, docHandle, ID,ctypes.byref(node))
if not node:
raise LookupError
start = ctypes.c_int()
end = ctypes.c_int()
NVDAHelper.localLib.VBuf_getFieldNodeOffsets(self.obj.VBufHandle, node, ctypes.byref(start), ctypes.byref(end))
return start.value, end.value
def _getBoundingRectFromOffset(self,offset):
o = self._getNVDAObjectFromOffset(offset)
if not o:
raise LookupError("no NVDAObject at offset %d" % offset)
if o.hasIrrelevantLocation:
raise LookupError("Object is off screen, invisible or has no location")
return o.location
def _getNVDAObjectFromOffset(self,offset):
try:
docHandle,ID=self._getFieldIdentifierFromOffset(offset)
except LookupError:
log.debugWarning("Couldn't get NVDAObject from offset %d" % offset)
return None
return self.obj.getNVDAObjectFromIdentifier(docHandle,ID)
def _getOffsetsFromNVDAObjectInBuffer(self,obj):
docHandle,ID=self.obj.getIdentifierFromNVDAObject(obj)
return self._getOffsetsFromFieldIdentifier(docHandle,ID)
def _getOffsetsFromNVDAObject(self, obj):
while True:
try:
return self._getOffsetsFromNVDAObjectInBuffer(obj)
except LookupError:
pass
# Interactive list/combo box/tree view descendants aren't rendered into the buffer, even though they are still considered part of it.
# Use the container in this case.
obj = obj.parent
if not obj or obj.role not in (controlTypes.ROLE_LIST, controlTypes.ROLE_COMBOBOX, controlTypes.ROLE_GROUPING, controlTypes.ROLE_TREEVIEW, controlTypes.ROLE_TREEVIEWITEM):
break
raise LookupError
def __init__(self,obj,position):
self.obj=obj
super(VirtualBufferTextInfo,self).__init__(obj,position)
def _getSelectionOffsets(self):
start=ctypes.c_int()
end=ctypes.c_int()
NVDAHelper.localLib.VBuf_getSelectionOffsets(self.obj.VBufHandle,ctypes.byref(start),ctypes.byref(end))
return start.value,end.value
def _setSelectionOffsets(self,start,end):
NVDAHelper.localLib.VBuf_setSelectionOffsets(self.obj.VBufHandle,start,end)
def _getCaretOffset(self):
return self._getSelectionOffsets()[0]
def _setCaretOffset(self,offset):
return self._setSelectionOffsets(offset,offset)
def _getStoryLength(self):
return NVDAHelper.localLib.VBuf_getTextLength(self.obj.VBufHandle)
def _getTextRange(self,start,end):
if start==end:
return u""
return NVDAHelper.VBuf_getTextInRange(self.obj.VBufHandle,start,end,False) or u""
def _getPlaceholderAttribute(self, attrs, placeholderAttrsKey):
"""Gets the placeholder attribute to be used.
@return: The placeholder attribute when there is no content within the ControlField.
None when the ControlField has content.
@note: The content is considered empty if it holds a single space.
"""
placeholder = attrs.get(placeholderAttrsKey)
# For efficiency, only check if it is valid to return placeholder when we have a placeholder value to return.
if not placeholder:
return None
# Get the start and end offsets for the field. This can be used to check if the field has any content.
try:
start, end = self._getOffsetsFromFieldIdentifier(
int(attrs.get('controlIdentifier_docHandle')),
int(attrs.get('controlIdentifier_ID')))
except (LookupError, ValueError):
log.debugWarning("unable to get offsets used to fetch content")
return placeholder
else:
valueLen = end - start
if not valueLen: # value is empty, use placeholder
return placeholder
# Fetching the field's content could return a large amount of text,
# so only fetch it when the offsets show the field is short enough
# (fewer than two characters) that it could plausibly hold a single space.
if valueLen < 2:
controlFieldText = self.obj.makeTextInfo(textInfos.offsets.Offsets(start, end)).text
if not controlFieldText or controlFieldText == ' ':
return placeholder
return None
def _getFieldsInRange(self,start,end):
text=NVDAHelper.VBuf_getTextInRange(self.obj.VBufHandle,start,end,True)
if not text:
return ""
commandList=XMLFormatting.XMLTextParser().parse(text)
for index in range(len(commandList)):
if isinstance(commandList[index],textInfos.FieldCommand):
field=commandList[index].field
if isinstance(field,textInfos.ControlField):
commandList[index].field=self._normalizeControlField(field)
elif isinstance(field,textInfos.FormatField):
commandList[index].field=self._normalizeFormatField(field)
return commandList
def getTextWithFields(self,formatConfig=None):
start=self._startOffset
end=self._endOffset
if start==end:
return ""
return self._getFieldsInRange(start,end)
def _getWordOffsets(self,offset):
#Use VBuf_getLineOffsets without screen layout to find out the range of the current field
lineStart=ctypes.c_int()
lineEnd=ctypes.c_int()
NVDAHelper.localLib.VBuf_getLineOffsets(self.obj.VBufHandle,offset,0,False,ctypes.byref(lineStart),ctypes.byref(lineEnd))
word_startOffset,word_endOffset=super(VirtualBufferTextInfo,self)._getWordOffsets(offset)
return (max(lineStart.value,word_startOffset),min(lineEnd.value,word_endOffset))
def _getLineOffsets(self,offset):
lineStart=ctypes.c_int()
lineEnd=ctypes.c_int()
NVDAHelper.localLib.VBuf_getLineOffsets(self.obj.VBufHandle,offset,config.conf["virtualBuffers"]["maxLineLength"],config.conf["virtualBuffers"]["useScreenLayout"],ctypes.byref(lineStart),ctypes.byref(lineEnd))
return lineStart.value,lineEnd.value
def _getParagraphOffsets(self,offset):
lineStart=ctypes.c_int()
lineEnd=ctypes.c_int()
NVDAHelper.localLib.VBuf_getLineOffsets(self.obj.VBufHandle,offset,0,True,ctypes.byref(lineStart),ctypes.byref(lineEnd))
return lineStart.value,lineEnd.value
def _normalizeControlField(self,attrs):
tableLayout=attrs.get('table-layout')
if tableLayout:
attrs['table-layout']=tableLayout=="1"
# convert some table attributes to ints
for attr in ("table-id","table-rownumber","table-columnnumber","table-rowsspanned","table-columnsspanned"):
attrVal=attrs.get(attr)
if attrVal is not None:
attrs[attr]=int(attrVal)
isHidden=attrs.get('isHidden')
if isHidden:
attrs['isHidden']=isHidden=="1"
# Handle table row and column headers.
for axis in "row", "column":
attr = attrs.pop("table-%sheadercells" % axis, None)
if not attr:
continue
cellIdentifiers = [identifier.split(",") for identifier in attr.split(";") if identifier]
# Get the text for the header cells.
textList = []
for docHandle, ID in cellIdentifiers:
try:
start, end = self._getOffsetsFromFieldIdentifier(int(docHandle), int(ID))
except (LookupError, ValueError):
continue
textList.append(self.obj.makeTextInfo(textInfos.offsets.Offsets(start, end)).text)
attrs["table-%sheadertext" % axis] = "\n".join(textList)
if attrs.get("role") in (controlTypes.ROLE_LANDMARK, controlTypes.ROLE_REGION):
attrs['alwaysReportName'] = True
# Expose a unique ID on the controlField for quick and safe comparison using the virtualBuffer field's docHandle and ID
docHandle=attrs.get('controlIdentifier_docHandle')
ID=attrs.get('controlIdentifier_ID')
if docHandle is not None and ID is not None:
attrs['uniqueID']=(docHandle,ID)
return attrs
def _normalizeFormatField(self, attrs):
strippedCharsFromStart = attrs.get("strippedCharsFromStart")
if strippedCharsFromStart is not None:
assert strippedCharsFromStart.isdigit(), "strippedCharsFromStart isn't a digit, %r" % strippedCharsFromStart
attrs["strippedCharsFromStart"] = int(strippedCharsFromStart)
return attrs
def _getLineNumFromOffset(self, offset):
return None
def _get_fieldIdentifierAtStart(self):
return self._getFieldIdentifierFromOffset( self._startOffset)
def _getUnitOffsets(self, unit, offset):
if unit == textInfos.UNIT_CONTROLFIELD:
startOffset=ctypes.c_int()
endOffset=ctypes.c_int()
docHandle=ctypes.c_int()
ID=ctypes.c_int()
node=VBufRemote_nodeHandle_t()
NVDAHelper.localLib.VBuf_locateControlFieldNodeAtOffset(self.obj.VBufHandle,offset,ctypes.byref(startOffset),ctypes.byref(endOffset),ctypes.byref(docHandle),ctypes.byref(ID),ctypes.byref(node))
return startOffset.value,endOffset.value
elif unit == textInfos.UNIT_FORMATFIELD:
startOffset=ctypes.c_int()
endOffset=ctypes.c_int()
node=VBufRemote_nodeHandle_t()
NVDAHelper.localLib.VBuf_locateTextFieldNodeAtOffset(self.obj.VBufHandle,offset,ctypes.byref(startOffset),ctypes.byref(endOffset),ctypes.byref(node))
return startOffset.value,endOffset.value
return super(VirtualBufferTextInfo, self)._getUnitOffsets(unit, offset)
def _get_clipboardText(self):
# Blocks should start on a new line, but they don't necessarily have an end of line indicator.
# Therefore, get the text in block (paragraph) chunks and join the chunks with \r\n.
blocks = (block.strip("\r\n") for block in self.getTextInChunks(textInfos.UNIT_PARAGRAPH))
return "\r\n".join(blocks)
def activate(self):
self.obj._activatePosition(info=self)
def getMathMl(self, field):
docHandle = int(field["controlIdentifier_docHandle"])
nodeId = int(field["controlIdentifier_ID"])
obj = self.obj.getNVDAObjectFromIdentifier(docHandle, nodeId)
return obj.mathMl
class VirtualBuffer(browseMode.BrowseModeDocumentTreeInterceptor):
TextInfo=VirtualBufferTextInfo
#: Maps root identifiers (docHandle and ID) to buffers.
rootIdentifiers = weakref.WeakValueDictionary()
def __init__(self,rootNVDAObject,backendName=None):
super(VirtualBuffer,self).__init__(rootNVDAObject)
self.backendName=backendName
self.VBufHandle=None
self.isLoading=False
self.rootDocHandle,self.rootID=self.getIdentifierFromNVDAObject(self.rootNVDAObject)
self.rootIdentifiers[self.rootDocHandle, self.rootID] = self
def prepare(self):
if not self.rootNVDAObject.appModule.helperLocalBindingHandle:
# #5758: If NVDA starts with a document already in focus, there will have been no focus event to inject nvdaHelper yet.
# So at very least don't try to prepare a virtualBuffer as it will fail.
# The user will most likely need to manually move focus away and back again to allow this virtualBuffer to work.
log.debugWarning("appModule has no binding handle to injected code, can't prepare virtualBuffer yet.")
return
self.shouldPrepare=False
self.loadBuffer()
def _get_shouldPrepare(self):
return not self.isLoading and not self.VBufHandle
def terminate(self):
super(VirtualBuffer,self).terminate()
if not self.VBufHandle:
return
self.unloadBuffer()
def _get_isReady(self):
return bool(self.VBufHandle and not self.isLoading)
def loadBuffer(self):
self.isLoading = True
self._loadProgressCallLater = wx.CallLater(1000, self._loadProgress)
threading.Thread(
name=f"{self.__class__.__module__}.{self.loadBuffer.__qualname__}",
target=self._loadBuffer).start(
)
def _loadBuffer(self):
try:
if log.isEnabledFor(log.DEBUG):
startTime = time.time()
self.VBufHandle=NVDAHelper.localLib.VBuf_createBuffer(
self.rootNVDAObject.appModule.helperLocalBindingHandle,
self.rootDocHandle,self.rootID,
self.backendName
)
if not self.VBufHandle:
raise RuntimeError("Could not remotely create virtualBuffer")
except:
log.error("", exc_info=True)
queueHandler.queueFunction(queueHandler.eventQueue, self._loadBufferDone, success=False)
return
if log.isEnabledFor(log.DEBUG):
log.debug("Buffer load took %.3f sec, %d chars" % (
time.time() - startTime,
NVDAHelper.localLib.VBuf_getTextLength(self.VBufHandle)))
queueHandler.queueFunction(queueHandler.eventQueue, self._loadBufferDone)
def _loadBufferDone(self, success=True):
self._loadProgressCallLater.Stop()
del self._loadProgressCallLater
self.isLoading = False
if not success:
self.passThrough=True
return
if self._hadFirstGainFocus:
# If this buffer has already had focus once while loaded, this is a refresh.
# Translators: Reported when a page reloads (example: after refreshing a webpage).
ui.message(_("Refreshed"))
if api.getFocusObject().treeInterceptor == self:
self.event_treeInterceptor_gainFocus()
def _loadProgress(self):
# Translators: Reported while loading a document.
ui.message(_("Loading document..."))
def unloadBuffer(self):
if self.VBufHandle is not None:
try:
watchdog.cancellableExecute(NVDAHelper.localLib.VBuf_destroyBuffer, ctypes.byref(ctypes.c_int(self.VBufHandle)))
except WindowsError:
pass
self.VBufHandle=None
def isNVDAObjectPartOfLayoutTable(self,obj):
docHandle,ID=self.getIdentifierFromNVDAObject(obj)
ID=str(ID)
info=self.makeTextInfo(obj)
info.collapse()
info.expand(textInfos.UNIT_CHARACTER)
fieldCommands=[x for x in info.getTextWithFields() if isinstance(x,textInfos.FieldCommand)]
tableLayout=None
tableID=None
for fieldCommand in fieldCommands:
fieldID=fieldCommand.field.get("controlIdentifier_ID") if fieldCommand.field else None
if fieldID==ID:
tableLayout=fieldCommand.field.get('table-layout')
if tableLayout is not None:
return tableLayout
tableID=fieldCommand.field.get('table-id')
break
if tableID is None:
return False
for fieldCommand in fieldCommands:
fieldID=fieldCommand.field.get("controlIdentifier_ID") if fieldCommand.field else None
if fieldID==tableID:
tableLayout=fieldCommand.field.get('table-layout',False)
break
return tableLayout
@abstractmethod
def getNVDAObjectFromIdentifier(self, docHandle, ID):
"""Retrieve an NVDAObject for a given node identifier.
Subclasses must override this method.
@param docHandle: The document handle.
@type docHandle: int
@param ID: The ID of the node.
@type ID: int
@return: The NVDAObject.
@rtype: L{NVDAObjects.NVDAObject}
"""
raise NotImplementedError
@abstractmethod
def getIdentifierFromNVDAObject(self,obj):
"""Retreaves the virtualBuffer field identifier from an NVDAObject.
@param obj: the NVDAObject to retreave the field identifier from.
@type obj: L{NVDAObject}
@returns: a the field identifier as a doc handle and ID paire.
@rtype: 2-tuple.
"""
raise NotImplementedError
def script_refreshBuffer(self,gesture):
if scriptHandler.isScriptWaiting():
# This script may cause subsequently queued scripts to fail, so don't execute.
return
self.unloadBuffer()
self.loadBuffer()
# Translators: the description for the refreshBuffer script on virtualBuffers.
script_refreshBuffer.__doc__ = _("Refreshes the document content")
def script_toggleScreenLayout(self,gesture):
config.conf["virtualBuffers"]["useScreenLayout"]=not config.conf["virtualBuffers"]["useScreenLayout"]
if config.conf["virtualBuffers"]["useScreenLayout"]:
# Translators: Presented when use screen layout option is toggled.
ui.message(_("Use screen layout on"))
else:
# Translators: Presented when use screen layout option is toggled.
ui.message(_("Use screen layout off"))
# Translators: the description for the toggleScreenLayout script on virtualBuffers.
script_toggleScreenLayout.__doc__ = _("Toggles on and off if the screen layout is preserved while rendering the document content")
def _searchableAttribsForNodeType(self,nodeType):
pass
def _iterNodesByType(self,nodeType,direction="next",pos=None):
attribs=self._searchableAttribsForNodeType(nodeType)
if not attribs:
raise NotImplementedError
return self._iterNodesByAttribs(attribs, direction, pos,nodeType)
def _iterNodesByAttribs(self, attribs, direction="next", pos=None,nodeType=None):
offset=pos._startOffset if pos else -1
reqAttrs, regexp = _prepareForFindByAttributes(attribs)
startOffset=ctypes.c_int()
endOffset=ctypes.c_int()
if direction=="next":
direction=VBufStorage_findDirection_forward
elif direction=="previous":
direction=VBufStorage_findDirection_back
elif direction=="up":
direction=VBufStorage_findDirection_up
else:
raise ValueError("unknown direction: %s"%direction)
while True:
try:
node=VBufRemote_nodeHandle_t()
NVDAHelper.localLib.VBuf_findNodeByAttributes(self.VBufHandle,offset,direction,reqAttrs,regexp,ctypes.byref(startOffset),ctypes.byref(endOffset),ctypes.byref(node))
except:
return
if not node:
return
yield VirtualBufferQuickNavItem(nodeType,self,node,startOffset.value,endOffset.value)
offset=startOffset
def _getTableCellAt(self,tableID,startPos,row,column):
try:
return next(self._iterTableCells(tableID,row=row,column=column))
except StopIteration:
raise LookupError
def _iterTableCells(self, tableID, startPos=None, direction="next", row=None, column=None):
attrs = {"table-id": [str(tableID)]}
# row could be 0.
if row is not None:
attrs["table-rownumber"] = [str(row)]
if column is not None:
attrs["table-columnnumber"] = [str(column)]
results = self._iterNodesByAttribs(attrs, pos=startPos, direction=direction)
if not startPos and not row and not column and direction == "next":
# The first match will be the table itself, so skip it.
next(results)
for item in results:
yield item.textInfo
def _getNearestTableCell(self, tableID, startPos, origRow, origCol, origRowSpan, origColSpan, movement, axis):
# Determine destination row and column.
destRow = origRow
destCol = origCol
if axis == "row":
destRow += origRowSpan if movement == "next" else -1
elif axis == "column":
destCol += origColSpan if movement == "next" else -1
if destCol < 1:
# Optimisation: We're definitely at the edge of the column.
raise LookupError
# Optimisation: Try searching for exact destination coordinates.
# This won't work if they are covered by a cell spanning multiple rows/cols, but this won't be true in the majority of cases.
try:
return self._getTableCellAt(tableID,startPos,destRow,destCol)
except LookupError:
pass
# Cells are grouped by row, so in most cases, we simply need to search in the right direction.
for info in self._iterTableCells(tableID, direction=movement, startPos=startPos):
_ignore, row, col, rowSpan, colSpan = self._getTableCellCoords(info)
if row <= destRow < row + rowSpan and col <= destCol < col + colSpan:
return info
elif row > destRow and movement == "next":
# Optimisation: We've gone forward past destRow, so we know we won't find the cell.
# We can't reverse this logic when moving backwards because there might be a prior cell on an earlier row which spans multiple rows.
break
if axis == "row" or (axis == "column" and movement == "previous"):
# In most cases, there's nothing more to try.
raise LookupError
else:
# We're moving forward by column.
# In this case, there might be a cell on an earlier row which spans multiple rows.
# Therefore, try searching backwards.
for info in self._iterTableCells(tableID, direction="previous", startPos=startPos):
_ignore, row, col, rowSpan, colSpan = self._getTableCellCoords(info)
if row <= destRow < row + rowSpan and col <= destCol < col + colSpan:
return info
else:
raise LookupError
def _isSuitableNotLinkBlock(self, textRange):
return (textRange._endOffset - textRange._startOffset) >= self.NOT_LINK_BLOCK_MIN_LEN
def getEnclosingContainerRange(self, textRange):
formatConfig=config.conf['documentFormatting'].copy()
formatConfig.update({"reportBlockQuotes":True,"reportTables":True,"reportLists":True,"reportFrames":True})
controlFields=[]
for cmd in textRange.getTextWithFields():
if not isinstance(cmd,textInfos.FieldCommand) or cmd.command!="controlStart":
break
controlFields.append(cmd.field)
containerField=None
while controlFields:
field=controlFields.pop()
if field.getPresentationCategory(controlFields,formatConfig)==field.PRESCAT_CONTAINER or field.get("landmark"):
containerField=field
break
if not containerField: return None
docHandle=int(containerField['controlIdentifier_docHandle'])
ID=int(containerField['controlIdentifier_ID'])
offsets = textRange._getOffsetsFromFieldIdentifier(docHandle,ID)
return self.makeTextInfo(textInfos.offsets.Offsets(*offsets))
@classmethod
def changeNotify(cls, rootDocHandle, rootID):
try:
queueHandler.queueFunction(queueHandler.eventQueue, cls.rootIdentifiers[rootDocHandle, rootID]._handleUpdate)
except KeyError:
pass
def _handleUpdate(self):
"""Handle an update to this buffer.
"""
if not self.VBufHandle:
# #4859: The buffer was unloaded after this method was queued.
return
braille.handler.handleUpdate(self)
def getControlFieldForNVDAObject(self, obj):
docHandle, objId = self.getIdentifierFromNVDAObject(obj)
objId = str(objId)
info = self.makeTextInfo(obj)
info.collapse()
info.expand(textInfos.UNIT_CHARACTER)
for item in info.getTextWithFields():
if not isinstance(item, textInfos.FieldCommand) or not item.field:
continue
fieldId = item.field.get("controlIdentifier_ID")
if fieldId == objId:
return item.field
raise LookupError
def _isNVDAObjectInApplication_noWalk(self, obj):
inApp = super(VirtualBuffer, self)._isNVDAObjectInApplication_noWalk(obj)
if inApp is not None:
return inApp
# If the object is in the buffer, it's definitely not in an application.
try:
docHandle, objId = self.getIdentifierFromNVDAObject(obj)
except:
log.debugWarning("getIdentifierFromNVDAObject failed. "
"Object probably died while walking ancestors.", exc_info=True)
return None
node = VBufRemote_nodeHandle_t()
if not self.VBufHandle:
return None
try:
NVDAHelper.localLib.VBuf_getControlFieldNodeWithIdentifier(self.VBufHandle, docHandle, objId,ctypes.byref(node))
except WindowsError:
return None
if node:
return False
return None
__gestures = {
"kb:NVDA+f5": "refreshBuffer",
"kb:NVDA+v": "toggleScreenLayout",
}
|
__init__.py
|
# -*- coding: utf-8 -*-
#
# This file is part of PyBuilder
#
# Copyright 2011-2020 PyBuilder Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pybuilder.python_utils import patch_mp
patch_mp()
import sys # noqa: E402
from textwrap import dedent # noqa: E402
from types import (MethodType, # noqa: E402
FunctionType,
BuiltinFunctionType,
)
try:
from types import (WrapperDescriptorType, # noqa: E402
MethodWrapperType,
MethodDescriptorType,
ClassMethodDescriptorType,
)
except ImportError:
WrapperDescriptorType = type(object.__init__)
MethodWrapperType = type(object().__str__)
MethodDescriptorType = type(str.join)
ClassMethodDescriptorType = type(dict.__dict__['fromkeys'])
from os.path import normcase as nc, sep # noqa: E402
from io import BytesIO, StringIO # noqa: E402
from pickle import PickleError, Unpickler, UnpicklingError, HIGHEST_PROTOCOL # noqa: E402
from pickletools import dis # noqa: E402
from pybuilder.python_utils import (mp_get_context, # noqa: E402
mp_ForkingPickler as ForkingPickler,
mp_log_to_stderr as log_to_stderr,
PY2,
IS_WIN)
PICKLE_PROTOCOL_MIN = 2
PICKLE_PROTOCOL_MAX = HIGHEST_PROTOCOL
CALLABLE_TYPES = (MethodType,
FunctionType,
BuiltinFunctionType,
MethodWrapperType,
ClassMethodDescriptorType,
WrapperDescriptorType,
MethodDescriptorType,
)
if PY2:
ConnectionError = EnvironmentError
_compat_pickle = None
else:
import _compat_pickle
ctx = mp_get_context("spawn")
ctx.allow_connection_pickling()
logger = ctx.get_logger()
__all__ = ["RemoteObjectPipe", "RemoteObjectError",
"Process", "proxy_members", "PipeShutdownError", "log_to_stderr"]
BUILTIN_MODULES = set(sys.builtin_module_names)
class Process:
def __init__(self, pyenv, group=None, target=None, name=None, args=None):
self.pyenv = pyenv
self.proc = ctx.Process(group=group, target=target, name=name, args=args)
def start(self):
pyenv = self.pyenv
if PY2:
if IS_WIN:
from multiprocessing import forking as patch_module
tracker = None
else:
from billiard import spawn as patch_module
from billiard import semaphore_tracker as tracker
else:
from multiprocessing import spawn as patch_module
if not IS_WIN:
try:
from multiprocessing import semaphore_tracker as tracker
except ImportError:
from multiprocessing import resource_tracker as tracker
else:
tracker = None
# This is done to prevent polluting tracker's path with our path magic
if tracker:
tracker.getfd()
old_python_exe = patch_module._python_exe
patch_module._python_exe = pyenv.executable[0] # pyenv's actual sys.executable
old_get_command_line = patch_module.get_command_line
def patched_get_command_line(**kwds):
cmd_line = old_get_command_line(**kwds)
result = list(pyenv.executable) + cmd_line[1:]
logger.debug("Starting process with %r", result)
return result
patch_module.get_command_line = patched_get_command_line
old_preparation_data = patch_module.get_preparation_data
def patched_preparation_data(name):
d = old_preparation_data(name)
sys_path = d["sys_path"]
# Python 2
if sys_path is sys.path:
sys_path = list(sys_path)
d["sys_path"] = sys_path
exec_prefix = nc(sys.exec_prefix) + sep
trailing_paths = []
for idx, path in enumerate(sys_path):
nc_path = nc(path)
if nc_path.startswith(exec_prefix):
sys_path[idx] = pyenv.env_dir + sep + path[len(exec_prefix):]
trailing_paths.append(path)
# Push current exec_prefix paths to the very end
sys_path.extend(trailing_paths)
logger.debug("Process sys.path will be: %r", sys_path)
return d
patch_module.get_preparation_data = patched_preparation_data
try:
return self.proc.start()
finally:
patch_module._python_exe = old_python_exe
patch_module.get_command_line = old_get_command_line
patch_module.get_preparation_data = old_preparation_data
def terminate(self):
return self.proc.terminate()
def kill(self):
return self.proc.kill()
def join(self, timeout=None):
return self.proc.join(timeout)
def is_alive(self):
return self.proc.is_alive()
def close(self):
return self.proc.close()
@property
def name(self):
return self.proc.name
@name.setter
def name(self, name):
self.proc.name = name
@property
def daemon(self):
return self.proc.daemon
@daemon.setter
def daemon(self, daemonic):
self.proc.daemon = daemonic
@property
def authkey(self):
return self.proc.authkey
@authkey.setter
def authkey(self, authkey):
self.proc.authkey = authkey
@property
def exitcode(self):
return self.proc.exitcode
@property
def ident(self):
return self.proc.ident
pid = ident
@property
def sentinel(self):
return self.proc.sentinel
def __repr__(self):
return repr(self.proc)
class ProxyDef:
def __init__(self, remote_id, module_name, type_name, is_type, methods, fields, spec_fields):
self.remote_id = remote_id
self.module_name = module_name
self.type_name = type_name
self.is_type = is_type
self.methods = methods
self.fields = fields
self.spec_fields = spec_fields
def __repr__(self):
return "ProxyDef[remote_id=%r, module_name=%r, type_name=%r, is_type=%r," \
" methods=%r, fields=%r, spec_fields=%r]" % (self.remote_id,
self.module_name,
self.type_name,
self.is_type,
self.methods,
self.fields,
self.spec_fields)
def make_proxy_type(self):
"""
Return a proxy type whose methods are given by `exposed`
"""
remote_id = self.remote_id
methods = tuple(self.methods)
fields = tuple(self.fields)
dic = {}
body = ""
for meth in methods:
body += dedent("""
def %s(self, *args, **kwargs):
return self._BaseProxy__rop.call(%r, %r, args, kwargs)""" % (meth, remote_id, meth))
for field in fields:
body += dedent("""
def %s_getter(self):
return self._BaseProxy__rop.call_getattr(%r, %r)
def %s_setter(self, value):
return self._BaseProxy__rop.call_setattr(%r, %r, value)
%s = property(%s_getter, %s_setter)""" % (field, remote_id, field, field,
remote_id, field, field, field, field))
exec(body, dic)
if self.is_type:
proxy_type = type(self.type_name, (_BaseProxyType, object), dic)
else:
proxy_type = type(self.type_name, (_BaseProxy, object), dic)
proxy_type.__module__ = self.module_name
proxy_type.__name__ = self.type_name
proxy_type.__methods__ = methods
proxy_type.__fields__ = fields
return proxy_type
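# Sketch of the generated source (for a hypothetical ProxyDef with methods=("ping",)
# and fields=("status",)); the real text comes from the %-formatting above:
#   def ping(self, *args, **kwargs):
#       return self._BaseProxy__rop.call(<remote_id>, 'ping', args, kwargs)
#   def status_getter(self): ...
#   def status_setter(self, value): ...
#   status = property(status_getter, status_setter)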
PICKLE_PID_TYPE_REMOTE_OBJ = 0
PICKLE_PID_TYPE_REMOTE_BACKREF = 1
PICKLE_PID_TYPE_REMOTE_EXC_TB = 5
class RemoteObjectUnpickler(Unpickler, object):
def __init__(self, *args, **kwargs):
self._rop = kwargs.pop("_rop") # type: _RemoteObjectPipe
super(RemoteObjectUnpickler, self).__init__(*args, **kwargs)
def persistent_load(self, pid):
if isinstance(pid, tuple):
remote_type = pid[0]
if remote_type == PICKLE_PID_TYPE_REMOTE_OBJ:
remote_id = pid[1]
proxy_def = self._rop.get_remote_proxy_def(remote_id)
return self._rop.get_remote_proxy(proxy_def)
if remote_type == PICKLE_PID_TYPE_REMOTE_BACKREF:
remote_id = pid[1]
return self._rop.get_remote_obj_by_id(remote_id)
if remote_type == PICKLE_PID_TYPE_REMOTE_EXC_TB:
exc_payload = pid[1]
return rebuild_exception(*exc_payload)
raise UnpicklingError("unsupported persistent id encountered: %r" % pid)
@classmethod
def loads(cls, buf, _rop, *args, **kwargs):
f = BytesIO(buf)
return cls(f, *args, _rop=_rop, **kwargs).load()
_PICKLE_SKIP_PID_CHECK_TYPES = {type(None), bool, int, float, complex, str, bytes, bytearray, list, tuple, dict, set}
class RemoteObjectPickler(ForkingPickler, object):
def __init__(self, *args, **kwargs):
self._rop = kwargs.pop("_rop") # type: _RemoteObjectPipe
self._verify_types = set()
self.exc_persisted = []
if PY2:
kwargs["protocol"] = self._rop.pickle_version # This is for full backwards compatibility with Python 2
super(RemoteObjectPickler, self).__init__(*args, **kwargs)
else:
super(RemoteObjectPickler, self).__init__(args[0], self._rop.pickle_version, *args[1:])
def persistent_id(self, obj):
t_obj = obj if isinstance(obj, type) else type(obj)
if t_obj in _PICKLE_SKIP_PID_CHECK_TYPES:
return None
exc_persisted = self.exc_persisted
# Trick: an exception is persisted via a pid that itself contains the exception
# object; when pickle then serializes that pid, persistent_id is called on the same
# exception again, so it is skipped on that second pass and pickled normally.
if obj in exc_persisted:
exc_persisted.remove(obj)
return None
if isinstance(obj, _BaseProxy):
return PICKLE_PID_TYPE_REMOTE_BACKREF, obj._BaseProxy__proxy_def.remote_id
if issubclass(t_obj, _BaseProxyType):
return PICKLE_PID_TYPE_REMOTE_BACKREF, t_obj._BaseProxy__proxy_def.remote_id
rop = self._rop
remote_obj = rop.get_remote(obj)
if remote_obj is not None:
return PICKLE_PID_TYPE_REMOTE_OBJ, remote_obj.remote_id
if isinstance(obj, BaseException):
exc_persisted.append(obj)
return PICKLE_PID_TYPE_REMOTE_EXC_TB, reduce_exception(obj) # exception with traceback
if t_obj not in rop._verified_types:
if t_obj.__module__ not in BUILTIN_MODULES:
self._verify_types.add((t_obj.__module__, t_obj.__name__))
return None
@classmethod
def dumps(cls, obj, _rop, *args, **kwargs):
buf = BytesIO()
pickler = cls(buf, *args, _rop=_rop, **kwargs)
pickler.dump(obj)
if logger.getEffectiveLevel() == 1:
buf.seek(0)
dis_log = StringIO()
dis(buf, dis_log)
logger.debug(dis_log.getvalue())
return buf.getvalue(), pickler._verify_types
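# Round-trip sketch (descriptive only): persistent_id above tags registered objects as
# (PICKLE_PID_TYPE_REMOTE_OBJ, remote_id) and proxies travelling back as
# (PICKLE_PID_TYPE_REMOTE_BACKREF, remote_id); RemoteObjectUnpickler.persistent_load
# resolves those tags into proxy instances or the original local objects, while
# exceptions are carried together with their tracebacks via PICKLE_PID_TYPE_REMOTE_EXC_TB.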
def rebuild_exception(ex, tb):
if tb:
setattr(ex, "__traceback__", tb)
return ex
def reduce_exception(ex):
return ex, getattr(ex, "__traceback__", None)
def proxy_members(obj, public=True, protected=True, add_callable=True, add_str=True):
'''
Collect the members of `obj` to expose through a proxy.
Returns a (methods, fields, spec_fields) tuple covering public and, optionally,
protected members, plus __call__/__str__ when requested.
'''
methods = []
fields = []
spec_fields = {}
for name in dir(obj):
is_public = name[0] != '_'
is_protected = name[0] == '_' and (len(name) == 1 or name[1] != '_')
is_callable = name == "__call__"
is_str = name == "__str__"
if (public and is_public or
protected and is_protected or
is_callable and add_callable or
is_str and add_str):
member = getattr(obj, name)
if isinstance(member, CALLABLE_TYPES):
methods.append(name)
else:
fields.append(name)
if isinstance(obj, BaseException):
if hasattr(obj, "__cause__"):
fields.append("__cause__")
if hasattr(obj, "__traceback__"):
fields.append("__traceback__")
if hasattr(obj, "__context__"):
fields.append("__context__")
if hasattr(obj, "__suppress_context__"):
fields.append("__suppress_context__")
if isinstance(obj, type) and not PY2:
spec_fields["__qualname__"] = obj.__qualname__
return methods, fields, spec_fields
class RemoteObjectPipe:
def expose(self, name, obj, methods=None, fields=None, remote=True):
"""Same as `RemoteObjectManager.expose`"""
raise NotImplementedError
def hide(self, name):
"""Same as `RemoteObjectManager.hide`"""
raise NotImplementedError
def register_remote(self, obj, methods=None, fields=None):
"""Same as `RemoteObjectManager.register_remote`"""
raise NotImplementedError
def register_remote_type(self, t):
"""Same as `RemoteObjectManager.register_remote_type`"""
raise NotImplementedError
def receive(self):
"""Listens for incoming remote requests"""
raise NotImplementedError
def close(self, exc=None):
"""Closes current pipe.
You can optionally attach an exception to pass to the other end of the pipe."""
raise NotImplementedError
def remote_close_cause(self):
raise NotImplementedError
@classmethod
def new_pipe(cls):
return _RemoteObjectSession().new_pipe()
def obj_id(obj):
if isinstance(obj, type):
return obj, id(obj)
return type(obj), id(obj)
class id_dict(dict):
def __getitem__(self, key):
key = id(key)
return super(id_dict, self).__getitem__(key)[1]
def get(self, key, default=None):
key = id(key)
val = super(id_dict, self).get(key, default)
if val is default:
return val
return val[1]
def __setitem__(self, key, value):
obj = key
key = id(key)
return super(id_dict, self).__setitem__(key, (obj, value))
def __delitem__(self, key):
key = id(key)
return super(id_dict, self).__delitem__(key)
def __contains__(self, key):
return super(id_dict, self).__contains__(id(key))
def keys(self):
for k, v in super(id_dict, self).items():
yield v[0]
def values(self):
for k, v in super(id_dict, self).items():
yield v[1]
def items(self):
for k, v in super(id_dict, self).items():
yield v[0], v[1]
def __iter__(self):
for k, v in super(id_dict, self).items():
yield v[0]
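# id_dict keys entries by object identity rather than equality, so unhashable or
# equal-but-distinct objects get separate slots. Minimal sketch:
#   d = id_dict()
#   a, b = [], []          # equal lists, different identities (and not hashable)
#   d[a] = "first"; d[b] = "second"
#   assert d[a] == "first" and d[b] == "second"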
class _RemoteObjectSession:
def __init__(self):
self._remote_id = 0
# Mapping name (str): object
self._exposed_objs = dict()
# Mapping remote ID (int): object
self._remote_objs_ids = dict()
# Mapping object: ProxyDef
self._remote_objs = id_dict() # instances to be proxied
# All types to be always proxied
self._remote_types = set() # classes to be proxied
def new_pipe(self):
# type:(object) -> _RemoteObjectPipe
return _RemoteObjectPipe(self)
def expose(self, name, obj, remote=True, methods=None, fields=None):
exposed_objs = self._exposed_objs
if name in exposed_objs:
raise ValueError("%r is already exposed" % name)
exposed_objs[name] = obj
if remote:
self.register_remote(obj, methods, fields)
def hide(self, name):
self._exposed_objs.pop(name)
def register_remote(self, obj, methods=None, fields=None, spec_fields=None):
remote_id = self._remote_id
self._remote_id = remote_id + 1
if methods is None or fields is None or spec_fields is None:
obj_methods, obj_fields, obj_spec_fields = proxy_members(obj)
if methods is None:
methods = obj_methods
if fields is None:
fields = obj_fields
if spec_fields is None:
spec_fields = obj_spec_fields
if isinstance(obj, type):
obj_type = obj
else:
obj_type = type(obj)
proxy = ProxyDef(remote_id, obj_type.__module__, obj_type.__name__, obj_type is obj,
methods, fields, spec_fields)
self._remote_objs[obj] = proxy
self._remote_objs_ids[remote_id] = obj
logger.debug("registered proxy %r for %r", proxy, obj_id(obj))
return proxy
def register_remote_type(self, t):
self._remote_types.add(t)
def get_exposed_by_name(self, name):
return self._exposed_objs.get(name, None)
def get_proxy_by_name(self, name):
obj = self._exposed_objs.get(name, None)
if obj is not None:
return self._remote_objs[obj]
return None
def get_proxy_by_id(self, remote_id):
obj = self._remote_objs_ids.get(remote_id, None)
if obj is None:
return
return self._remote_objs[obj]
def get_remote_obj_by_id(self, remote_id):
return self._remote_objs_ids.get(remote_id, None)
def get_remote(self, obj):
try:
proxy_def = self._remote_objs.get(obj, None)
except TypeError:
return None
if proxy_def:
return proxy_def
if not isinstance(obj, type):
t_obj = type(obj)
if t_obj in self._remote_types:
logger.debug("%r is instance of type %r, which will register as remote", obj_id(obj), t_obj)
return self.register_remote(obj)
for remote_type in self._remote_types:
if isinstance(obj, remote_type):
logger.debug("%r is instance of type %r, which will register as remote", obj_id(obj), remote_type)
return self.register_remote(obj)
else:
if obj in self._remote_types:
logger.debug("%r will register as remote", obj)
return self.register_remote(obj)
for remote_type in self._remote_types:
if issubclass(obj, remote_type):
logger.debug("%r is subtype of type %r, which will register as remote", obj_id(obj), remote_type)
return self.register_remote(obj)
class RemoteObjectError(Exception):
pass
class PipeShutdownError(RemoteObjectError):
def __init__(self, cause=None):
self.cause = cause
class _BaseProxy:
def __init__(self, __rop, __proxy_def):
self.__rop = __rop
self.__proxy_def = __proxy_def
class _BaseProxyType:
pass
ROP_CLOSE = 0
ROP_CLOSE_CLOSED = 1
ROP_PICKLE_VERSION = 2
ROP_GET_EXPOSED = 5
ROP_GET_EXPOSED_RESULT = 6
ROP_GET_PROXY_DEF = 7
ROP_GET_PROXY_DEF_RESULT = 8
ROP_VERIFY_TYPES = 9
ROP_VERIFY_TYPES_RESULT = 10
ROP_REMOTE_ACTION = 20
ROP_REMOTE_ACTION_CALL = 21
ROP_REMOTE_ACTION_GETATTR = 22
ROP_REMOTE_ACTION_SETATTR = 23
ROP_REMOTE_ACTION_REMOTE_ERROR = 24
ROP_REMOTE_ACTION_RETURN = 25
ROP_REMOTE_ACTION_EXCEPTION = 26
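# Wire-format notes (added for clarity, derived from _recv/call below): every message
# is a tuple whose first element is one of the ROP_* codes, e.g.
#   (ROP_REMOTE_ACTION, remote_id, "method_name", ROP_REMOTE_ACTION_CALL, args, kwargs)
# is answered by (ROP_REMOTE_ACTION_RETURN, value) or (ROP_REMOTE_ACTION_EXCEPTION, exc);
# (ROP_VERIFY_TYPES, [(module, name), ...]) is answered by
# (ROP_VERIFY_TYPES_RESULT, importable, need_proxy); and (ROP_CLOSE, exc) is
# acknowledged with (ROP_CLOSE_CLOSED, None) before both ends shut down.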
class _RemoteObjectPipe(RemoteObjectPipe):
def __init__(self, ros):
self._returns_pending = 0
self._remote_close_cause = None
self._ros = ros # type: _RemoteObjectSession
self._conn_c = self._conn_p = None
self._remote_proxy_defs = {}
self._remote_proxies = {}
self._verified_types = set()
self.id = None
self.conn = None # type: ctx.Connection
self.pickle_version = PICKLE_PROTOCOL_MIN
def __del__(self): # noqa
# DO NOT REMOVE
# This is required on Python 2.7 to ensure that the object is properly GC'ed
# and that there isn't an attempt to close an FD with a stale object
pass
def get_exposed(self, exposed_name):
self._send_obj((ROP_GET_EXPOSED, exposed_name))
return self._recv() # type: _BaseProxy
def get_remote_proxy_def(self, remote_id):
remote_proxy_defs = self._remote_proxy_defs
proxy_def = remote_proxy_defs.get(remote_id, None)
if proxy_def is None:
proxy_def = self.request_remote_proxy_def(remote_id)
remote_proxy_defs[remote_id] = proxy_def
return proxy_def
def get_remote_proxy(self, proxy_def):
remote_id = proxy_def.remote_id
remote_proxies = self._remote_proxies
remote_proxy = remote_proxies.get(remote_id, None)
if remote_proxy is None:
remote_proxy_type = proxy_def.make_proxy_type()
if proxy_def.is_type:
remote_proxy = remote_proxy_type
remote_proxy._BaseProxy__rop = self
remote_proxy._BaseProxy__proxy_def = proxy_def
else:
remote_proxy = remote_proxy_type(self, proxy_def)
for k, v in proxy_def.spec_fields.items():
setattr(remote_proxy, k, v)
remote_proxies[remote_id] = remote_proxy
logger.debug("registered local proxy for remote ID %d: %r", remote_id, remote_proxy)
return remote_proxy
def get_remote(self, obj):
return self._ros.get_remote(obj)
def get_remote_obj_by_id(self, remote_id):
return self._ros.get_remote_obj_by_id(remote_id)
def expose(self, name, obj, remote=True, methods=None, fields=None):
return self._ros.expose(name, obj, remote, methods, fields)
def hide(self, name):
self._ros.hide(name)
def register_remote(self, obj, methods=None, fields=None):
return self._ros.register_remote(obj, methods, fields)
def register_remote_type(self, t):
return self._ros.register_remote_type(t)
def close_client_side(self):
"""Ensures that after the child process is spawned the parent relinquishes FD of the child's side pipe"""
if self._conn_p and self._conn_c:
self._conn_c.close()
self._conn_c = None
self._conn_p = None
def close(self, exc=None):
if not self.conn.closed:
try:
self._send_obj((ROP_CLOSE, exc))
self._recv_obj()
except PipeShutdownError:
pass
finally:
self._close()
def _close(self):
try:
self.conn.close()
except OSError:
pass
def remote_close_cause(self):
return self._remote_close_cause
def __getstate__(self):
if self.conn:
raise PickleError("already has been pickled once")
conn_p, conn_c = ctx.Pipe(True)
self._conn_p = conn_p
self._conn_c = conn_c
self.conn = conn_p
pipe_id = id(conn_p)
self.id = ("s", pipe_id)
return ("r", pipe_id), conn_c, PICKLE_PROTOCOL_MAX
def __setstate__(self, state):
self.id = state[0]
conn = state[1]
        pickle_max = min(PICKLE_PROTOCOL_MAX, state[2])  # negotiate the highest protocol BOTH ends support
self._conn_c = conn
self._conn_p = None
self.conn = conn
self._ros = _RemoteObjectSession()
self._remote_proxy_defs = {}
self._remote_proxies = {}
self._verified_types = set()
self._remote_close_cause = None
# Not an error. We HAVE to make sure the first send uses minimally-supported Pickle version
self.pickle_version = PICKLE_PROTOCOL_MIN
self._send_obj((ROP_PICKLE_VERSION, pickle_max))
self.pickle_version = pickle_max
logger.debug("selected pickle protocol v%r", pickle_max)
def __repr__(self):
return "RemoteObjectPipe [id=%r, type=%r, conn=%r, conn_fd=%r]" % (
self.id,
"pending" if not self.conn else "parent" if self._conn_p else "client",
self.conn, self.conn._handle if self.conn and hasattr(self.conn, "_handle") else None)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def _recv(self):
"""Returns True if shutdown received, False otherwise"""
while True:
data = self._recv_obj()
action_type = data[0]
if action_type == ROP_REMOTE_ACTION:
remote_id = data[1]
obj = self._ros.get_remote_obj_by_id(remote_id)
if obj is None:
self._send_obj(
(ROP_REMOTE_ACTION_REMOTE_ERROR, remote_id, "Remote object %r is gone", (remote_id,)))
remote_name = data[2]
remote_name_action = data[3]
if remote_name_action == ROP_REMOTE_ACTION_CALL:
call_args = data[4]
call_kwargs = data[5]
if isinstance(obj, type) and remote_name.startswith("__"):
func = getattr(type(obj), remote_name, None)
call_args = [obj] + list(call_args)
else:
func = getattr(obj, remote_name, None)
if not callable(func):
self._send_obj((ROP_REMOTE_ACTION_REMOTE_ERROR, remote_id,
"%r is not a callable",
(remote_name,)))
logger.debug("calling %r.%r (remote ID %d) with args=%r, kwargs=%r",
remote_name,
obj,
remote_id,
call_args, call_kwargs)
return_val = return_exc = None
try:
return_val = func(*call_args, **call_kwargs)
except SystemExit as e:
raise e
except KeyboardInterrupt as e:
raise e
except Exception as e:
return_exc = e
if return_exc is not None:
self._send_obj((ROP_REMOTE_ACTION_EXCEPTION, return_exc))
else:
self._send_obj((ROP_REMOTE_ACTION_RETURN, return_val))
continue
if remote_name_action == ROP_REMOTE_ACTION_GETATTR:
return_val = return_exc = None
try:
return_val = getattr(obj, remote_name, None)
except SystemExit as e:
raise e
except KeyboardInterrupt as e:
raise e
except Exception as e:
return_exc = e
if return_exc is not None:
self._send_obj((ROP_REMOTE_ACTION_EXCEPTION, return_exc))
else:
self._send_obj((ROP_REMOTE_ACTION_RETURN, return_val))
continue
if remote_name_action == ROP_REMOTE_ACTION_SETATTR:
return_val = return_exc = None
try:
setattr(obj, remote_name, data[4])
except SystemExit as e:
raise e
except KeyboardInterrupt as e:
raise e
except Exception as e:
return_exc = e
if return_exc is not None:
self._send_obj((ROP_REMOTE_ACTION_EXCEPTION, return_exc))
else:
self._send_obj((ROP_REMOTE_ACTION_RETURN, return_val))
continue
if action_type == ROP_VERIFY_TYPES_RESULT:
new_verified = data[1]
proxy_types = data[2]
verified_types = self._verified_types
pickle_version = self.pickle_version
for module, name in new_verified:
cls = find_class(module, name, pickle_version)
verified_types.add(cls)
for module, name in proxy_types:
cls = find_class(module, name, pickle_version)
self.register_remote_type(cls)
return
if action_type == ROP_VERIFY_TYPES:
verify_types = data[1]
need_proxy = []
new_verified = []
if verify_types:
verified_types = self._verified_types
pickle_version = self.pickle_version
for module_name in verify_types:
module, name = module_name
try:
cls = find_class(module, name, pickle_version)
verified_types.add(cls)
new_verified.append(module_name)
except Exception:
need_proxy.append(module_name)
self._send_obj((ROP_VERIFY_TYPES_RESULT, new_verified, need_proxy))
continue
if action_type == ROP_REMOTE_ACTION_REMOTE_ERROR:
remote_id = data[1]
msg = data[2]
args = data[3]
raise RemoteObjectError(msg % args)
if action_type == ROP_REMOTE_ACTION_RETURN:
return_val = data[1]
return return_val
if action_type == ROP_REMOTE_ACTION_EXCEPTION:
return_exc = data[1]
raise return_exc
if action_type == ROP_GET_EXPOSED:
exposed_name = data[1]
exposed = self._ros.get_exposed_by_name(exposed_name)
self._send_obj((ROP_GET_EXPOSED_RESULT, exposed))
continue
if action_type == ROP_GET_EXPOSED_RESULT:
return data[1]
if action_type == ROP_GET_PROXY_DEF:
remote_id = data[1]
proxy_def = self._ros.get_proxy_by_id(remote_id) # type: ProxyDef
logger.debug("request for proxy with remote ID %d is returning %r", remote_id, proxy_def)
self._send_obj((ROP_GET_PROXY_DEF_RESULT, proxy_def))
continue
if action_type == ROP_GET_PROXY_DEF_RESULT:
return data[1]
if action_type == ROP_CLOSE:
self._set_remote_close_cause(data[1])
self._send_obj((ROP_CLOSE_CLOSED, None))
try:
self._recv_obj(suppress_error=True)
finally:
try:
self._close()
finally:
raise PipeShutdownError()
if action_type == ROP_PICKLE_VERSION:
self.pickle_version = data[1]
logger.debug("selected pickle protocol v%r", self.pickle_version)
return
raise RuntimeError("received data I can't understand: %r" % (data,))
receive = _recv
def request_remote_proxy_def(self, remote_id):
self._send_obj((ROP_GET_PROXY_DEF, remote_id))
return self._recv()
def call(self, remote_id, remote_name, call_args, call_kwargs):
try:
self._send_obj((ROP_REMOTE_ACTION, remote_id, remote_name, ROP_REMOTE_ACTION_CALL, call_args, call_kwargs))
return self._recv()
except ConnectionError as e:
raise RemoteObjectError(e)
def call_getattr(self, remote_id, remote_name):
try:
self._send_obj((ROP_REMOTE_ACTION, remote_id, remote_name, ROP_REMOTE_ACTION_GETATTR))
return self._recv()
except ConnectionError as e:
raise RemoteObjectError(e)
def call_setattr(self, remote_id, remote_name, value):
try:
self._send_obj((ROP_REMOTE_ACTION, remote_id, remote_name, ROP_REMOTE_ACTION_SETATTR, value))
return self._recv()
except ConnectionError as e:
raise RemoteObjectError(e)
def _set_remote_close_cause(self, e):
if self._remote_close_cause is None:
self._remote_close_cause = e
def _dump(self, obj):
while True:
buf, verify_types = RemoteObjectPickler.dumps(obj, self)
if verify_types:
self._send_obj((ROP_VERIFY_TYPES, verify_types))
self._recv()
else:
return buf
if PY2 and IS_WIN:
# Python 2 on Windows uses Python multiprocessing
def _send_obj(self, obj):
"""Send a (picklable) object"""
if self.conn.closed:
raise OSError("handle is closed")
buf = self._dump(obj)
logger.debug("sending %r", obj)
try:
self.conn.send_bytes(buf)
except (ConnectionError, EOFError) as e:
logger.debug("failed to send %r", obj, exc_info=e)
try:
self._set_remote_close_cause(e)
raise PipeShutdownError()
finally:
self._close()
def _recv_obj(self, suppress_error=False):
"""Receive a (picklable) object"""
if self.conn.closed:
raise OSError("handle is closed")
try:
buf = self.conn.recv_bytes()
except (ConnectionError, EOFError) as e:
if suppress_error:
return
logger.debug("receive has failed", exc_info=e)
try:
self._set_remote_close_cause(e)
raise PipeShutdownError()
finally:
self._close()
obj = RemoteObjectUnpickler.loads(buf, self)
logger.debug("received %r", obj)
return obj
else:
        # Everything else (Python 2 on Linux uses Billiard, which is API-compatible with Python 3's multiprocessing)
def _send_obj(self, obj):
"""Send a (picklable) object"""
self.conn._check_closed()
buf = self._dump(obj)
logger.debug("sending %r", obj)
try:
self.conn._send_bytes(buf)
except (ConnectionError, EOFError) as e:
logger.debug("failed to send %r", obj, exc_info=e)
try:
self._set_remote_close_cause(e)
raise PipeShutdownError()
finally:
self._close()
def _recv_obj(self, suppress_error=False):
"""Receive a (picklable) object"""
self.conn._check_closed()
try:
buf = self.conn._recv_bytes()
except (ConnectionError, EOFError) as e:
if suppress_error:
return
logger.debug("receive has failed", exc_info=e)
try:
self._set_remote_close_cause(e)
raise PipeShutdownError()
finally:
self._close()
obj = RemoteObjectUnpickler.loads(buf.getvalue(), self)
logger.debug("received %r", obj)
return obj
def find_class(module, name, proto):
if proto < 3 and _compat_pickle:
if (module, name) in _compat_pickle.NAME_MAPPING:
module, name = _compat_pickle.NAME_MAPPING[(module, name)]
elif module in _compat_pickle.IMPORT_MAPPING:
module = _compat_pickle.IMPORT_MAPPING[module]
__import__(module, level=0)
if proto >= 4:
return _getattribute(sys.modules[module], name)[0]
else:
return getattr(sys.modules[module], name)
def _getattribute(obj, name):
for subpath in name.split('.'):
if subpath == '<locals>':
raise AttributeError("Can't get local attribute {!r} on {!r}"
.format(name, obj))
try:
parent = obj
obj = getattr(obj, subpath)
except AttributeError:
raise AttributeError("Can't get attribute {!r} on {!r}"
.format(name, obj))
return obj, parent
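# Illustrative sketch (not part of the original module): find_class resolves a
# (module, name) pair the way the pickle machinery does, applying the Python-2
# compatibility name mapping for protocols below 3 and dotted attribute lookup
# for protocol 4 and above.
def _find_class_example():
    ordered_dict = find_class("collections", "OrderedDict", 4)
    assert ordered_dict is __import__("collections").OrderedDict
    return ordered_dict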
|
status_test.py
|
# -*- coding: utf-8 -*-
# @Time    : 2021/1/1 4:50 PM
# @Author : JakeShihao Luo
# @Email : jakeshihaoluo@gmail.com
# @File : 0.py
# @Software: PyCharm
from scanner import *
from tello_node import *
import multiprocessing
import numpy as np
import copy
import re
import socket  # used directly below; the scanner/tello_node star-imports may also provide it
import time    # used directly below
def scheduler(tello_node, permission_flag):
print('scheduler thread start...')
while True:
permission_list = []
ban_list = []
target = {}
tmp = []
candidate_list = list(tello_node.keys())
candidate_dict = {}
tmp2 = None
for key in tello_node.keys():
target[key] = tello_node[key].get_target()
# print('target: \n', target)
while True:
# for key in candidate_dict.keys():
# print(len(candidate_dict[key]))
if len(candidate_list) == 0:
# permission_list.append(candidate_list[0])
break
candidate_list2 = copy.deepcopy(candidate_list)
for key in candidate_list2:
for key2 in candidate_list:
if key == key2:
pass
else:
d = np.linalg.norm(np.array(target[key][0:2]) - np.array(target[key2][0:2]), 2)
if d <= 150:
tmp.append(key2)
if len(tmp) == 0:
permission_list.append(key)
candidate_list.remove(key)
else:
candidate_dict[key] = tmp
tmp = []
var = 1
for key in candidate_list:
if len(candidate_dict[key]) >= var:
tmp2 = key
# print('inside tmp2 is', tmp2)
var = len(candidate_dict[key])
# print('outside tmp2 is', tmp2)
if tmp2 is not None:
ban_list.append(tmp2)
if tmp2 in candidate_list:
candidate_list.remove(tmp2)
# print('permission list', permission_list)
# print('candidate list', candidate_list)
# print('ban list', ban_list)
time.sleep(0.1)
# print('permission list', permission_list)
# print('candidate list', candidate_list)
# print('ban list', ban_list)
for key in permission_list:
permission_flag[key].value = 1
# print('permission flag of {} is {}'.format(key, permission_flag[key].value))
for key in ban_list:
if key is None:
pass
else:
permission_flag[key].value = 0
# for key in permission_flag.keys():
# print('key: {}, value:{}'.format(key, permission_flag[key].value))
time.sleep(0.5)
def received_ok(kwargs):
soc_res = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
soc_res.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
soc_res.bind(('', 8889))
while True:
try:
response, ip = soc_res.recvfrom(1024)
ip = ''.join(str(ip[0]))
res = response.decode(encoding='utf-8', errors='ignore').upper()
            if res == 'OK' or res == 'OUT OF RANGE':  # res was upper-cased above, so compare in upper case
with kwargs[ip].get_lock():
kwargs[ip].value = 1
# print('in received ok, set res of {} from 0 to {}'.format(ip, kwargs[ip].value))
elif 'A' in res and 'G' in res and 'X' in res:
t = re.split(':|;', res)
print(t[1:-1:2])
else:
print('RES from {}:{}'.format(ip, response.decode(encoding='utf-8', errors='ignore')))
# print('in received ok, main alg {}'.format(main_flag.value))
except Exception:
            print('Exception occurred in received_ok thread...')
# path1 = [[240, 0, 240, 0],
# [500, 0, 240, 90],
# [500, 700, 240, 180],
# [240, 700, 240, 270]]
path2 = [[240, 0, 240, 0],
[500, 0, 240, 90],
[500, 350, 240, 180],
[240, 350, 240, 270]]
path1 = [[600, 700, 240, 90],
[600, 800, 240, 180],
[240, 800, 240, 270],
[240, 500, 240, 0],
[600, 500, 240, 90]]
path3 = [[240, 0, 240, 0],
[500, 0, 240, 90],
[500, 350, 240, 180],
[240, 350, 240, 270]]
path4 = [[600, 700, 240, 90],
[600, 800, 240, 180],
[240, 800, 240, 270],
[240, 500, 240, 0],
[600, 500, 240, 90]]
path = [path1, path2, path3, path4]
num = 1
Node = {}
Res_flag = {}
Permission_flag = {}
Target = {}
scanner = Scanner('192.168.1.')
scanner.find_available_tello(num)
tello_list = scanner.get_tello_info()
main_thread_flag = multiprocessing.Value('i', 0)
for tello in tello_list:
Res_flag[tello[0]] = multiprocessing.Value('i', 0)
Permission_flag[tello[0]] = multiprocessing.Value('i', 0)
rec_thread = multiprocessing.Process(target=received_ok, args=(Res_flag,), daemon=True)
rec_thread.start()
for i in range(num):
Node[tello_list[i][0]] = TelloNode(tello_list[i], Res_flag[tello_list[i][0]],
main_thread_flag, Permission_flag[tello_list[i][0]], 0)
# ini_pose = path[i][0].copy()
# ini_pose[2] = 85
Node[tello_list[i][0]].init_path(path[i], [240, 100, 85, 270])
Node[tello_list[i][0]].run()
# Node[tello_list[i][0]].takeoff()
per_thread = multiprocessing.Process(target=scheduler, args=(Node, Permission_flag), daemon=True)
per_thread.start()
old = time.time()
old1 = time.time()
old2 = time.time()
face_flag = 0
target_pose = None
lock_flag = np.zeros(num)
# Node[tello_list[0][0]].send_command('>streamon')
while True:
Node[tello_list[0][0]].send_command('>acceleration?')
# Node[tello_list[0][0]].send_command('>attitude?')
time.sleep(0.1)
|
nxs_redis_queue.py
|
import time
import pickle
import numpy as np
from threading import Thread
from typing import Any, List, Dict
from nxs_libs.queue import NxsQueuePuller, NxsQueuePusher
from nxs_utils.nxs_helper import init_redis_client
from nxs_utils.logging import write_log, NxsLogLevel
class NxsRedisQueuePuller(NxsQueuePuller):
def __init__(
self,
address: str,
port: int,
password: str,
is_using_ssl: bool,
topic: str,
**kwargs,
) -> None:
super().__init__()
self._address = address
self._port = port
self._password = password
self._is_using_ssl = is_using_ssl
self._session_uuid = ""
if "session_uuid" in kwargs:
self._session_uuid: str = kwargs["session_uuid"]
self._topic = (
f"{topic}_{self._session_uuid}" if self._session_uuid != "" else topic
)
self._client = init_redis_client(
self._address, self._port, self._password, self._is_using_ssl
)
self._log_level = NxsLogLevel.INFO
self._logging_prefix = f"NxsRedisQueuePuller_{self._topic}"
"""
        The current design stores the number of partitions for a topic under the topic's own key.
        Depending on that count, the actual data is spread across multiple keys such as topic_0, topic_1, etc.
"""
self._check_num_partitions_period_secs = 3
self._check_num_partitions_t0: float = time.time()
self._num_partitions = self._get_topic_num_partitions()
self._count = 0 # number of items returned to user
self._buf_size = 1 # maximum size of shared data store
self._buf = [] # this is used as shared data store for all reader threads
self._max_timeout_secs = 1 # maximum timeout for each thread to read data
self._check_topic_period_secs = 3
# spawn threads to read data
self._reader_threads: List[Thread] = []
self._reader_thread_alive_flags: List[
bool
] = [] # use this to control thread's liveliness
# create reader threads
for tid in range(self._num_partitions):
t = Thread(target=self._reader_thread_fn, args=(tid,))
self._reader_threads.append(t)
self._reader_thread_alive_flags.append(True)
t.start()
# create monitoring thread
self._monitor_thread = Thread(target=self._monitor_thread_fn, args=())
self._monitor_thread_alive_flag = True
self._monitor_thread.start()
def _recreate_client(self):
try:
self._client = init_redis_client(
self._address, self._port, self._password, self._is_using_ssl
)
except:
pass
def _set_with_retry(self, topic: str, data: Any, expiration_duration_secs: int = 0):
while True:
try:
self._client.set(topic, data)
if expiration_duration_secs > 0:
self._client.expire(topic, expiration_duration_secs)
break
except:
time.sleep(0.01)
self._recreate_client()
def _push_with_retry(self, topic: str, data: Any, expiration_duration_secs: int):
while True:
try:
self._client.rpush(topic, data)
self._client.expire(topic, expiration_duration_secs)
break
except:
time.sleep(0.01)
self._recreate_client()
def _get_with_retry(self, topic: str):
while True:
try:
data = self._client.get(topic)
return data
except:
time.sleep(0.01)
self._recreate_client()
def _reader_thread_fn(self, thread_id: int):
topic = f"{self._topic}_{thread_id}"
self._log(f"Read thread {thread_id} was created for topic {topic} !!!")
# print(f"Read thread {thread_id} was created for topic {topic} !!!")
while self._reader_thread_alive_flags[thread_id]:
# standby if buffer is full
if len(self._buf) >= self._buf_size:
time.sleep(0.001)
continue
try:
data = self._client.blpop([topic], timeout=self._max_timeout_secs)
if data is None:
time.sleep(0.001)
continue
_, d = data
d = pickle.loads(d)
self._buf.append(d)
except:
time.sleep(0.01)
self._log(
f"Reader thread {thread_id} / {self._num_partitions} is being terminated!!!"
)
def _monitor_thread_fn(self):
self._log("Monitoring thread was created!!!")
while self._monitor_thread_alive_flag:
if (
time.time() - self._check_num_partitions_t0
< self._check_num_partitions_period_secs
):
time.sleep(0.1)
continue
try:
num_partitions = self._get_topic_num_partitions()
# scale # threads if needed
delta = abs(num_partitions - self._num_partitions)
for _ in range(delta):
if num_partitions > self._num_partitions:
self._add_reader_thread()
elif num_partitions < self._num_partitions:
self._remove_read_thread()
self._num_partitions = num_partitions
self._check_num_partitions_t0 = time.time()
except:
time.sleep(0.01)
self._recreate_client()
self._log("Monitoring thread is being terminated!!!")
def _add_reader_thread(self):
tid = len(self._reader_threads)
t = Thread(target=self._reader_thread_fn, args=(tid,))
self._reader_threads.append(t)
self._reader_thread_alive_flags.append(True)
t.start()
def _remove_read_thread(self):
t = self._reader_threads[-1]
# trigger thread t to exit
self._reader_thread_alive_flags[-1] = False
t.join()
self._reader_threads.pop(-1)
self._reader_thread_alive_flags.pop(-1)
def _get_topic_num_partitions(self) -> int:
# data = self._client.get(self._topic)
data = self._get_with_retry(self._topic)
if not isinstance(data, type(None)):
return pickle.loads(data)
return 1
def pull(self) -> List:
results = []
cur_buf_size = len(self._buf)
for _ in range(cur_buf_size):
data = self._buf.pop(0)
results.append(data)
return results
def pull_buffered_and_close(self) -> List:
# stop receiving data
self._buf_size = 0
# stop all threads
self._monitor_thread_alive_flag = False
for i in range(len(self._reader_thread_alive_flags)):
self._reader_thread_alive_flags[i] = False
self._monitor_thread.join()
for t in self._reader_threads:
t.join()
return self.pull()
def update_buf_size(self, new_buf_size: int):
assert new_buf_size > 0, "new_buf_size should be larger than 0!!!"
self._buf_size = new_buf_size
def update_max_timeout(self, timeout_secs: float):
assert timeout_secs >= 0.001, "timeout_secs should be at least 1ms!!!"
self._max_timeout_secs = timeout_secs
def update_check_num_partition_period(self, period_secs: float):
assert period_secs >= 1, "period_secs should be at least 1 second!!!"
self._check_num_partitions_period_secs = period_secs
def change_log_level(self, level: NxsLogLevel):
self._log_level = level
def _log(self, log):
write_log(self._logging_prefix, log, self._log_level)
def set_buf_size(self, size: int):
if size > 0:
self.update_buf_size(size)
def get_num_buffered_items(self):
return len(self._buf)
def set_num_partitions(self, num_partitions: int):
# self._client.set(self._topic, pickle.dumps(num_partitions))
self._set_with_retry(self._topic, pickle.dumps(num_partitions))
class NxsRedisQueuePusher(NxsQueuePusher):
def __init__(
self,
address: str,
port: int,
password: str,
is_using_ssl: bool,
**kwargs,
) -> None:
super().__init__()
self._address = address
self._port = port
self._password = password
self._is_using_ssl = is_using_ssl
self._client = init_redis_client(
self._address, self._port, self._password, self._is_using_ssl
)
self._log_level = NxsLogLevel.INFO
        self._logging_prefix = "NxsRedisQueuePusher"
        self._topic2partitions: Dict[str, int] = {}
        self._topic2partitionIdx: Dict[str, int] = {}
        self._topic2timestamp: Dict[str, float] = {}
self._check_num_partitions_period_secs = 3
self._new_topic_num_partitions = 1
self._expiration_duration_secs: int = 3600
def _recreate_client(self):
try:
self._client = init_redis_client(
self._address, self._port, self._password, self._is_using_ssl
)
except:
pass
def _set_with_retry(self, topic: str, data: Any, expiration_duration_secs: int = 0):
while True:
try:
self._client.set(topic, data)
if expiration_duration_secs > 0:
self._client.expire(topic, expiration_duration_secs)
break
except:
time.sleep(0.01)
self._recreate_client()
def _push_with_retry(self, topic: str, data: Any, expiration_duration_secs: int):
while True:
try:
self._client.rpush(topic, data)
self._client.expire(topic, expiration_duration_secs)
break
except:
time.sleep(0.01)
self._recreate_client()
def _get_with_retry(self, topic: str):
while True:
try:
data = self._client.get(topic)
return data
except:
time.sleep(0.01)
self._recreate_client()
def create_topic(self, topic: str):
# self._client.set(topic, pickle.dumps(self._new_topic_num_partitions))
self._set_with_retry(topic, pickle.dumps(self._new_topic_num_partitions))
self._topic2partitions[topic] = self._new_topic_num_partitions
self._topic2timestamp[topic] = time.time()
def push(self, topic: str, data):
if (not topic in self._topic2timestamp) or (
time.time() - self._topic2timestamp[topic]
> self._check_num_partitions_period_secs
):
num_partitions = self._get_topic_num_partitions(topic)
self._topic2partitions[topic] = num_partitions
self._topic2partitionIdx[topic] = 0
self._topic2timestamp[topic] = time.time()
# chosen_partition_idx = np.random.randint(self._topic2partitions[topic])
chosen_partition_idx = self._topic2partitionIdx[topic]
self._topic2partitionIdx[topic] = (
self._topic2partitionIdx[topic] + 1
) % self._topic2partitions[topic]
partitioned_topic = self._get_partitioned_topic_name(
topic, chosen_partition_idx
)
# self._client.rpush(partitioned_topic, pickle.dumps(data))
# self._client.expire(partitioned_topic, self._expiration_duration_secs)
self._push_with_retry(
partitioned_topic, pickle.dumps(data), self._expiration_duration_secs
)
def push_to_session(self, topic: str, session_uuid: str, data) -> None:
new_topic = f"{topic}_{session_uuid}"
return self.push(new_topic, data)
def delete_topic(self, topic: str):
pass
def update_check_num_partition_period(self, period_secs: float):
self._check_num_partitions_period_secs = period_secs
def update_new_topic_num_partitions(self, num_partitions: int):
assert num_partitions >= 1, "num_partitions should be larger than 0 !!!"
self._new_topic_num_partitions = num_partitions
def update_expiration_duration_secs(self, duration_secs: float):
assert duration_secs >= 30, "duration_secs should be larger than 30 !!!"
self._expiration_duration_secs = int(duration_secs)
def _get_partitioned_topic_name(self, topic: str, partition_idx: int):
return f"{topic}_{partition_idx}"
def _get_topic_num_partitions(self, topic) -> int:
# data = self._client.get(topic)
data = self._get_with_retry(topic)
if not isinstance(data, type(None)):
return pickle.loads(data)
return 1
def _set_topic_num_partitions(self, topic: str, num_partitions: int):
# self._client.set(topic, pickle.dumps(num_partitions))
self._set_with_retry(topic, pickle.dumps(num_partitions))
def update_config(self, config: dict = {}):
if "num_partitions" in config:
self._new_topic_num_partitions = config["num_partitions"]
def _log(self, log):
write_log(self._logging_prefix, log, self._log_level)
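# Illustrative round-trip sketch (not part of the original module). The Redis
# connection parameters are placeholders and a reachable Redis instance is assumed;
# the short sleep only gives the puller's reader thread time to BLPOP the pushed
# item before the buffers are drained.
def _example_round_trip(address="localhost", port=6379, password="", use_ssl=False):
    pusher = NxsRedisQueuePusher(address, port, password, use_ssl)
    pusher.create_topic("jobs")
    puller = NxsRedisQueuePuller(address, port, password, use_ssl, "jobs")
    pusher.push("jobs", {"task_id": 1})
    time.sleep(1.5)
    return puller.pull_buffered_and_close()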
|
federated_learning_keras_consensus_FL_threads_MNIST_gradients_exchange.py
|
from DataSets import MnistData
from DataSets_task import MnistData_task
from consensus.consensus_v4 import CFA_process
from consensus.parameter_server_v2 import Parameter_Server
# use only for consensus , PS only for energy efficiency
# from ReplayMemory import ReplayMemory
import numpy as np
import os
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras import models
import argparse
import warnings
import glob
import datetime
import scipy.io as sio
# import multiprocessing
import threading
import math
from matplotlib.pyplot import pause
import time
warnings.filterwarnings("ignore")
parser = argparse.ArgumentParser()
parser.add_argument('-resume', default=0, help="set 1 to resume from a previous simulation, 0 to start from the beginning", type=float)
parser.add_argument('-PS', default=0, help="set 1 to enable PS server and FedAvg, set 0 to disable PS", type=float)
parser.add_argument('-consensus', default=1, help="set 1 to enable consensus, set 0 to disable", type=float)
parser.add_argument('-mu', default=0.001, help="sets the learning rate for all setups", type=float)
parser.add_argument('-mu2', default=0.01, help="sets the gradient update rate", type=float)
parser.add_argument('-eps', default=0.5, help="sets the mixing parameters for model averaging (CFA)", type=float)
parser.add_argument('-eps_grads', default=0.5, help="sets the mixing parameters for gradient combining (CFA-GE)", type=float)
parser.add_argument('-target', default=0.1, help="sets the target loss to stop federation", type=float)
parser.add_argument('-K', default=30, help="sets the number of network devices", type=int)
parser.add_argument('-Ka', default=20, help="sets the number of active devices per round in FA (<= K)", type=int)
parser.add_argument('-N', default=1, help="sets the max. number of neighbors per device per round in CFA", type=int)
parser.add_argument('-Ka_consensus', default=30, help="sets the number of active devices for consensus", type=int)
parser.add_argument('-samp', default=500, help="sets the number of samples per device", type=int)
parser.add_argument('-noniid_assignment', default=1, help=" set 0 for iid assignment, 1 for non-iid random", type=int)
parser.add_argument('-gradients', default=1, help=" set 0 to disable gradient exchange, 1 to enable", type=int)
parser.add_argument('-run', default=0, help=" set the run id", type=int)
parser.add_argument('-random_data_distribution', default=0, help=" set 0 for fixed distribution, 1 for time-varying", type=int)
parser.add_argument('-batches', default=5, help="sets the number of batches per learning round", type=int)
parser.add_argument('-batch_size', default=100, help="sets the batch size per learning round", type=int)
parser.add_argument('-graph', default=6, help="sets the input graph: 0 for the default graph, >0 selects one of the adjacency matrices available in vGraph.mat", type=int)
parser.add_argument('-modelselection', default=0, help="sets the model: 0 for lenet-1", type=int)
args = parser.parse_args()
devices = args.K # NUMBER OF DEVICES
active_devices_per_round = args.Ka
max_epochs = 400
condition = args.modelselection
if args.consensus == 1:
federated = True
parameter_server = False
elif args.PS == 1:
federated = False
parameter_server = True
else: # CL: CENTRALIZED LEARNING ON DEVICE 0 (DATA CENTER)
federated = False
parameter_server = False
################# consensus, create the scheduling function ################
scheduling_tx = np.zeros((devices, max_epochs*2), dtype=int)
if parameter_server and not federated:
indexes_tx = np.zeros((args.Ka, max_epochs*2), dtype=int)
for k in range(max_epochs*2):
# inds = np.random.choice(devices, args.Ka, replace=False)
sr = devices - args.Ka + 1
sr2 = k % sr
inds = np.arange(sr2, args.Ka + sr2)
scheduling_tx[inds, k] = 1
indexes_tx[:,k] = inds
elif not parameter_server and federated:
indexes_tx = np.zeros((args.Ka_consensus, max_epochs*2), dtype=int)
for k in range(max_epochs*2):
# inds = np.random.choice(devices, args.Ka_consensus, replace=False)
sr = devices - args.Ka_consensus + 1
sr2 = k % sr
inds = np.arange(sr2, args.Ka_consensus + sr2)
scheduling_tx[inds, k] = 1
indexes_tx[:, k] = inds
###########################################################################
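# Example of the sliding-window scheduling above (added for clarity): with the
# parameter-server defaults K=30 and Ka=20 there are sr = 30 - 20 + 1 = 11 window
# positions, so at round k the active devices are indices (k % 11) .. (k % 11) + 19,
# i.e. devices 0-19 at k=0, devices 1-20 at k=1, and so on.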
if active_devices_per_round > devices:
active_devices_per_round = devices
target_loss = args.target
# Configuration parameters for the whole setup
seed = 42
# batch_size = 5 # Size of batch taken from replay buffer
batch_size = args.batch_size
number_of_batches = args.batches
training_set_per_device = args.samp # NUMBER OF TRAINING SAMPLES PER DEVICE
validation_train = 60000 # VALIDATION and training DATASET size
validation_test = 10000
if (training_set_per_device > validation_train/args.K):
training_set_per_device = math.floor(validation_train/args.K)
print(training_set_per_device)
if batch_size > training_set_per_device:
batch_size = training_set_per_device
# if batch_size*number_of_batches > training_set_per_device:
# number_of_batches = math.floor(training_set_per_device/batch_size)
# number_of_batches = int(training_set_per_device/batch_size)
# number_of_batches = args.batches
number_of_batches_for_validation = int(validation_test/batch_size)
print("Number of batches for learning {}".format(number_of_batches))
max_lag = 1  # consensus max delay (e.g. 2 = at most 2 epochs of staleness)
refresh_server = 1 # refresh server updates (in sec)
n_outputs = 10  # 10 output classes (MNIST digits)
validation_start = 1 # start validation in epochs
# Using huber loss for stability
loss_function = keras.losses.Huber()
# save scheduling format
# dict_0 = {"scheduling": scheduling_tx, "devices_scheduling": indexes_tx}
# sio.savemat("results/matlab/CFA_scheduling_devices_{}_neighbors_{}_batches_{}_size{}_noniid{}_run{}.mat".format(devices, args.N, number_of_batches, batch_size, args.noniid_assignment, args.run), dict_0)
def get_noniid_data(total_training_size, devices, batch_size):
    samples = np.random.randint(batch_size, total_training_size - batch_size * (devices - 1) + 1,
                                devices)  # random shard sizes (randint's upper bound is exclusive, hence +1; replaces the removed np.random.random_integers)
samples = samples / np.sum(samples, axis=0) * total_training_size # force them to sum to totals
# Ignore the following if you don't need integers
samples = np.round(samples) # transform them into integers
remainings = total_training_size - np.sum(samples, axis=0) # check if there are corrections to be done
step = 1 if remainings > 0 else -1
while remainings != 0:
i = np.random.randint(devices)
if samples[i] + step >= 0:
samples[i] += step
remainings -= step
return samples
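# Illustrative example (not part of the original script): split the total sample
# budget across devices into random, non-iid shard sizes that sum back to the
# total, mirroring the commented-out usage in the __main__ block further below.
def _example_noniid_split():
    shard_sizes = get_noniid_data(total_training_size=30 * 500, devices=30, batch_size=100)
    assert shard_sizes.sum() == 30 * 500
    return shard_sizes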
####
def preprocess_observation(obs, batch_size):
    img = obs  # MNIST frames need no cropping; just cast and reshape
    img = img.astype(float)  # np.float was removed from NumPy; the builtin float is the equivalent alias
return img.reshape(batch_size, 28, 28, 1)
def create_q_model():
    # Network architecture (LeNet-style CNN, selected by the -modelselection flag)
inputs = layers.Input(shape=(28, 28, 1,))
if condition == 0:
# lenet - 1
layer1 = layers.Conv2D(4, kernel_size=(5, 5), activation="relu")(inputs)
layer2 = layers.AveragePooling2D(pool_size=(2, 2))(layer1)
layer3 = layers.Conv2D(8, kernel_size=(5, 5), activation="relu")(layer2)
layer4 = layers.AveragePooling2D(pool_size=(2, 2))(layer3)
layer5 = layers.Flatten()(layer4)
elif condition == 1:
layer1 = layers.Conv2D(32, kernel_size=(3, 3), activation="relu")(inputs)
layer2 = layers.MaxPooling2D(pool_size=(2, 2))(layer1)
layer3 = layers.Conv2D(64, kernel_size=(3, 3), activation="relu")(layer2)
layer4 = layers.MaxPooling2D(pool_size=(2, 2))(layer3)
layer5 = layers.Flatten()(layer4)
else:
layer1 = layers.Conv2D(14, kernel_size=(3, 3), activation="relu")(inputs)
layer2 = layers.MaxPooling2D(pool_size=(2, 2))(layer1)
layer3 = layers.Conv2D(64, kernel_size=(3, 3), activation="relu")(layer2)
layer4 = layers.MaxPooling2D(pool_size=(2, 2))(layer3)
layer5 = layers.Flatten()(layer4)
# Convolutions
# layer1 = layers.Conv2D(32, 8, strides=4, activation="relu")(inputs)
# layer2 = layers.Conv2D(64, 4, strides=2, activation="relu")(layer1)
# layer3 = layers.Conv2D(64, 3, strides=1, activation="relu")(layer2)
#
# layer4 = layers.Flatten()(layer3)
#
# layer5 = layers.Dense(512, activation="relu")(layer4)
classification = layers.Dense(n_outputs, activation="linear")(layer5)
return keras.Model(inputs=inputs, outputs=classification)
def processParameterServer(devices, active_devices_per_round, federated, refresh_server=1):
model_global = create_q_model()
model_parameters_initial = np.asarray(model_global.get_weights())
parameter_server = Parameter_Server(devices, model_parameters_initial, active_devices_per_round, indexes_tx)
global_target_model = 'results/model_global.npy'
global_epoch = 'results/epoch_global.npy'
epoch_count = 0
np.save(global_target_model, model_parameters_initial)
np.save(global_epoch, epoch_count)
pause(2) # wait for neighbors
while True:
pause(refresh_server) # refresh global model on every xx seconds
fileList = glob.glob('*.mat', recursive=False)
if len(fileList) == devices:
# stop the server
break
else:
np.save(global_target_model, parameter_server.federated_target_weights_aggregation(epoch_count, aggregation_type=0))
epoch_count += 1
np.save(global_epoch, epoch_count)
# execute for each deployed device
def processData(device_index, start_samples, samples, federated, full_data_size, number_of_batches, parameter_server, sample_distribution):
pause(5) # PS server (if any) starts first
checkpointpath1 = 'results/model{}.h5'.format(device_index)
outfile = 'results/dump_train_variables{}.npz'.format(device_index)
outfile_models = 'results/dump_train_model{}.npy'.format(device_index)
outfile_models_grad = 'results/dump_train_grad{}.npy'.format(device_index)
global_model = 'results/model_global.npy'
global_epoch = 'results/epoch_global.npy'
#np.random.seed(1)
#tf.random.set_seed(1) # common initialization
learning_rate = args.mu
learning_rate_local = learning_rate
B = np.ones((devices, devices)) - tf.one_hot(np.arange(devices), devices)
Probabilities = B[device_index, :]/(devices - 1)
training_signal = False
# check for backup variables on start
if os.path.isfile(checkpointpath1):
train_start = False
# backup the model and the model target
model = models.load_model(checkpointpath1)
model_transmitted = create_q_model()
data_history = []
label_history = []
local_model_parameters = np.load(outfile_models, allow_pickle=True)
model.set_weights(local_model_parameters.tolist())
dump_vars = np.load(outfile, allow_pickle=True)
frame_count = dump_vars['frame_count']
epoch_loss_history = dump_vars['epoch_loss_history'].tolist()
running_loss = np.mean(epoch_loss_history[-5:])
epoch_count = dump_vars['epoch_count']
else:
train_start = True
model = create_q_model()
model_transmitted = create_q_model()
data_history = []
label_history = []
frame_count = 0
# Experience replay buffers
epoch_loss_history = []
epoch_count = 0
running_loss = math.inf
if parameter_server:
epoch_global = 0
training_end = False
#a = model.get_weights()
# set an arbitrary optimizer, here Adam is used
optimizer = keras.optimizers.Adam(learning_rate=args.mu, clipnorm=1.0)
#optimizer2 = keras.optimizers.SGD(learning_rate=args.mu2)
optimizer2 = keras.optimizers.Adam(learning_rate=args.mu2, clipnorm=1.0)
# create a data object (here radar data)
# start = time.time()
if args.noniid_assignment == 1:
data_handle = MnistData_task(device_index, start_samples, samples, full_data_size, args.random_data_distribution)
else:
data_handle = MnistData(device_index, start_samples, samples, full_data_size, args.random_data_distribution)
# end = time.time()
# time_count = (end - start)
    # print("Training time", time_count)
# create a consensus object
cfa_consensus = CFA_process(devices, device_index, args.N)
while True: # Run until solved
# collect 1 batch
frame_count += 1
obs, labels = data_handle.getTrainingData(batch_size)
data_batch = preprocess_observation(obs, batch_size)
# Save data and labels in the current learning session
data_history.append(data_batch)
label_history.append(labels)
if frame_count % number_of_batches == 0:
if not parameter_server:
epoch_count += 1
# check scheduling for federated
if federated:
if epoch_count == 1 or scheduling_tx[device_index, epoch_count] == 1:
training_signal = False
else:
# stop all computing, just save the previous model
training_signal = True
model_weights = np.asarray(model.get_weights())
model.save(checkpointpath1, include_optimizer=True, save_format='h5')
np.savez(outfile, frame_count=frame_count, epoch_loss_history=epoch_loss_history,
training_end=training_end, epoch_count=epoch_count, loss=running_loss)
np.save(outfile_models, model_weights)
# Local learning update every "number of batches" batches
# time_count = 0
if frame_count % number_of_batches == 0 and not training_signal:
# run local batches
for i in range(number_of_batches):
start = time.time()
data_sample = np.array(data_history[i])
label_sample = np.array(label_history[i])
# Create a mask to calculate loss
masks = tf.one_hot(label_sample, n_outputs)
with tf.GradientTape() as tape:
# Train the model on data samples
classes = model(data_sample)
# Apply the masks
class_v = tf.reduce_sum(tf.multiply(classes, masks), axis=1)
# Calculate loss
loss = loss_function(label_sample, class_v)
# Backpropagation
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
#end = time.time()
#time_count = time_count + (end-start)/number_of_batches
del data_history
del label_history
data_history = []
label_history = []
model_weights = np.asarray(model.get_weights())
model.save(checkpointpath1, include_optimizer=True, save_format='h5')
np.savez(outfile, frame_count=frame_count, epoch_loss_history=epoch_loss_history,
training_end=training_end, epoch_count=epoch_count, loss=running_loss)
np.save(outfile_models, model_weights)
cfa_consensus.update_local_model(model_weights)
grads_v = []
for d in range(len(grads)):
grads_v.append(grads[d].numpy())
grads_v = np.asarray(grads_v)
cfa_consensus.update_local_gradient(grads_v)
            # compute gradients for selected neighbors via get_tx_connectivity; obtain a new test observation from the local database
obs_t, labels_t = data_handle.getTrainingData(batch_size)
data_batch_t = preprocess_observation(obs_t, batch_size)
masks_t = tf.one_hot(labels_t, n_outputs)
gradient_neighbor = cfa_consensus.get_tx_connectivity(device_index, args.N, devices)
outfile_n = 'results/dump_train_variables{}.npz'.format(gradient_neighbor)
outfile_models_n = 'results/dump_train_model{}.npy'.format(gradient_neighbor)
neighbor_model_for_gradient, success = cfa_consensus.get_neighbor_weights(epoch_count, outfile_n, outfile_models_n, epoch=0, max_lag=1)
if success:
model_transmitted.set_weights(neighbor_model_for_gradient.tolist())
else:
print("failed retrieving the model for gradient computation")
with tf.GradientTape() as tape2:
# Train the model on data samples
classes = model_transmitted(data_batch_t)
# Apply the masks
class_v = tf.reduce_sum(tf.multiply(classes, masks_t), axis=1)
# Calculate loss
loss = loss_function(labels_t, class_v)
# getting and save neighbor gradients
grads_t = tape2.gradient(loss, model_transmitted.trainable_variables)
grads_v = []
for d in range(len(grads_t)):
grads_v.append(grads_t[d].numpy())
grads_v = np.asarray(grads_v)
np.save(outfile_models_grad, grads_v)
np.random.seed(1)
tf.random.set_seed(1) # common initialization
if not train_start:
if federated and not training_signal:
eps_c = args.eps
# apply consensus for model parameter
neighbor = cfa_consensus.get_connectivity(device_index, args.N, devices) # fixed neighbor
#if args.gradients == 0 or running_loss < 0.5:
if args.gradients == 0:
                    # random selection of neighbor
# neighbor = np.random.choice(indexes_tx[:, epoch_count - 1], args.N, replace=False) # choose neighbor
# while neighbor == device_index:
# neighbor = np.random.choice(indexes_tx[:, epoch_count - 1], args.N,
# replace=False) # choose neighbor
print("Consensus from neighbor {} for device {}, local loss {:.2f}".format(neighbor, device_index,
loss.numpy()))
model.set_weights(cfa_consensus.federated_weights_computing(neighbor, args.N, epoch_count, eps_c, max_lag))
if cfa_consensus.getTrainingStatusFromNeightbor():
training_signal = True # stop local learning, just do validation
else:
# compute gradients as usual
print("Consensus from neighbor {} for device {}, local loss {:.2f}".format(neighbor, device_index,
loss.numpy()))
print("Applying gradient updates...")
# model.set_weights(cfa_consensus.federated_weights_computing(neighbor, args.N, epoch_count, eps_c, max_lag))
model_averaging = cfa_consensus.federated_weights_computing(neighbor, args.N, epoch_count, eps_c, max_lag)
model.set_weights(model_averaging)
if cfa_consensus.getTrainingStatusFromNeightbor():
# model.set_weights(model_averaging)
training_signal = True # stop local learning, just do validation
else:
grads = cfa_consensus.federated_grads_computing(neighbor, args.N, epoch_count, args.eps_grads, max_lag)
optimizer2.apply_gradients(zip(grads, model.trainable_variables))
else:
print("Warm up")
train_start = False
del model_weights
#start = time.time()
# validation tool for device 'device_index'
if epoch_count > validation_start and frame_count % number_of_batches == 0:
avg_cost = 0.
for i in range(number_of_batches_for_validation):
obs_valid, labels_valid = data_handle.getTestData(batch_size, i)
# obs_valid, labels_valid = data_handle.getRandomTestData(batch_size)
data_valid = preprocess_observation(np.squeeze(obs_valid), batch_size)
data_sample = np.array(data_valid)
label_sample = np.array(labels_valid)
# Create a mask to calculate loss
masks = tf.one_hot(label_sample, n_outputs)
classes = model(data_sample)
# Apply the masks
class_v = tf.reduce_sum(tf.multiply(classes, masks), axis=1)
# Calculate loss
loss = loss_function(label_sample, class_v)
avg_cost += loss / number_of_batches_for_validation # Training loss
epoch_loss_history.append(avg_cost)
print("Device {} epoch count {}, validation loss {:.2f}".format(device_index, epoch_count,
avg_cost))
            # running loss over the most recent validation epoch (only the last entry is averaged here)
running_loss = np.mean(epoch_loss_history[-1:])
#end = time.time()
#time_count = (end - start)
#print(time_count)
if running_loss < target_loss: # Condition to consider the task solved
print("Solved for device {} at epoch {} with average loss {:.2f} !".format(device_index, epoch_count, running_loss))
training_end = True
model_weights = np.asarray(model.get_weights())
model.save(checkpointpath1, include_optimizer=True, save_format='h5')
# model_target.save(checkpointpath2, include_optimizer=True, save_format='h5')
np.savez(outfile, frame_count=frame_count, epoch_loss_history=epoch_loss_history,
training_end=training_end, epoch_count=epoch_count, loss=running_loss)
np.save(outfile_models, model_weights)
if federated:
dict_1 = {"epoch_loss_history": epoch_loss_history, "federated": federated,
"parameter_server": parameter_server, "devices": devices, "neighbors": args.N,
"active_devices": args.Ka_consensus,
"batches": number_of_batches, "batch_size": batch_size, "samples": samples, "noniid": args.noniid_assignment, "data_distribution": args.random_data_distribution}
elif parameter_server:
dict_1 = {"epoch_loss_history": epoch_loss_history, "federated": federated,
"parameter_server": parameter_server, "devices": devices,
"active_devices": active_devices_per_round,
"batches": number_of_batches, "batch_size": batch_size, "samples": samples,
"noniid": args.noniid_assignment, "data_distribution": args.random_data_distribution}
else:
dict_1 = {"epoch_loss_history": epoch_loss_history, "federated": federated,
"parameter_server": parameter_server, "devices": devices,
"batches": number_of_batches, "batch_size": batch_size, "samples": samples,
"noniid": args.noniid_assignment, "data_distribution": args.random_data_distribution}
if federated:
sio.savemat(
"results/matlab/CFA_device_{}_samples_{}_devices_{}_active_{}_neighbors_{}_batches_{}_size{}_noniid{}_run{}_distribution{}_gradients{}.mat".format(
device_index, samples, devices, args.Ka_consensus, args.N, number_of_batches, batch_size, args.noniid_assignment, args.run, args.random_data_distribution, args.gradients), dict_1)
sio.savemat(
"CFA_device_{}_samples_{}_devices_{}_neighbors_{}_batches_{}_size{}.mat".format(
device_index, samples, devices, args.N, number_of_batches, batch_size), dict_1)
elif parameter_server:
sio.savemat(
"results/matlab/FA_device_{}_samples_{}_devices_{}_active_{}_batches_{}_size{}_noniid{}_run{}_distribution{}.mat".format(
device_index, samples, devices, active_devices_per_round, number_of_batches, batch_size, args.noniid_assignment,args.run, args.random_data_distribution), dict_1)
sio.savemat(
"FA_device_{}_samples_{}_devices_{}_active_{}_batches_{}_size{}.mat".format(
device_index, samples, devices, active_devices_per_round, number_of_batches, batch_size), dict_1)
else: # CL
sio.savemat(
"results/matlab/CL_samples_{}_devices_{}_batches_{}_size{}_noniid{}_run{}_distribution{}.mat".format(samples, devices, number_of_batches, batch_size,
args.noniid_assignment, args.run, args.random_data_distribution), dict_1)
break
if epoch_count > max_epochs: # stop simulation
print("Unsolved for device {} at epoch {}!".format(device_index, epoch_count))
training_end = True
model_weights = np.asarray(model.get_weights())
model.save(checkpointpath1, include_optimizer=True, save_format='h5')
# model_target.save(checkpointpath2, include_optimizer=True, save_format='h5')
np.savez(outfile, frame_count=frame_count, epoch_loss_history=epoch_loss_history,
training_end=training_end, epoch_count=epoch_count, loss=running_loss)
np.save(outfile_models, model_weights)
if federated:
dict_1 = {"epoch_loss_history": epoch_loss_history, "federated": federated,
"parameter_server": parameter_server, "devices": devices, "neighbors": args.N,
"active_devices": args.Ka_consensus,
"batches": number_of_batches, "batch_size": batch_size, "samples": samples,
"noniid": args.noniid_assignment, "data_distribution": args.random_data_distribution}
elif parameter_server:
dict_1 = {"epoch_loss_history": epoch_loss_history, "federated": federated,
"parameter_server": parameter_server, "devices": devices,
"active_devices": active_devices_per_round,
"batches": number_of_batches, "batch_size": batch_size, "samples": samples,
"noniid": args.noniid_assignment, "data_distribution": args.random_data_distribution}
else:
dict_1 = {"epoch_loss_history": epoch_loss_history, "federated": federated,
"parameter_server": parameter_server, "devices": devices,
"batches": number_of_batches, "batch_size": batch_size, "samples": samples,
"noniid": args.noniid_assignment, "data_distribution": args.random_data_distribution}
if federated:
sio.savemat(
"results/matlab/CFA_device_{}_samples_{}_devices_{}_active_{}_neighbors_{}_batches_{}_size{}_noniid{}_run{}_distribution{}_gradients{}.mat".format(
device_index, samples, devices, args.Ka_consensus, args.N, number_of_batches, batch_size,
args.noniid_assignment, args.run, args.random_data_distribution, args.gradients), dict_1)
sio.savemat(
"CFA_device_{}_samples_{}_devices_{}_neighbors_{}_batches_{}_size{}.mat".format(
device_index, samples, devices, args.N, number_of_batches, batch_size), dict_1)
elif parameter_server:
sio.savemat(
"results/matlab/FA_device_{}_samples_{}_devices_{}_active_{}_batches_{}_size{}_noniid{}_run{}_distribution{}.mat".format(
device_index, samples, devices, active_devices_per_round, number_of_batches, batch_size,
args.noniid_assignment, args.run, args.random_data_distribution), dict_1)
sio.savemat(
"FA_device_{}_samples_{}_devices_{}_active_{}_batches_{}_size{}.mat".format(
device_index, samples, devices, active_devices_per_round, number_of_batches, batch_size),
dict_1)
else: # CL
sio.savemat(
"results/matlab/CL_samples_{}_devices_{}_batches_{}_size{}_noniid{}_run{}_distribution{}.mat".format(
samples, devices, number_of_batches, batch_size,
args.noniid_assignment, args.run, args.random_data_distribution), dict_1)
break
if __name__ == "__main__":
if args.resume == 0: # clear all files
# DELETE TEMPORARY CACHE FILES
fileList = glob.glob('results/*.npy', recursive=False)
print(fileList)
for filePath in fileList:
try:
os.remove(filePath)
except OSError:
print("Error while deleting file")
fileList = glob.glob('results/*.h5', recursive=False)
print(fileList)
for filePath in fileList:
try:
os.remove(filePath)
except OSError:
print("Error while deleting file")
fileList = glob.glob('results/*.npz', recursive=False)
print(fileList)
for filePath in fileList:
try:
os.remove(filePath)
except OSError:
print("Error while deleting file")
fileList = glob.glob('*.mat', recursive=False)
print(fileList)
for filePath in fileList:
try:
os.remove(filePath)
except OSError:
print("Error while deleting file")
# main loop for multiprocessing
t = []
############# enable consensus based federation #######################
# federated = False
# federated = True
########################################################
##################### enable parameter server ##############
# parameter_server = False
server_index = devices
# parameter_server = True
#########################################################
samples = np.zeros(devices) # training samples per device
for id in range(devices):
# samples[id]=math.floor(w[id]*validation_train)
# samples[id] = math.floor(balancing_vect[id]*fraction_training)
samples[id] = training_set_per_device
# samples = int(fraction_training/devices) # training samples per device
# ######################### Create a non-iid assignment ##########################
# if args.noniid_assignment == 1:
# total_training_size = training_set_per_device * devices
# samples = get_noniid_data(total_training_size, devices, batch_size)
# while np.min(samples) < batch_size:
# samples = get_noniid_data(total_training_size, devices, batch_size)
#############################################################################
print(samples)
#################################### code testing CL learning (0: data center)
# federated = False
# parameter_server = False
# processData(0, validation_train, federated, validation_train, number_of_batches, parameter_server)
######################################################################################
np.random.seed(1)
tf.random.set_seed(1) # common initialization
if federated or parameter_server:
for ii in range(devices):
# position start
if ii == 0:
start_index = 0
else:
start_index = start_index + int(samples[ii-1])
t.append(threading.Thread(target=processData, args=(ii, start_index, int(samples[ii]), federated, validation_train, number_of_batches, parameter_server, samples)))
t[ii].start()
# last process is for the target server
if parameter_server:
print("Target server starting with active devices {}".format(active_devices_per_round))
t.append(threading.Thread(target=processParameterServer, args=(devices, active_devices_per_round, federated)))
t[devices].start()
else: # run centralized learning on device 0 (data center)
processData(0, 0, training_set_per_device*devices, federated, validation_train, number_of_batches, parameter_server, samples)
exit(0)
|
threads.py
|
import threading
import time
from django_formset_vuejs.models import Book
def start_cleanup_job():
def cleanup_db():
while True:
time.sleep(60*60)
print('hello')
Book.objects.all().delete()
thread1 = threading.Thread(target=cleanup_db)
thread1.start()
|
processkb.py
|
import logging
logger = logging.getLogger('mylog')
import time
from multiprocessing import Process
import socket
from reasoner import reasoner_start, reasoner_stop
from thought import thought_start, thought_stop
#import reasoner
#import thought
import kb
#import conflictFinder
DEFAULT_MODEL = 'K_myself'
THRESHOLD = 0.2
REASONING_DELAY = 3
class processKB:
def __init__(self, kb):
self.kb = kb
#self.conflicts = conflictFinder.conflicts()
self.models = {DEFAULT_MODEL}
#self.start_services()
# ADD methods :
#-------------------------------------------------
def add(self, stmts, trust=None):
if trust or trust==0:
for model in list(self.models):
self.kb.add(stmts, model, trust)
else:
for model in list(self.models):
self.kb.add(stmts, model)
self.kb.save()
def add_shared(self, stmts, trust=None):
if trust or trust==0:
for model in list(self.models):
self.kb.add(stmts, model, trust)
ids = [("%s%s%s%s"%(s,p,o, model),) for s,p,o in stmts]
for node_id in ids:
self.kb.add([['%s'%(node_id), 'is', 'shared']], model, trust)
else:
for model in list(self.models):
self.kb.add(stmts, model)
ids = [("%s%s%s%s"%(s,p,o, model),) for s,p,o in stmts]
for node_id in ids:
self.kb.add([['%s'%(node_id), 'is', 'shared']],model)
self.kb.save()
def add_common(self, stmts, trust=None):
if trust or trust==0:
for model in list(self.models):
self.kb.add(stmts, model, trust)
ids = [("%s%s%s%s"%(s,p,o, model),) for s,p,o in stmts]
for node_id in ids:
self.kb.add([['%s'%(node_id), 'is', 'common']], model, trust)
else:
for model in list(self.models):
self.kb.add(stmts, model)
ids = [("%s%s%s%s"%(s,p,o, model),) for s,p,o in stmts]
for node_id in ids:
self.kb.add([['%s'%(node_id), 'is', 'common']],model)
self.kb.save()
# SUB methods :
#--------------
def sub(self, stmts, untrust=None):
if untrust or untrust==0:
for model in list(self.models):
self.kb.sub(stmts, model, untrust)
else:
for model in list(self.models):
self.kb.sub(stmts, model)
self.kb.save()
# TEST methods :
#---------------
def __contains__(self, stmts):
test = True
for model in list(self.models):
if self.kb.contains(stmts,model):
pass
else:
test = False
break
return test
# SERVICES methods
#-----------------------------------------
def start_services(self, *args):
self._reasoner = Process(target = reasoner_start)
self._reasoner.start()
self._thought = Process(target = thought_start, args = (self.kb,))
self._thought.start()
logger.info('services started')
def stop_services(self):
self._reasoner.terminate()
self._thought.terminate()
self._reasoner.join()
self._thought.join()
def __call__(self, *args):
try:
# just for testing cascade of new nodes:
#self.start_services()
time.sleep(3)
print('first adds')
story =True
if story:
            # TODO: this story is not described accurately and the models used are not the right ones.
            # Each time the mouse leaves, the fact that the mouse sees the other animals should drop to 0,
            # and self.add([['gruffalo', 'rdf:type', 'Agent'], ['gruffalo', 'wants_to_eat', 'fox']], 0.8)
            # should not be added while the mouse is still inside the models.
''' Gruffalo background '''
self.add([[ 'mouse', 'rdf:type', 'Agent'],['fox','rdf:type','Agent']],1.)
#self.add([[ 'owl', 'rdf:type', 'Agent'],['snake','rdf:type','Agent']],1.)
#self.add([[ 'fox', 'wants_to_eat', 'mouse'],['owl', 'wants_to_eat', 'mouse'],['snake','wants_to_eat', 'mouse']],0.9)
self.models = {'K_myself', 'M_myself:K_fox', 'M_myself:K_mouse'}
self.add([[ 'gruffalo', 'rdf:type', 'Agent']],0.3)
#self.start_services()
#time.sleep(REASONING_DELAY)
#self.stop_services()
''' Gruffalo story '''
''' ch.1 '''
            # narrator speaks:
#self.add([[ 'mouse', 'sees', 'fox']],1)
#self.add([[ 'fox', 'sees', 'mouse']],1)
#self.start_services()
#time.sleep(REASONING_DELAY)
#self.stop_services()
# mouse speaks to the fox :
self.models = {'M_myself:K_fox'}#,'M_myself:M_fox:K_mouse'}
self.add([['gruffalo', 'rdf:type', 'Agent'], ['gruffalo', 'wants_to_eat', 'fox']],0.9)
self.start_services()
time.sleep(REASONING_DELAY)
self.stop_services()
'''
# narator and mouse see that fox is scared:
self.models = {'K_myself', 'M_myself:K_mouse'}
self.add([[ 'fox', 'fears', 'mouse'], ['fox', 'fears', 'gruffalo']],0.8)
self.start_services()
time.sleep(REASONING_DELAY)
self.stop_services()
self.add([[ 'mouse', 'sees', 'fox']],0)
self.add([[ 'fox', 'sees', 'mouse']],0)
self.start_services()
time.sleep(REASONING_DELAY)
#self.stop_services()
'''
''' end of ch.1'''
print('##############')
print('chapter 1 ok !')
print('##############')
''' ch.2 '''
'''
self.models = DEFAULT_MODEL
# narator speaks :
self.add([[ 'mouse', 'sees', 'owl']],1)
self.add([[ 'owl', 'sees', 'mouse']],1)
# mouse speaks to the fox :
self.models = {'M_myself:K_owl','M_myself:M_owl:K_mouse'}
self.add([[ 'gruffalo', 'rdf:type', 'Agent'], ['gruffalo', 'wants_to_eat', 'owl']],0.6)
# narator and mouse see that fox is scared:
self.models = {'K_myself', 'M_myself:K_mouse'}
self.add([[ 'owl', 'fears', 'mouse'], ['owl', 'fears', 'gruffalo']],0.8)
time.sleep(15)
self.add([[ 'mouse', 'sees', 'owl']],0)
self.add([[ 'owl', 'sees', 'mouse']],0)
'''
''' end of ch.2'''
print('##############')
print('chapter 2 ok !')
print('##############')
''' ch.3 '''
'''
self.models = DEFAULT_MODEL
# narator speaks :
self.add([[ 'mouse', 'sees', 'snake']],1)
self.add([[ 'snake', 'sees', 'mouse']],1)
# mouse speaks to the fox :
self.models = {'M_myself:K_snake','M_myself:M_snake:K_mouse'}
self.add([[ 'gruffalo', 'rdf:type', 'Agent'], ['gruffalo', 'wants_to_eat', 'snake']],0.6)
# narator and mouse see that fox is scared:
self.models = {'K_myself', 'M_myself:K_mouse'}
self.add([[ 'snake', 'fears', 'mouse'], ['snake', 'fears', 'gruffalo']],0.8)
time.sleep(15)
self.add([[ 'mouse', 'sees', 'snake']],0)
self.add([[ 'snake', 'sees', 'mouse']],0)
'''
''' end of ch.3'''
print('##############')
print('chapter 3 ok !')
print('##############')
''' ch.4 '''
'''
self.models = DEFAULT_MODEL
# narator :
self.add([[ 'mouse', 'sees', 'gruffalo']],1)
self.add([[ 'gruffalo', 'sees', 'mouse']],1)
# gruffalo speaks :
self.models = {'K_myself','M_myself:K_mouse'}
self.add([[ 'gruffalo', 'wants_to_eat', 'mouse']],1)
# mouse speaks to the gruffalo :
self.models = {'M_myself:K_gruffalo', 'M_myself:M_mouse:K_gruffalo'}
self.add([['snake', 'fears', 'mouse']],0.4)
self.add([['owl', 'fears', 'mouse']],0.4)
self.add([['fox', 'fears', 'mouse']],0.4)
# gruffalo is not so idiot :
self.models = {'K_myself','M_myself:K_mouse'}
self.add([[ 'gruffalo', 'fears', 'mouse']],0.4)
time.sleep(15)
'''
''' end of ch.4'''
print('##############')
print('chapter 4 ok !')
print('##############')
''' ch.5 '''
'''
self.models = DEFAULT_MODEL
# narator :
self.add([[ 'gruffalo', 'sees', 'snake']],1)
self.add([[ 'gruffalo', 'sees', 'owl']],1)
self.add([[ 'gruffalo', 'sees', 'fox']],1)
self.add([[ 'mouse', 'sees', 'snake']],1)
self.add([[ 'mouse', 'sees', 'owl']],1)
self.add([[ 'mouse', 'sees', 'fox']],1)
self.add([[ 'snake', 'sees', 'gruffalo']],1)
self.add([[ 'owl', 'sees', 'gruffalo']],1)
self.add([[ 'fox', 'sees', 'gruffalo']],1)
# gruffalo and mouse see that other animals are scared :
self.models.add('M_myself:K_mouse')
self.models.add('M_myself:K_gruffalo')
self.add([['fox', 'fears', '?' ]],0.9)
self.add([['owl', 'fears', '?' ]],0.9)
self.add([['snake', 'fears', '?' ]],0.9)
self.add([[ 'gruffalo', 'fears', 'mouse']],0.9)
time.sleep(20)
'''
print('##############')
print('all history ok !')
print('##############')
else:
self.add([['snake', 'rdf:type', 'Reptile']],0.7)
self.add([['Reptile', 'rdfs:subClassOf', 'Animal']],1.0)
self.add([['Animal', 'rdfs:subClassOf', 'Alive']],0.4)
'''
self.add([[ 'sally', 'rdf:type', 'Agent'],['anne','rdf:type','Agent']],[DEFAULT_MODEL],1)
self.add([[ 'Agent', 'is', 'happy']],[DEFAULT_MODEL],1)
model = ['M_myself:K_sally','M_myself:K_anne','K_myself']
self.add([['ball','inside','box1']],model,1)
model = ['M_myself:K_anne','K_myself']
self.add([['ball','inside','box1']],model,0)
self.add([['ball','inside','box2']],model,1)
'''
while True:
'''listend world or dialogues'''
pass
except KeyboardInterrupt:
self.stop_services()
logger.info("Bye bye")
# TESTING
#------------------------------
if __name__=='__main__':
from ansistrm import ColorizingStreamHandler
console = ColorizingStreamHandler()
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)-15s: %(message)s')
console.setFormatter(formatter)
logger.addHandler(console)
kb = kb.KB()
process = processKB(kb)
process()
|
client.py
|
import http.client
import json
import logging
import threading
import wrapt
from urllib.parse import quote_plus
from SeleniumProxy.logger import get_logger, argstr, kwargstr
from SeleniumProxy.proxy.handler import ADMIN_PATH, CaptureRequestHandler, create_custom_capture_request_handler
from SeleniumProxy.proxy.server import ProxyHTTPServer
log = logging.getLogger(__name__)
# @wrapt.decorator
# def log_wrapper(wrapped, instance, args, kwargs):
# instance.logger.debug("{}({}) [ENTERING]".format(
# wrapped.__name__, ", ".join([argstr(args), kwargstr(kwargs)])))
# ret = wrapped(*args, **kwargs)
# instance.logger.debug("{}() [LEAVING]".format(wrapped.__name__))
# return ret
class AdminClient:
"""Provides an API for sending commands to a remote proxy server."""
def __init__(self, proxy_mgr_addr=None, proxy_mgr_port=None):
# The address of the proxy manager if set
self._proxy_mgr_addr = proxy_mgr_addr
self._proxy_mgr_port = proxy_mgr_port
# Reference to a created proxy instance and its address/port
self._proxy = None
self._proxy_addr = None
self._proxy_port = None
self._capture_request_handler = None
def create_proxy(self, addr='127.0.0.1', port=0, proxy_config=None, options=None):
"""Creates a new proxy server and returns the address and port number that the
server was started on.
Args:
addr: The address the proxy server will listen on. Default 127.0.0.1.
port: The port the proxy server will listen on. Default 0 - which means
use the first available port.
proxy_config: The configuration for any upstream proxy server. Default
is None.
options: Additional options to configure the proxy.
Returns:
A tuple of the address and port number of the created proxy server.
"""
if self._proxy_mgr_addr is not None and self._proxy_mgr_port is not None:
# TODO: ask the proxy manager to create a proxy and return that
pass
if options is None:
options = {}
custom_response_handler = options.get('custom_response_handler')
if custom_response_handler is not None:
self._capture_request_handler = create_custom_capture_request_handler(
custom_response_handler)
else:
self._capture_request_handler = CaptureRequestHandler
self._capture_request_handler.protocol_version = 'HTTP/1.1'
self._capture_request_handler.timeout = options.get(
'connection_timeout', 5)
self._proxy = ProxyHTTPServer((addr, port), self._capture_request_handler,
proxy_config=proxy_config, options=options)
t = threading.Thread(name='Selenium Proxy Server',
target=self._proxy.serve_forever)
t.daemon = not options.get('standalone')
t.start()
socketname = self._proxy.socket.getsockname()
self._proxy_addr = socketname[0]
self._proxy_port = socketname[1]
log.info('Created proxy listening on {}:{}'.format(
self._proxy_addr, self._proxy_port))
return self._proxy_addr, self._proxy_port
def destroy_proxy(self):
"""Stops the proxy server and performs any clean up actions."""
log.info('Destroying proxy')
# If proxy manager set, we would ask it to do this
self._proxy.shutdown()
self._proxy.server_close() # Closes the server socket
def get_requests(self):
return self._make_request('GET', '/requests')
def get_last_request(self):
return self._make_request('GET', '/last_request')
def clear_requests(self):
"""Clears any previously captured requests from the proxy server."""
self._make_request('DELETE', '/requests')
def find(self, path):
"""Find the first request that contains the specified path.
Requests are searched in chronological order.
Args:
path: The request path which can be any part of the request URL.
"""
return self._make_request('GET', '/find?path={}'.format(quote_plus(str(path))))
def get_request_body(self, request_id):
"""Returns the body of the request with the specified request_id.
Args:
request_id: The request identifier.
Returns:
The binary request body, or None if the request has no body.
"""
return self._make_request('GET', '/request_body?request_id={}'.format(request_id)) or None
def get_response_body(self, request_id):
return self._make_request('GET', '/response_body?request_id={}'.format(request_id)) or None
def set_header_overrides(self, headers):
"""Sets the header overrides.
Args:
headers: A dictionary of headers to be used as overrides. Where the value
of a header is set to None, this header will be filtered out.
"""
self._make_request('POST', '/header_overrides', data=headers)
def clear_header_overrides(self):
"""Clears any previously set header overrides."""
self._make_request('DELETE', '/header_overrides')
def get_header_overrides(self):
"""Gets any previously set header overrides"""
return self._make_request('GET', '/header_overrides')
def set_rewrite_rules(self, rewrite_rules):
"""Sets the rewrite rules.
Args:
rewrite_rules: A list of rewrite rules. Each rule is a sublist (or 2-tuple)
containing the pattern and replacement.
"""
self._make_request('POST', '/rewrite_rules', data=rewrite_rules)
def clear_rewrite_rules(self):
"""Clears any previously set rewrite rules."""
self._make_request('DELETE', '/rewrite_rules')
def get_rewrite_rules(self):
"""Gets any previously set rewrite rules"""
return self._make_request('GET', '/rewrite_rules')
def set_scopes(self, scopes):
"""Sets the scopes for the seleniumproxy to log/modify request and response.
Args:
scopes: a regex string or list of regex string.
"""
self._make_request('POST', '/scopes', data=scopes)
def reset_scopes(self):
"""Reset scopes to let proxy capture all requests."""
self._make_request('DELETE', '/scopes')
# @log_wrapper
def get_scopes(self):
"""Gets any previously set scopes"""
return self._make_request('GET', '/scopes')
# @log_wrapper
def _make_request(self, command, path, data=None):
url = '{}{}'.format(ADMIN_PATH, path)
conn = http.client.HTTPConnection(self._proxy_addr, self._proxy_port)
args = {}
if data is not None:
args['body'] = json.dumps(data).encode('utf-8')
conn.request(command, url, **args)
try:
response = conn.getresponse()
if response.status != 200:
raise ProxyException(
'Proxy returned status code {} for {}'.format(response.status, url))
data = response.read()
try:
if response.getheader('Content-Type') == 'application/json':
data = json.loads(data.decode(encoding='utf-8'))
except (UnicodeDecodeError, ValueError):
pass
return data
except ProxyException:
raise
except Exception as e:
raise ProxyException(
'Unable to retrieve data from proxy: {}'.format(e))
finally:
try:
conn.close()
except ConnectionError:
pass
class ProxyException(Exception):
"""Raised when there is a problem communicating with the proxy server."""
|
lock_unittest.py
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import multiprocessing
import os
import time
import unittest
import tempfile
from py_utils import lock
def _AppendTextToFile(file_name):
with open(file_name, 'a') as f:
lock.AcquireFileLock(f, lock.LOCK_EX)
# Sleep 100 ms to increase the chance of another process trying to acquire
# the lock of file as the same time.
time.sleep(0.1)
f.write('Start')
for _ in range(10000):
f.write('*')
f.write('End')
def _ReadFileWithSharedLockBlockingThenWrite(read_file, write_file):
with open(read_file, 'r') as f:
lock.AcquireFileLock(f, lock.LOCK_SH)
content = f.read()
with open(write_file, 'a') as f2:
lock.AcquireFileLock(f2, lock.LOCK_EX)
f2.write(content)
def _ReadFileWithExclusiveLockNonBlocking(target_file, status_file):
with open(target_file, 'r') as f:
try:
lock.AcquireFileLock(f, lock.LOCK_EX | lock.LOCK_NB)
with open(status_file, 'w') as f2:
f2.write('LockException was not raised')
except lock.LockException:
with open(status_file, 'w') as f2:
f2.write('LockException raised')
class FileLockTest(unittest.TestCase):
def setUp(self):
tf = tempfile.NamedTemporaryFile(delete=False)
tf.close()
self.temp_file_path = tf.name
def tearDown(self):
os.remove(self.temp_file_path)
def testExclusiveLock(self):
processess = []
for _ in range(10):
p = multiprocessing.Process(
target=_AppendTextToFile, args=(self.temp_file_path,))
p.start()
processess.append(p)
for p in processess:
p.join()
# If the file lock works as expected, there should be 10 atomic writes of
# 'Start***...***End' to the file in some order, which lead to the final
# file content as below.
expected_file_content = ''.join((['Start'] + ['*']*10000 + ['End']) * 10)
with open(self.temp_file_path, 'r') as f:
# Use assertTrue instead of assertEquals since the strings are big, hence
# assertEquals's assertion failure will contain huge strings.
self.assertTrue(expected_file_content == f.read())
def testSharedLock(self):
tf = tempfile.NamedTemporaryFile(delete=False)
tf.close()
temp_write_file = tf.name
try:
with open(self.temp_file_path, 'w') as f:
f.write('0123456789')
with open(self.temp_file_path, 'r') as f:
# First, acquire a shared lock on temp_file_path
lock.AcquireFileLock(f, lock.LOCK_SH)
processess = []
# Create 10 processes that also try to acquire shared lock from
# temp_file_path then append temp_file_path's content to temp_write_file
for _ in range(10):
p = multiprocessing.Process(
target=_ReadFileWithSharedLockBlockingThenWrite,
args=(self.temp_file_path, temp_write_file))
p.start()
processess.append(p)
for p in processess:
p.join()
# temp_write_file should contains 10 copy of temp_file_path's content.
with open(temp_write_file, 'r') as f:
self.assertEquals('0123456789'*10, f.read())
finally:
os.remove(temp_write_file)
def testNonBlockingLockAcquiring(self):
tf = tempfile.NamedTemporaryFile(delete=False)
tf.close()
temp_status_file = tf.name
try:
with open(self.temp_file_path, 'w') as f:
lock.AcquireFileLock(f, lock.LOCK_EX)
p = multiprocessing.Process(
target=_ReadFileWithExclusiveLockNonBlocking,
args=(self.temp_file_path, temp_status_file))
p.start()
p.join()
with open(temp_status_file, 'r') as f:
self.assertEquals('LockException raised', f.read())
finally:
os.remove(temp_status_file)
def testUnlockBeforeClosingFile(self):
tf = tempfile.NamedTemporaryFile(delete=False)
tf.close()
temp_status_file = tf.name
try:
with open(self.temp_file_path, 'r') as f:
lock.AcquireFileLock(f, lock.LOCK_SH)
lock.ReleaseFileLock(f)
p = multiprocessing.Process(
target=_ReadFileWithExclusiveLockNonBlocking,
args=(self.temp_file_path, temp_status_file))
p.start()
p.join()
with open(temp_status_file, 'r') as f:
self.assertEquals('LockException was not raised', f.read())
finally:
os.remove(temp_status_file)
def testContextualLock(self):
tf = tempfile.NamedTemporaryFile(delete=False)
tf.close()
temp_status_file = tf.name
try:
with open(self.temp_file_path, 'r') as f:
with lock.FileLock(f, lock.LOCK_EX):
# Within this block, accessing self.temp_file_path from another
# process should raise exception.
p = multiprocessing.Process(
target=_ReadFileWithExclusiveLockNonBlocking,
args=(self.temp_file_path, temp_status_file))
p.start()
p.join()
with open(temp_status_file, 'r') as f:
self.assertEquals('LockException raised', f.read())
# Accessing self.temp_file_path here should not raise exception.
p = multiprocessing.Process(
target=_ReadFileWithExclusiveLockNonBlocking,
args=(self.temp_file_path, temp_status_file))
p.start()
p.join()
with open(temp_status_file, 'r') as f:
self.assertEquals('LockException was not raised', f.read())
finally:
os.remove(temp_status_file)
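# Illustrative note (not part of the original file): the tests above can be run directly
# with the standard unittest runner.
if __name__ == '__main__':
    unittest.main()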
|
500A.py
|
# 500A - New Year Transportation
# http://codeforces.com/problemset/problem/500/A
import sys
import threading
def dfs(s, g, vis):
vis[s] = 1
for x in g[s]:
if not vis[x]:
dfs(x, g, vis)
def main():
n, t = map(int, input().split())
arr = [int(x) for x in input().split()]
vis = [0] * n
g = [[] for i in range(n)]
for i, x in enumerate(arr):
g[i].append(i + x)
dfs(0, g, vis)
print('YES' if vis[t - 1] else 'NO')
sys.setrecursionlimit(1 << 30) # or try 10**6
threading.stack_size(1 << 27) # or try 10**8
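# The search runs in a separate thread because CPython only applies threading.stack_size()
# to threads created afterwards; together with the raised recursion limit this lets the
# DFS recurse up to n levels deep without overflowing the stack.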
main_thread = threading.Thread(target=main)
main_thread.start()
main_thread.join()
|
safe_t.py
|
from binascii import hexlify, unhexlify
import traceback
import sys
from typing import NamedTuple, Any, Optional, Dict, Union, List, Tuple, TYPE_CHECKING
from electrum.util import bfh, bh2u, versiontuple, UserCancelled, UserFacingException
from electrum.bip32 import BIP32Node
from electrum import constants
from electrum.i18n import _
from electrum.plugin import Device, runs_in_hwd_thread
from electrum.transaction import Transaction, PartialTransaction, PartialTxInput, PartialTxOutput
from electrum.keystore import Hardware_KeyStore
from electrum.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import (is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data,
get_xpubs_and_der_suffixes_from_txinout)
if TYPE_CHECKING:
from .client import SafeTClient
# Safe-T mini initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class SafeTKeyStore(Hardware_KeyStore):
hw_type = 'safe_t'
device = 'Safe-T mini'
plugin: 'SafeTPlugin'
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))
@runs_in_hwd_thread
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation_prefix() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
@runs_in_hwd_thread
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
for txin in tx.inputs():
tx_hash = txin.prevout.txid.hex()
if txin.utxo is None and not txin.is_segwit():
raise UserFacingException(_('Missing previous tx for legacy input.'))
prev_tx[tx_hash] = txin.utxo
self.plugin.sign_transaction(self, tx, prev_tx)
class SafeTPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, types
firmware_URL = 'https://safe-t.io'
libraries_URL = 'https://github.com/archos-safe-t/python-safet'
minimum_firmware = (1, 0, 5)
keystore_class = SafeTKeyStore
minimum_library = (0, 1, 0)
SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
self.libraries_available = self.check_libraries_available()
if not self.libraries_available:
return
from . import client
from . import transport
import safetlib.messages
self.client_class = client.SafeTClient
self.types = safetlib.messages
self.DEVICE_IDS = ('Safe-T mini',)
self.transport_handler = transport.SafeTTransport()
self.device_manager().register_enumerate_func(self.enumerate)
def get_library_version(self):
import safetlib
try:
return safetlib.__version__
except AttributeError:
return 'unknown'
@runs_in_hwd_thread
def enumerate(self):
devices = self.transport_handler.enumerate_devices()
return [Device(path=d.get_path(),
interface_number=-1,
id_=d.get_path(),
product_key='Safe-T mini',
usage_page=0,
transport_ui_string=d.get_path())
for d in devices]
@runs_in_hwd_thread
def create_client(self, device, handler):
try:
self.logger.info(f"connecting to device at {device.path}")
transport = self.transport_handler.get_transport(device.path)
except BaseException as e:
self.logger.info(f"cannot connect at {device.path} {e}")
return None
if not transport:
self.logger.info(f"cannot connect at {device.path}")
return
self.logger.info(f"connected to device at {device.path}")
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.logger.info(f"ping failed {e}")
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
self.logger.info(msg)
if handler:
handler.show_error(msg)
else:
raise UserFacingException(msg)
return None
return client
@runs_in_hwd_thread
def get_client(self, keystore, force_pair=True, *,
devices=None, allow_user_interaction=True) -> Optional['SafeTClient']:
client = super().get_client(keystore, force_pair,
devices=devices,
allow_user_interaction=allow_user_interaction)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Testnet" if constants.net.TESTNET else "LitecoinFinance"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
def f(method):
import threading
settings = self.request_safe_t_init_settings(wizard, method, self.device)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
t.setDaemon(True)
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
self.logger.exception('')
handler.show_error(repr(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
@runs_in_hwd_thread
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection = settings
if method == TIM_RECOVER:
handler.show_error(_(
"You will be asked to enter 24 words regardless of your "
"seed's actual length. If you enter a word incorrectly or "
"misspell it, you cannot change it or go back - you will need "
"to start again from the beginning.\n\nSo please enter "
"the words carefully!"),
blocking=True)
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if not client:
raise Exception(_("The device was disconnected."))
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
u2f_counter = 0
skip_backup = False
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language,
u2f_counter, skip_backup)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language)
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
def _make_node_path(self, xpub, address_n):
bip32node = BIP32Node.from_xkey(xpub)
node = self.types.HDNodeType(
depth=bip32node.depth,
fingerprint=int.from_bytes(bip32node.fingerprint, 'big'),
child_num=int.from_bytes(bip32node.child_number, 'big'),
chain_code=bip32node.chaincode,
public_key=bip32node.eckey.get_public_key_bytes(compressed=True),
)
return self.types.HDNodePathType(node=node, address_n=address_n)
def setup_device(self, device_info, wizard, purpose):
device_id = device_info.device.id_
client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
wizard.run_task_without_blocking_gui(
task=lambda: client.get_xpub("m", 'standard'))
client.used()
return client
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_safet_input_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.InputScriptType.SPENDWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.InputScriptType.SPENDP2SHWITNESS
if electrum_txin_type in ('p2pkh',):
return self.types.InputScriptType.SPENDADDRESS
if electrum_txin_type in ('p2sh',):
return self.types.InputScriptType.SPENDMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_safet_output_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.OutputScriptType.PAYTOWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.OutputScriptType.PAYTOP2SHWITNESS
if electrum_txin_type in ('p2pkh',):
return self.types.OutputScriptType.PAYTOADDRESS
if electrum_txin_type in ('p2sh',):
return self.types.OutputScriptType.PAYTOMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
@runs_in_hwd_thread
def sign_transaction(self, keystore, tx: PartialTransaction, prev_tx):
self.prev_tx = prev_tx
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, for_sig=True, keystore=keystore)
outputs = self.tx_outputs(tx, keystore=keystore)
signatures = client.sign_tx(self.get_coin_name(), inputs, outputs,
lock_time=tx.locktime, version=tx.version)[0]
signatures = [(bh2u(x) + '01') for x in signatures]
tx.update_signatures(signatures)
@runs_in_hwd_thread
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
client = self.get_client(keystore)
if not client.atleast_version(1, 0):
keystore.handler.show_error(_("Your device firmware is too old"))
return
deriv_suffix = wallet.get_address_index(address)
derivation = keystore.get_derivation_prefix()
address_path = "%s/%d/%d"%(derivation, *deriv_suffix)
address_n = client.expand_path(address_path)
script_type = self.get_safet_input_script_type(wallet.txin_type)
# prepare multisig, if available:
xpubs = wallet.get_master_public_keys()
if len(xpubs) > 1:
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pairs = sorted(zip(pubkeys, xpubs))
multisig = self._make_multisig(
wallet.m,
[(xpub, deriv_suffix) for pubkey, xpub in sorted_pairs])
else:
multisig = None
client.get_address(self.get_coin_name(), address_n, True, multisig=multisig, script_type=script_type)
def tx_inputs(self, tx: Transaction, *, for_sig=False, keystore: 'SafeTKeyStore' = None):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin.is_coinbase_input():
prev_hash = b"\x00"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
assert isinstance(tx, PartialTransaction)
assert isinstance(txin, PartialTxInput)
assert keystore
if len(txin.pubkeys) > 1:
xpubs_and_deriv_suffixes = get_xpubs_and_der_suffixes_from_txinout(tx, txin)
multisig = self._make_multisig(txin.num_sig, xpubs_and_deriv_suffixes)
else:
multisig = None
script_type = self.get_safet_input_script_type(txin.script_type)
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig)
my_pubkey, full_path = keystore.find_my_pubkey_in_txinout(txin)
if full_path:
txinputtype._extend_address_n(full_path)
prev_hash = txin.prevout.txid
prev_index = txin.prevout.out_idx
if txin.value_sats() is not None:
txinputtype.amount = txin.value_sats()
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.script_sig is not None:
txinputtype.script_sig = txin.script_sig
txinputtype.sequence = txin.nsequence
inputs.append(txinputtype)
return inputs
def _make_multisig(self, m, xpubs):
if len(xpubs) == 1:
return None
pubkeys = [self._make_node_path(xpub, deriv) for xpub, deriv in xpubs]
return self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * len(pubkeys),
m=m)
def tx_outputs(self, tx: PartialTransaction, *, keystore: 'SafeTKeyStore'):
def create_output_by_derivation():
script_type = self.get_safet_output_script_type(txout.script_type)
if len(txout.pubkeys) > 1:
xpubs_and_deriv_suffixes = get_xpubs_and_der_suffixes_from_txinout(tx, txout)
multisig = self._make_multisig(txout.num_sig, xpubs_and_deriv_suffixes)
else:
multisig = None
my_pubkey, full_path = keystore.find_my_pubkey_in_txinout(txout)
assert full_path
txoutputtype = self.types.TxOutputType(
multisig=multisig,
amount=txout.value,
address_n=full_path,
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = txout.value
if address:
txoutputtype.script_type = self.types.OutputScriptType.PAYTOADDRESS
txoutputtype.address = address
else:
txoutputtype.script_type = self.types.OutputScriptType.PAYTOOPRETURN
txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(txout)
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for txout in tx.outputs():
address = txout.address
use_create_by_derivation = False
if txout.is_mine and not has_change:
# prioritise hiding outputs on the 'change' branch from user
# because no more than one change address allowed
# note: ^ restriction can be removed once we require fw
# that has https://github.com/trezor/trezor-mcu/pull/306
if txout.is_change == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation()
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx: Optional[Transaction]):
t = self.types.TransactionType()
if tx is None:
# probably for segwit input and we don't need this prev txn
return t
tx.deserialize()
t.version = tx.version
t.lock_time = tx.locktime
inputs = self.tx_inputs(tx)
t._extend_inputs(inputs)
for out in tx.outputs():
o = t._add_bin_outputs()
o.amount = out.value
o.script_pubkey = out.scriptpubkey
return t
# This function is called from the TREZOR libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
|
mk_gal_mp.py
|
# coding: utf-8
"""
Created on Mon Jun 1 00:15:25 2015
@author: hoseung
"""
from galaxymodule import galaxy
import load
import tree
import numpy as np
import utils.sampling as smp
from utils import util
#ncore = int(input("How man cores? \n"))
#wdir = input("Working directory \n")
wdir = '/home/hoseung/Work/data/AGN2/'
ncore = 2
nout=132
snout = str(nout)
rscale = 0.8
npix=800
info = load.info.Info(nout=nout, base=wdir)
frefine= 'refine_params.txt'
fnml = 'cosmo_200.nml'
ptypes=["star id pos mass vel", "dm id pos mass vel"]
# Load all halo
hall = tree.halomodule.Halo(nout=nout, base=wdir, halofinder="RS", info=info)
#hall = tree.halomodule.Halo(nout=nout, base=wdir, halofinder="HM", info=info)
hall.load()
# convert to code unit. - done by default
#hall.normalize()
# subset of halos ONLY inside zoom-in region
i_center = np.where(hall.data['np'] == max(hall.data['np']))[0]
h_ind = smp.extract_halos_within(hall, i_center, scale=2.0)
#%%
h = tree.halomodule.Halo()
h.derive_from(hall, h_ind)
#h.derive_from(hall, h_ind)#, [4921, 5281, 5343, 5365, 5375, 5412, 5415], 5442, 5639, 5665, 6095])
region = smp.set_region_multi(h.data.x, yc=h.data.y, zc=h.data.z, radius = h.data.rvir * rscale)
print(region)
hind = np.where((hall.data.x > region["xr"][0]) & (hall.data.x < region["xr"][1]) &
(hall.data.y > region["yr"][0]) & (hall.data.y < region["yr"][1]) &
(hall.data.z > region["zr"][0]) & (hall.data.z < region["zr"][1]) &
(hall.data.mvir > 1e11))[0]
h.derive_from(hall, hind[5:7])
region = smp.set_region_multi(h.data.x, yc=h.data.y, zc=h.data.z, radius = h.data.rvir * rscale)
#%%
s = load.sim.Sim()
s.setup(nout, wdir)
s.set_ranges(region["ranges"])
s.show_cpus()
#%%
s.add_part(ptypes)
s.part.load()
# convert to km/s
s.part.star['vx'] *= s.info.kms
s.part.star['vy'] *= s.info.kms
s.part.star['vz'] *= s.info.kms
s.part.star["m"] *= s.info.msun
if 'dm' in s.part.pt:
s.part.dm['vx'] *= s.info.kms
s.part.dm['vy'] *= s.info.kms
s.part.dm['vz'] *= s.info.kms
s.part.dm["m"] *= s.info.msun
#%%
s.add_hydro()
s.hydro.amr2cell(lmax=19)
#%%
print("Go!")
# Now, make galaxy data set
import numpy as np
dtype_catalog = [('id', int), ('mtot', '<f8'), ('mgas', '<f8'),
('mstar', '<f8'), ('mdm', '<f8'), ('mhal', '<f8'),
('nstar', int), ('ndm', int), ('nsink', int),
('pos', '<f8', 3), ('vel', '<f8', 3), ('lx', '<f8', 3),
('dcluster', '<f8'), ('b2t', float), ('mag', float, 5),
('sfr', '<f8'), ('lambdar', '<f8'), ('lambdar_arr', '<f8', 30),
("nfit",float), ("morph_vi",str),
("morph_b2t",str), ("add2",float), ("add3",float)]
x_clu = hall.data['x'][i_center]
y_clu = hall.data['y'][i_center]
z_clu = hall.data['z'][i_center]
util.reimport(galaxy)
#util.reimport(draw)
#util.reimport(draw.pp)
#util.reimport(draw.img_obj)
#plt.ioff()
import multiprocessing as mp
import queue
import matplotlib.pyplot as plt
# First halo is the cen
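# Worker pattern used below: halo indices are pushed into the `tasks` queue, `ncore`
# worker processes each pull an index, build a Galaxy, measure its lambda_R profile and
# put a small result dict on `out_q`; a worker exits once the task queue is empty.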
def mkgal(s, halodata, info, i_queue, out_q, star=None, dm=None, cell=None, rscale=0.4):
    while True:
        try:
            i = i_queue.get(block=False)  # pull the next halo index from the task queue
            print(i, '/', halodata['id'][i])
            gal = galaxy.Galaxy(halodata[i], radius_method='simple', info=info)
            gal.mk_gal(star=s.part.star, dm=None, cell=None,
                       rscale=rscale)
            gal_out = {"id": 0, "xc": 0, "yc": 0, "zc": 0, "mstar": 0.0, "nstar": 0,
                       "lambda_arr": [], "lambda_r": 0}
            if gal.star is False:
                print(gal.id, " Not a good galaxy")
                out_q.put(gal_out)
                continue
            else:
                print("R eff:", gal.reff * info.pboxsize)
                gal.cal_lambda_r(npix=20, method=1, rscale=1.5)  # calculate within 1.5 * reff
                # gal.plot_gal(base=wdir + 'galaxy_plot4/')
                # gal.save_gal(base=wdir)
                # Instead of the galaxy class, save the results in a dict.
                gal_out['mstar'] = gal.mstar
                gal_out['nstar'] = gal.nstar
                gal_out['id'] = gal.id
                gal_out['xc'] = gal.xc
                gal_out['yc'] = gal.yc
                gal_out['zc'] = gal.zc
                gal_out['lambda_arr'] = gal.lambda_arr
                gal_out['lambda_r'] = gal.lambda_r
                out_q.put(gal_out)
        except queue.Empty:
            print("Queue empty. Exiting worker.")
            return
        except Exception as err:
            # Report why a halo failed instead of skipping it silently, then move on.
            print("mkgal failed:", err)
            continue
nh = len(h.data)
out_q = mp.Queue()
tasks = mp.Queue()
for i in range(nh):
tasks.put(i) # send tasks to workers
processes = [mp.Process(target=mkgal, args=(s, h.data, s.info, tasks, out_q))
for i in range(ncore)]
# run processes
for p in processes:
p.start()
# exit completed processes
print("QUEUE Done")
#print(out_q.empty())
for p in processes:
p.join()
print(p.name,"has terminated and joined.")
print("-------------------- Done --------------------")
#%%
dictout = []
for i in range(nh):
    print("dict out", i)
    dictout.append(out_q.get(timeout=2))
#%%
import pandas as pd
catalog = pd.DataFrame(dictout).to_records()
import pickle
with open(wdir + "catalog.pickle", 'wb') as f:
pickle.dump(catalog, f)
#%%
#with open(wdir + 'catalog.pickle', 'rb') as f:
# catalog = pickle.load(f)a
#dir+'000014gal.hdf5', "r")
#sx = infile['galaxy/star/x']
#infile.close()
#util.reimport(utils)
#%%
import utils
# only massive galaxies
lambdar_arr = np.asarray(catalog["lambda_arr"])
i_cat = np.where(catalog['id'] != 0)
catalog = catalog[i_cat]
#%%
i_truegal = catalog['mstar'] > 3e9
disk_list=[]
# Exclude seemingly interacting galaxies
id_nogood = [ ]#, 6033, 6179]
#id_interacting=[5886,8909,6158,6226,]
#id_nogood += disk_list
i_ng = utils.match.match_list_ind(catalog['id'], id_nogood)
tflist=np.full(len(catalog), True, dtype=bool)
tflist[i_ng] = False
#i_voffset = np.wehre(lambdar_arr)
# intersection of two criteria
i_ok = np.logical_and(i_truegal, tflist)
#%%
f = plt.figure()
ax = f.add_subplot(111)
#for i, val in enumerate(lambdar_arr):
cnt = 0
for i, ok in enumerate(i_ok):
if ok:
cnt += 1
if catalog[i]['id'] in disk_list :
print("disk")
ax.plot(lambdar_arr[i], 'r-', alpha=0.5) # up to 1Reff
pass
# ax.plot(val, 'r-') # up to 1Reff
else:
ax.plot(lambdar_arr[i], 'b-', alpha=0.3) # up to 1Reff
print(cnt)
#plt.xlabel() # in the unit of Reff
ax.set_title(r"$\lambda _{R}$")
ax.set_ylabel(r"$\lambda _{R}$")
ax.set_xlabel("["+ r'$R/R_{eff}$'+"]")
ax.set_xlim(right=9)
ax.set_xticks([0, 4.5, 9])
ax.set_xticklabels(["0", "0.5", "1"])
plt.savefig(wdir + "lambda_disk.png")
plt.close()
#%%
ll = catalog['lambda_r'][i_ok]
"""
f = plt.figure()
ax = f.add_subplot(111)
ax.scatter(catalog['dcluster'][i_ok],ll)
#ax.scatter(catalog['dcluster'][i_disk],lld, color='r')
# catalog['lambdar_arr'][i_ok])# catalog['lambdar'][i_ok])
ax.set_xlim(0,1.1)
ax.set_ylim(0,1)
ax.set_xlabel("["+ r'$R/R_{eff}$'+"]")
ax.set_ylabel(r"$\lambda_{R}$")
plt.savefig(wdir + "hdclustervslambdar.png")
plt.close()
"""
f = plt.figure()
ax2 = f.add_subplot(111)
ax2.scatter(np.log10(catalog['mstar'][i_ok]), ll)
#ax2.scatter(np.log10(catalog['mstar'][i_disk]),lld, color='r')
# catalog['lambdar'][i_ok])#catalog['lambdar'][i_ok] )
ax2.set_xlim([9,11])
ax2.set_ylim(0,1)
ax2.set_xlabel("Stellar mass " + r"$[10^{10} M_{\odot}]$")
ax2.set_ylabel(r"$\lambda_{R}$")
plt.savefig(wdir + "msvslambdar.png")
plt.close()
|
mpiexec-mesos.py
|
#!/usr/bin/env python
import mesos.interface
import mesos.native
from mesos.interface import mesos_pb2
import os
import sys
import time
import re
import threading
from optparse import OptionParser
from subprocess import *
def mpiexec():
print "We've launched all our MPDs; waiting for them to come up"
while countMPDs() <= TOTAL_MPDS:
print "...waiting on MPD(s)..."
time.sleep(1)
print "Got %d mpd(s), running mpiexec" % TOTAL_MPDS
try:
print "Running mpiexec"
call([MPICH2PATH + 'mpiexec', '-1', '-n', str(TOTAL_MPDS)] + MPI_PROGRAM)
except OSError,e:
print >> sys.stderr, "Error executing mpiexec"
print >> sys.stderr, e
exit(2)
print "mpiexec completed, calling mpdallexit %s" % MPD_PID
# Ring/slave mpd daemons will be killed on executor's shutdown() if
# framework scheduler fails to call 'mpdallexit'.
call([MPICH2PATH + 'mpdallexit', MPD_PID])
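# Flow of mpiexec(): wait until every mpd slave launched through Mesos has joined the
# ring (countMPDs counts mpdtrace lines), run the MPI program across all of them, then
# tear the ring down with mpdallexit.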
class MPIScheduler(mesos.interface.Scheduler):
def __init__(self, options, ip, port):
self.mpdsLaunched = 0
self.mpdsFinished = 0
self.ip = ip
self.port = port
self.options = options
self.startedExec = False
def registered(self, driver, fid, masterInfo):
print "Mesos MPI scheduler and mpd running at %s:%s" % (self.ip, self.port)
print "Registered with framework ID %s" % fid.value
def resourceOffers(self, driver, offers):
print "Got %d resource offers" % len(offers)
for offer in offers:
print "Considering resource offer %s from %s" % (offer.id.value, offer.hostname)
if self.mpdsLaunched == TOTAL_MPDS:
print "Declining permanently because we have already launched enough tasks"
driver.declineOffer(offer.id)
continue
cpus = 0
mem = 0
tasks = []
for resource in offer.resources:
if resource.name == "cpus":
cpus = resource.scalar.value
elif resource.name == "mem":
mem = resource.scalar.value
if cpus < CPUS or mem < MEM:
print "Declining offer due to too few resources"
driver.declineOffer(offer.id)
else:
tid = self.mpdsLaunched
self.mpdsLaunched += 1
print "Accepting offer on %s to start mpd %d" % (offer.hostname, tid)
task = mesos_pb2.TaskInfo()
task.task_id.value = str(tid)
task.slave_id.value = offer.slave_id.value
task.name = "task %d " % tid
cpus = task.resources.add()
cpus.name = "cpus"
cpus.type = mesos_pb2.Value.SCALAR
cpus.scalar.value = CPUS
mem = task.resources.add()
mem.name = "mem"
mem.type = mesos_pb2.Value.SCALAR
mem.scalar.value = MEM
task.command.value = "%smpd --noconsole --ncpus=%d --host=%s --port=%s" % (MPICH2PATH, CPUS, self.ip, self.port)
tasks.append(task)
print "Replying to offer: launching mpd %d on host %s" % (tid, offer.hostname)
driver.launchTasks(offer.id, tasks)
if not self.startedExec and self.mpdsLaunched == TOTAL_MPDS:
threading.Thread(target = mpiexec).start()
self.startedExec = True
def statusUpdate(self, driver, update):
print "Task %s in state %s" % (update.task_id.value, update.state)
if (update.state == mesos_pb2.TASK_FAILED or
update.state == mesos_pb2.TASK_KILLED or
update.state == mesos_pb2.TASK_LOST):
print "A task finished unexpectedly, calling mpdexit on %s" % MPD_PID
call([MPICH2PATH + "mpdexit", MPD_PID])
driver.stop()
if (update.state == mesos_pb2.TASK_FINISHED):
self.mpdsFinished += 1
if self.mpdsFinished == TOTAL_MPDS:
print "All tasks done, all mpd's closed, exiting"
driver.stop()
def countMPDs():
try:
mpdtraceproc = Popen(MPICH2PATH + "mpdtrace -l", shell=True, stdout=PIPE)
mpdtraceline = mpdtraceproc.communicate()[0]
return mpdtraceline.count("\n")
except OSError,e:
print >>sys.stderr, "Error starting mpd or mpdtrace"
print >>sys.stderr, e
exit(2)
def parseIpPort(s):
ba = re.search("([^_]*)_([0-9]*)", s)
ip = ba.group(1)
port = ba.group(2)
return (ip, port)
if __name__ == "__main__":
parser = OptionParser(usage="Usage: %prog [options] mesos_master mpi_program")
parser.disable_interspersed_args()
parser.add_option("-n", "--num",
help="number of mpd's to allocate (default 1)",
dest="num", type="int", default=1)
parser.add_option("-c", "--cpus",
help="number of cpus per mpd (default 1)",
dest="cpus", type="int", default=1)
parser.add_option("-m","--mem",
help="number of MB of memory per mpd (default 1GB)",
dest="mem", type="int", default=1024)
parser.add_option("--name",
help="framework name", dest="name", type="string")
parser.add_option("-p","--path",
help="path to look for MPICH2 binaries (mpd, mpiexec, etc.)",
dest="path", type="string", default="")
parser.add_option("--ifhn-master",
help="alt. interface hostname for what mpd is running on (for scheduler)",
dest="ifhn_master", type="string")
# Add options to configure cpus and mem.
(options,args) = parser.parse_args()
if len(args) < 2:
print >> sys.stderr, "At least two parameters required."
print >> sys.stderr, "Use --help to show usage."
exit(2)
TOTAL_MPDS = options.num
CPUS = options.cpus
MEM = options.mem
MPI_PROGRAM = args[1:]
# Give options.path a trailing '/', if it doesn't have one already.
MPICH2PATH = os.path.join(options.path, "")
print "Connecting to Mesos master %s" % args[0]
try:
mpd_cmd = MPICH2PATH + "mpd"
mpdtrace_cmd = MPICH2PATH + "mpdtrace -l"
if options.ifhn_master is not None:
call([mpd_cmd, "--daemon", "--ifhn=" + options.ifhn_master])
else:
call([mpd_cmd, "--daemon"])
mpdtraceproc = Popen(mpdtrace_cmd, shell=True, stdout=PIPE)
mpdtraceout = mpdtraceproc.communicate()[0]
except OSError,e:
print >> sys.stderr, "Error starting mpd or mpdtrace"
print >> sys.stderr, e
exit(2)
(ip,port) = parseIpPort(mpdtraceout)
MPD_PID = mpdtraceout.split(" ")[0]
print "MPD_PID is %s" % MPD_PID
scheduler = MPIScheduler(options, ip, port)
framework = mesos_pb2.FrameworkInfo()
framework.user = ""
if options.name is not None:
framework.name = options.name
else:
framework.name = "MPI: %s" % MPI_PROGRAM[0]
driver = mesos.native.MesosSchedulerDriver(
scheduler,
framework,
args[0])
sys.exit(0 if driver.run() == mesos_pb2.DRIVER_STOPPED else 1)
|
descriptor2rdf.py
|
import sys, os, threading, Queue
import numpy as np
'''
This scripts converts the numpy vectors of the descriptors into RDF
files according to the IMGpedia ontology
'''
if len(sys.argv) != 3:
print "usage: python descriptor2rfd.py descriptor_path output_path"
exit(-1)
descriptor_path = sys.argv[1]
output_path = sys.argv[2]
MAX_THREADS = 24
folders = ['0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f']
images = "images"
descriptors = "descriptors"
prefixes_desc = "@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .\n@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .\n@prefix imo: <http://imgpedia.dcc.uchile.cl/ontology#> .\n@prefix imr: <http://imgpedia.dcc.uchile.cl/resource/> .\n"
prefixes_im = prefixes_desc + "@prefix owl: <http://www.w3.org/2002/07/owl#> .\n@prefix dbcr: <http://commons.dbpedia.org/resource/File:>\n\n"
error_log = "rdf_errors.txt"
descriptor_map = {"CLD": "CLD", "GHD":"GHD", "HOG":"HOG"}
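# Expected layout (as walked below): descriptor_path/<folder>/<subfolder>/<image_dir>/
# contains one saved numpy descriptor per type (CLD, GHD, HOG). The script mirrors that
# hierarchy under output_path/images and output_path/descriptors, writing one Turtle
# fragment per visual entity and one per descriptor.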
def writeRDF(buffer):
while not buffer.empty():
folder, subfolder = buffer.get()
print "working on %s" % subfolder
img_dirs = []
for path, dirs, files in os.walk(os.path.join(descriptor_path,folder,subfolder)):
img_dirs.extend(dirs)
break
for img_dir in img_dirs:
out_images = os.path.join(output_path, images, folder, subfolder)
out_descriptors = os.path.join(output_path, descriptors, folder, subfolder)
if not os.path.exists(out_images):
os.makedirs(out_images)
if not os.path.exists(out_descriptors):
os.makedirs(out_descriptors)
#write rdf file for visual entity
image_rdf = open(os.path.join(out_images, img_dir), "w")
image_rdf.write(prefixes_im)
txt = "imr:%s a imo:Image ;\n" % img_dir
txt += "\timo:folder %s ;\n" % folder
txt += "\timo:subfolder %s ;\n" % subfolder
txt += "\towl:sameAs dbcr:%s ;\n" % img_dir
image_rdf.write(txt)
image_rdf.close()
#write rdf files for each descriptor
for path, dirs, files in os.walk(os.path.join(descriptor_path, folder, subfolder, img_dir)):
if len(files) < 3:
e = open(error_log, "a")
txt = "File %s/%s/%s has only %d descriptors\n" % (folder, subfolder, img_dir, len(files))
e.write(txt)
e.close()
for descriptor_file in files:
descriptor = np.load(os.path.join(descriptor_path, folder, subfolder,img_dir,descriptor_file))
descriptor_rdf = open(os.path.join(out_descriptors, descriptor_file), "w")
descriptor_rdf.write(prefixes_desc)
extension = descriptor_map[descriptor_file[-3:]]
txt = "\nimr:%s a imo:%s ;\n" % (descriptor_file[:-3] + extension, extension)
txt += "\timo:describes imr:%s ;\n" % (img_dir)
txt += "\timo:value \"%s\" ." % (np.array2string(descriptor.T[0], separator=',', max_line_width=100000))
descriptor_rdf.write(txt)
descriptor_rdf.close()
break
buffer.task_done()
print "subfolder %s done" % subfolder
buffer = Queue.Queue()
for folder in folders:
for subfolder in folders:
buffer.put((folder, folder+subfolder))
print "launching threads"
threads = []
for i in range(MAX_THREADS):
t = threading.Thread(target=writeRDF, args=(buffer,))
threads.append(t)
t.start()
buffer.join()
for t in threads:
t.join()
|
thread_func.py
|
# @Time : 2020/12/25
# @Author : Naunter
# @Page : https://github.com/Naunters
# @Page : https://github.com/BDO-CnHope/bdocn_client
from threading import Thread
def thread_it(func, *args):
    t = Thread(target=func, args=args)
    t.daemon = True  # replaces the deprecated t.setDaemon(True)
    t.start()
    #t.join()
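# Tiny usage sketch (illustrative, not part of the original helper):
if __name__ == "__main__":
    import time
    thread_it(print, "running in a background thread")
    time.sleep(0.1)  # give the daemon thread a moment to run before the interpreter exits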
|
line.py
|
import threading
class new_line(threading.Thread):
    def __init__(self, ID=8080, name='new_line', counter=101010):
        threading.Thread.__init__(self)
        self.threadID = ID
        self.name = name
        self.counter = counter
    def run(self):
        # Subclasses are expected to provide a code() method with the actual work.
        self.code()
    def end(self):
        self.join()
class def_line():
    def __init__(self, def_, dpass):
        t = threading.Thread(target=def_, args=dpass)
        self.t = t
        self.t.start()
    def end(self, stop_time=0):
        self.t.join(stop_time)
|
rq_worker.py
|
from qpanel.job import start_process
from multiprocessing import Process
from rq_scheduler.scripts.rqscheduler import main
def start_jobs():
p = Process(target=start_process)
p.start()
start_scheduler()
def start_scheduler():
p = Process(target=main)
p.start()
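# Descriptive note: start_jobs() spawns two child processes, one running qpanel's
# start_process job loop and one running the rq-scheduler entry point (main); both are
# non-daemon, so they keep running after this launcher returns.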
if __name__ == '__main__':
start_jobs()
|
ParallelDownload.py
|
import threading, wget
class ParallelDownloader:
def __init__(self):
pass
    def dUrl(self, url, i):
        try:
            wget.download(url)
        except Exception as e:
            print("Error with : " + url + " (" + str(e) + ")")
def logic1(self):
urls = ["https://images.s3.amazonaws.com/PdfLabelFiles/flipkartShippingLabel_OD107312205540085000-1731220554008500.pdf",
"https://images.s3.amazonaws.com/PdfLabelFiles/paytm_packing_slip_order_559338486.pdf",
"https://images.s3.amazonaws.com/PdfLabelFiles/paytm_packing_slip_order_559338426.pdf",
"https://images.s3.amazonaws.com/PdfLabelFiles/paytm_packing_slip_order_559338357.pdf",
"https://images.s3.amazonaws.com/PdfLabelFiles/paytm_packing_slip_order_559338279.pdf",
"https://images.s3.amazonaws.com/PdfLabelFiles/404-9012833-0137142_shippinglabel.pdf",
"https://images.s3.amazonaws.com/PdfLabelFiles/171-5056321-1155509_shippinglabel.pdf",
"https://images.s3.amazonaws.com/PdfLabelFiles/403-4455185-5905913_shippinglabel.pdf",
"https://images.s3.amazonaws.com/PdfLabelFiles/paytm_packing_slip_order_559295457.pdf",
"https://images.s3.amazonaws.com/PdfLabelFiles/paytm_packing_slip_order_559295397.pdf",
"https://images.s3.amazonaws.com/PdfLabelFiles/paytm_packing_slip_order_559148777.pdf",
"https://images.s3.amazonaws.com/PdfLabelFiles/paytm_packing_slip_order_559148776.pdf",
"https://images.s3.amazonaws.com/PdfLabelFiles/paytm_packing_slip_order_559148775.pdf",
"https://images.s3.amazonaws.com/PdfLabelFiles/paytm_packing_slip_order_559148770.pdf",
"https://images.s3.amazonaws.com/PdfLabelFiles/SnapDealLabel_19805014659-SLP1140406657.pdf",
"https://images.s3.amazonaws.com/PdfLabelFiles/SnapDealLabel_19803429605-SLP1140286741.pdf",
"https://images.s3.amazonaws.com/PdfLabelFiles/171-7456146-3809129_shippinglabel.pdf",
"https://images.s3.amazonaws.com/PdfLabelFiles/paytm_packing_slip_order_559131926.pdf",
"https://images.s3.amazonaws.com/PdfLabelFiles/paytm_packing_slip_order_559131850.pdf",
"https://images.s3.amazonaws.com/PdfLabelFiles/ebayShippinglabel_2538921681-15242.pdf",
"https://images.s3.amazonaws.com/PdfLabelFiles/ebayShippinglabel_2538853123-15242.pdf",
"https://images.s3.amazonaws.com/PdfLabelFiles/171-9284133-0781116_shippinglabel.pdf",
"https://images.s3.amazonaws.com/PdfLabelFiles/SnapDealLabel_19801906394-SLP1140178106.pdf",
"https://images.s3.amazonaws.com/PdfLabelFiles/171-5670213-6464363_shippinglabel.pdf",
"https://images.s3.amazonaws.com/PdfLabelFiles/paytm_packing_slip_order_559087648.pdf",
"https://images.s3.amazonaws.com/PdfLabelFiles/171-0998013-5440314_shippinglabel.pdf",
"https://images.s3.amazonaws.com/PdfLabelFiles/402-3428884-0889148_shippinglabel.pdf",
"https://images.s3.amazonaws.com/PdfLabelFiles/403-3179019-2162765_shippinglabel.pdf",
"https://images.s3.amazonaws.com/PdfLabelFiles/402-2892189-3625157_shippinglabel.pdf",
"https://images.s3.amazonaws.com/PdfLabelFiles/paytm_packing_slip_order_559045947.pdf",
"https://images.s3.amazonaws.com/PdfLabelFiles/paytm_packing_slip_order_559045879.pdf",
"https://images.s3.amazonaws.com/PdfLabelFiles/paytm_packing_slip_order_559045815.pdf",
"https://images.s3.amazonaws.com/PdfLabelFiles/flipkartShippingLabel_OD107310867834425001-1731086783442500.pdf",
"https://images.s3.amazonaws.com/PdfLabelFiles/402-9459255-6661948_shippinglabel.pdf",
"https://images.s3.amazonaws.com/PdfLabelFiles/ebayShippinglabel_2538638382-15242.pdf",
"https://images.s3.amazonaws.com/PdfLabelFiles/ebayShippinglabel_2538630871-15242.pdf",
"https://images.s3.amazonaws.com/PdfLabelFiles/ebayShippinglabel_2538512662-15242.pdf",
"https://images.s3.amazonaws.com/PdfLabelFiles/ebayShippinglabel_2538508341-15242.pdf",
"https://images.s3.amazonaws.com/PdfLabelFiles/flipkartShippingLabel_OD107310694756347000-1731069475634700.pdf",
"https://images.s3.amazonaws.com/PdfLabelFiles/SnapDealLabel_19799680099-SLP1140008175.pdf",
"https://images.s3.amazonaws.com/PdfLabelFiles/SnapDealLabel_19799407603-SLP1139999699.pdf",
"https://images.s3.amazonaws.com/PdfLabelFiles/SnapDealLabel_19798917481-SLP1139967832.pdf",
"https://images.s3.amazonaws.com/PdfLabelFiles/SnapDealLabel_19798845649-SLP1139957984.pdf",
"https://images.s3.amazonaws.com/PdfLabelFiles/paytm_packing_slip_order_559010233.pdf",
"https://images.s3.amazonaws.com/PdfLabelFiles/paytm_packing_slip_order_559010142.pdf",
"https://images.s3.amazonaws.com/PdfLabelFiles/paytm_packing_slip_order_559010038.pdf",
"https://images.s3.amazonaws.com/PdfLabelFiles/paytm_packing_slip_order_559007311.pdf",
"https://images.s3.amazonaws.com/PdfLabelFiles/SnapDealLabel_19799239237-SLP1139987041.pdf",
"https://images.s3.amazonaws.com/PdfLabelFiles/SnapDealLabel_19798716880-SLP1139950403.pdf",
"https://images.s3.amazonaws.com/PdfLabelFiles/SnapDealLabel_19787010456-SLP1139961489.pdf",
"https://images.s3.amazonaws.com/PdfLabelFiles/SnapDealLabel_19797915979-SLP1139887878.pdf",
"https://images.s3.amazonaws.com/PdfLabelFiles/ebayShippinglabel_2538385725-15242.pdf",
"https://images.s3.amazonaws.com/PdfLabelFiles/ebayShippinglabel_2538361501-15242.pdf",
"https://images.s3.amazonaws.com/PdfLabelFiles/ebayShippinglabel_2538330738-15242.pdf",
"https://images.s3.amazonaws.com/PdfLabelFiles/ebayShippinglabel_2538321921-15242.pdf",
"https://images.s3.amazonaws.com/PdfLabelFiles/SnapDealLabel_19798049434-SLP1139897601.pdf",
"https://images.s3.amazonaws.com/PdfLabelFiles/jabong_161010359170961_ship_label_path.pdf"]
        count = 0
        threadLists = []
        # Spawn one worker thread per URL; each thread downloads a single PDF
        for i, url in enumerate(urls):
            thread = threading.Thread(target=self.dUrl, args=(url, i))
            count = count + 1
            thread.name = "T%d" % count
            threadLists.append(thread)
        # Start every download, then block until all threads have finished
        for it in threadLists:
            it.start()
        for it in threadLists:
            it.join()
obj = ParallelDownloader()
obj.logic1()
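
# --- Alternative sketch (illustrative, not part of the original class) -------
# The loop above starts one thread per URL. For long URL lists a bounded pool
# is usually gentler on the network and on the remote host; this minimal,
# standard-library sketch performs the same downloads with at most
# `max_workers` concurrent requests. The local file-naming scheme below is an
# assumption made for illustration only.
def download_with_pool(url_list, max_workers=8):
    from concurrent.futures import ThreadPoolExecutor
    import urllib.request

    def fetch(indexed_url):
        index, url = indexed_url
        # Save each PDF under a sequential, assumed file name.
        urllib.request.urlretrieve(url, "label_%03d.pdf" % index)

    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        # map() is lazy; wrapping it in list() waits for every download.
        list(pool.map(fetch, enumerate(url_list)))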
|
stdout_supress.py
|
# Code Author Github user minrk
# Originally from https://github.com/minrk/wurlitzer/blob/master/wurlitzer.py
from __future__ import print_function
from contextlib import contextmanager
import ctypes
import errno
from fcntl import fcntl, F_GETFL, F_SETFL
import io
import os
try:
from queue import Queue
except ImportError: # pragma nocover
from Queue import Queue
import selectors
import sys
import threading
import time
libc = ctypes.CDLL(None)
STDOUT = 2
PIPE = 3
try:
c_stdout_p = ctypes.c_void_p.in_dll(libc, 'stdout')
c_stderr_p = ctypes.c_void_p.in_dll(libc, 'stderr')
except ValueError: # pragma: no cover
    # libc.stdout has a funny name on OS X
c_stdout_p = ctypes.c_void_p.in_dll(libc, '__stdoutp') # pragma: no cover
c_stderr_p = ctypes.c_void_p.in_dll(libc, '__stderrp') # pragma: no cover
_default_encoding = getattr(sys.stdin, 'encoding', None) or 'utf8'
if _default_encoding.lower() == 'ascii':
# don't respect ascii
_default_encoding = 'utf8' # pragma: no cover
def dup2(a, b, timeout=3):
"""Like os.dup2, but retry on EBUSY"""
dup_err = None
# give FDs 3 seconds to not be busy anymore
for i in range(int(10 * timeout)):
try:
return os.dup2(a, b)
except OSError as e: # pragma nocover
dup_err = e
if e.errno == errno.EBUSY:
time.sleep(0.1)
else:
raise
if dup_err: # pragma nocover
raise dup_err
class Wurlitzer(object): # pragma: no cover
"""Class for Capturing Process-level FD output via dup2
Typically used via `wurlitzer.capture`
"""
flush_interval = 0.2
def __init__(self, stdout=None, stderr=None, encoding=_default_encoding):
"""
Parameters
----------
stdout: stream or None
The stream for forwarding stdout.
        stderr: stream or None
The stream for forwarding stderr.
encoding: str or None
The encoding to use, if streams should be interpreted as text.
"""
self._stdout = stdout
self._stderr = stderr
self.encoding = encoding
self._save_fds = {}
self._real_fds = {}
self._handlers = {}
self._handlers['stderr'] = self._handle_stderr
self._handlers['stdout'] = self._handle_stdout
def _setup_pipe(self, name):
real_fd = getattr(sys, '__%s__' % name).fileno()
save_fd = os.dup(real_fd)
self._save_fds[name] = save_fd
pipe_out, pipe_in = os.pipe()
dup2(pipe_in, real_fd)
os.close(pipe_in)
self._real_fds[name] = real_fd
# make pipe_out non-blocking
flags = fcntl(pipe_out, F_GETFL)
fcntl(pipe_out, F_SETFL, flags | os.O_NONBLOCK)
return pipe_out
def _decode(self, data):
"""Decode data, if any
Called before passing to stdout/stderr streams
"""
if self.encoding:
data = data.decode(self.encoding, 'replace')
return data
def _handle_stdout(self, data):
if self._stdout:
self._stdout.write(self._decode(data))
def _handle_stderr(self, data):
if self._stderr:
self._stderr.write(self._decode(data))
def _setup_handle(self):
"""Setup handle for output, if any"""
self.handle = (self._stdout, self._stderr)
def _finish_handle(self):
"""Finish handle, if anything should be done when it's all wrapped up.
"""
pass
def _flush(self):
"""flush sys.stdout/err and low-level FDs"""
if self._stdout and sys.stdout:
sys.stdout.flush()
if self._stderr and sys.stderr:
sys.stderr.flush()
libc.fflush(c_stdout_p)
libc.fflush(c_stderr_p)
def __enter__(self):
# flush anything out before starting
self._flush()
# setup handle
self._setup_handle()
self._control_r, self._control_w = os.pipe()
# create pipe for stdout
pipes = [self._control_r]
names = {self._control_r: 'control'}
if self._stdout:
pipe = self._setup_pipe('stdout')
pipes.append(pipe)
names[pipe] = 'stdout'
if self._stderr:
pipe = self._setup_pipe('stderr')
pipes.append(pipe)
names[pipe] = 'stderr'
# flush pipes in a background thread to avoid blocking
# the reader thread when the buffer is full
flush_queue = Queue()
def flush_main():
while True:
msg = flush_queue.get()
if msg == 'stop':
return
self._flush()
flush_thread = threading.Thread(target=flush_main)
flush_thread.daemon = True
flush_thread.start()
def forwarder():
"""Forward bytes on a pipe to stream messages"""
draining = False
flush_interval = 0
poller = selectors.DefaultSelector()
for pipe_ in pipes:
poller.register(pipe_, selectors.EVENT_READ)
while pipes:
events = poller.select(flush_interval)
if events:
# found something to read, don't block select until
# we run out of things to read
flush_interval = 0
else:
# nothing to read
if draining:
# if we are draining and there's nothing to read, stop
break
else:
# nothing to read, get ready to wait.
# flush the streams in case there's something waiting
# to be written.
flush_queue.put('flush')
flush_interval = self.flush_interval
continue
for selector_key, flags in events:
fd = selector_key.fd
if fd == self._control_r:
draining = True
pipes.remove(self._control_r)
poller.unregister(self._control_r)
os.close(self._control_r)
continue
name = names[fd]
data = os.read(fd, 1024)
if not data:
# pipe closed, stop polling it
pipes.remove(fd)
poller.unregister(fd)
os.close(fd)
else:
handler = getattr(self, '_handle_%s' % name)
handler(data)
if not pipes:
# pipes closed, we are done
break
# stop flush thread
flush_queue.put('stop')
flush_thread.join()
# cleanup pipes
            for pipe_ in pipes:
                os.close(pipe_)
self.thread = threading.Thread(target=forwarder)
self.thread.daemon = True
self.thread.start()
return self.handle
def __exit__(self, exc_type, exc_value, traceback):
# flush before exiting
self._flush()
# signal output is complete on control pipe
os.write(self._control_w, b'\1')
self.thread.join()
os.close(self._control_w)
# restore original state
for name, real_fd in self._real_fds.items():
save_fd = self._save_fds[name]
dup2(save_fd, real_fd)
os.close(save_fd)
# finalize handle
self._finish_handle()
@contextmanager
def pipes(stdout=PIPE, stderr=PIPE, encoding=_default_encoding): # pragma: no cover # noqa
"""Capture C-level stdout/stderr in a context manager.
The return value for the context manager is (stdout, stderr).
Examples
--------
    >>> with pipes() as (stdout, stderr):
... printf("C-level stdout")
... output = stdout.read()
"""
stdout_pipe = stderr_pipe = False
# setup stdout
if stdout == PIPE:
stdout_r, stdout_w = os.pipe()
stdout_w = os.fdopen(stdout_w, 'wb')
if encoding:
stdout_r = io.open(stdout_r, 'r', encoding=encoding)
else:
stdout_r = os.fdopen(stdout_r, 'rb')
stdout_pipe = True
else:
stdout_r = stdout_w = stdout
# setup stderr
if stderr == STDOUT:
stderr_r = None
stderr_w = stdout_w
elif stderr == PIPE:
stderr_r, stderr_w = os.pipe()
stderr_w = os.fdopen(stderr_w, 'wb')
if encoding:
stderr_r = io.open(stderr_r, 'r', encoding=encoding)
else:
stderr_r = os.fdopen(stderr_r, 'rb')
stderr_pipe = True
else:
stderr_r = stderr_w = stderr
if stdout_pipe or stderr_pipe:
capture_encoding = None
else:
capture_encoding = encoding
w = Wurlitzer(stdout=stdout_w, stderr=stderr_w, encoding=capture_encoding)
try:
with w:
yield stdout_r, stderr_r
finally:
# close pipes
if stdout_pipe:
stdout_w.close()
if stderr_pipe:
stderr_w.close()
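
# --- Usage sketch (illustrative, not part of the original module) ------------
# Capture C-level stdout written straight through libc while the context
# manager is active; the captured text becomes readable once the block exits
# and the write end of the pipe has been closed.
if __name__ == '__main__':
    with pipes() as (out, err):
        libc.printf(b"captured at the file-descriptor level\n")
    print("captured:", out.read())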
|
main.py
|
# Copyright (c) 2019, salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: MIT
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/MIT
import ipdb
if __name__ == "__main__":
import torch.multiprocessing as mp
# https://github.com/pytorch/pytorch/issues/3492#issuecomment-392977006
try:
mp.set_start_method('spawn')
except RuntimeError:
pass
import os
os.environ["OMP_NUM_THREADS"] = "1"
import time
from dist_train.utils.experiment_bookend import open_experiment
from dist_train.workers import synchronous_worker
if __name__ == '__main__':
# Interpret the arguments. Load the shared model/optimizer. Fetch the config file.
model, _, config, args = open_experiment(apply_time_machine=True)
print(' ', flush=True)
model.reset()
print(' ', flush=True)
# Create a group of workers
print('Launching the individual workers...', flush=True)
processes = []
for rank in range(args.N):
# The workers perform roll-outs and synchronize gradients
p = mp.Process(target=synchronous_worker, args=(int(rank), config, args))
p.start()
time.sleep(0.25)
processes.append(p)
for p in processes:
p.join()
|
TincanInterface.py
|
# ipop-project
# Copyright 2016, University of Florida
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import socket
import select
try:
import simplejson as json
except ImportError:
import json
from threading import Thread
import traceback
from distutils import spawn
import controller.framework.ipoplib as ipoplib
from controller.framework.ControllerModule import ControllerModule
class TincanInterface(ControllerModule):
def __init__(self, cfx_handle, module_config, module_name):
super(TincanInterface, self).__init__(cfx_handle, module_config, module_name)
self._tincan_listener_thread = None # UDP listener thread object
self._tci_publisher = None
self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._sock_svr = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Controller UDP listening socket
self._sock_svr.bind((self._cm_config["RcvServiceAddress"],
self._cm_config["CtrlRecvPort"]))
# Controller UDP sending socket
self._dest = (self._cm_config["SndServiceAddress"], self._cm_config["CtrlSendPort"])
self._sock.bind(("", 0))
self._sock_list = [self._sock_svr]
self.iptool = spawn.find_executable("ip")
def initialize(self):
self._tincan_listener_thread = Thread(target=self.__tincan_listener)
        self._tincan_listener_thread.daemon = True  # setDaemon() is deprecated; set the attribute directly
self._tincan_listener_thread.start()
self.create_control_link()
self._tci_publisher = self._cfx_handle.publish_subscription("TCI_TINCAN_MSG_NOTIFY")
self.register_cbt("Logger", "LOG_QUERY_CONFIG")
self.log("LOG_INFO", "Module loaded")
def __tincan_listener(self):
try:
while True:
socks, _, _ = select.select(self._sock_list, [], [],
self._cm_config["SocketReadWaitTime"])
# Iterate across all socket list to obtain Tincan messages
for sock in socks:
if sock == self._sock_svr:
data = sock.recvfrom(self._cm_config["MaxReadSize"])
ctl = json.loads(data[0].decode("utf-8"))
if ctl["IPOP"]["ProtocolVersion"] != 5:
raise ValueError("Invalid control version detected")
# Get the original CBT if this is the response
if ctl["IPOP"]["ControlType"] == "TincanResponse":
cbt = self._cfx_handle._pending_cbts[ctl["IPOP"]["TransactionId"]]
cbt.set_response(ctl["IPOP"]["Response"]["Message"],
ctl["IPOP"]["Response"]["Success"])
self.complete_cbt(cbt)
else:
self._tci_publisher.post_update(ctl["IPOP"]["Request"])
except Exception as err:
log_cbt = self.register_cbt(
"Logger", "LOG_WARNING", "Tincan Listener exception:{0}\n"
"{1}".format(err, traceback.format_exc()))
self.submit_cbt(log_cbt)
def create_control_link(self,):
self.register_cbt("Logger", "LOG_INFO", "Creating Tincan control link")
cbt = self.create_cbt(self._module_name, self._module_name, "TCI_CREATE_CTRL_LINK")
ctl = ipoplib.CTL_CREATE_CTRL_LINK
ctl["IPOP"]["TransactionId"] = cbt.tag
if self._cm_config["CtrlRecvPort"] is not None:
ctl["IPOP"]["Request"]["Port"] = self._cm_config["CtrlRecvPort"]
ctl["IPOP"]["Request"]["AddressFamily"] = "af_inet"
ctl["IPOP"]["Request"]["IP"] = self._cm_config["RcvServiceAddress"]
self._cfx_handle._pending_cbts[cbt.tag] = cbt
self.send_control(json.dumps(ctl))
def resp_handler_create_control_link(self, cbt):
if cbt.response.status == "False":
msg = "Failed to create Tincan response link: CBT={0}".format(cbt)
raise RuntimeError(msg)
def configure_tincan_logging(self, log_cfg, use_defaults=False):
cbt = self.create_cbt(self._module_name, self._module_name, "TCI_CONFIGURE_LOGGING")
ctl = ipoplib.CTL_CONFIGURE_LOGGING
ctl["IPOP"]["TransactionId"] = cbt.tag
if not use_defaults:
ctl["IPOP"]["Request"]["Level"] = log_cfg["LogLevel"]
ctl["IPOP"]["Request"]["Device"] = log_cfg["Device"]
ctl["IPOP"]["Request"]["Directory"] = log_cfg["Directory"]
ctl["IPOP"]["Request"]["Filename"] = log_cfg["TincanLogFileName"]
ctl["IPOP"]["Request"]["MaxArchives"] = log_cfg["MaxArchives"]
ctl["IPOP"]["Request"]["MaxFileSize"] = log_cfg["MaxFileSize"]
ctl["IPOP"]["Request"]["ConsoleLevel"] = log_cfg["ConsoleLevel"]
self._cfx_handle._pending_cbts[cbt.tag] = cbt
self.send_control(json.dumps(ctl))
def resp_handler_configure_tincan_logging(self, cbt):
if cbt.response.status == "False":
msg = "Failed to configure Tincan logging: CBT={0}".format(cbt)
self.register_cbt("Logger", "LOG_WARNING", msg)
def req_handler_create_link(self, cbt):
msg = cbt.request.params
ctl = ipoplib.CTL_CREATE_LINK
ctl["IPOP"]["TransactionId"] = cbt.tag
req = ctl["IPOP"]["Request"]
req["OverlayId"] = msg["OverlayId"]
req["TunnelId"] = msg["TunnelId"]
req["NodeId"] = msg.get("NodeId")
req["LinkId"] = msg["LinkId"]
req["PeerInfo"]["UID"] = msg["NodeData"].get("UID")
req["PeerInfo"]["MAC"] = msg["NodeData"].get("MAC")
req["PeerInfo"]["CAS"] = msg["NodeData"].get("CAS")
req["PeerInfo"]["FPR"] = msg["NodeData"].get("FPR")
# Optional overlay data to create overlay on demand
req["StunServers"] = msg.get("StunServers")
req["TurnServers"] = msg.get("TurnServers")
req["Type"] = msg["Type"]
req["TapName"] = msg.get("TapName")
req["IgnoredNetInterfaces"] = msg.get("IgnoredNetInterfaces")
self.send_control(json.dumps(ctl))
def req_handler_create_tunnel(self, cbt):
msg = cbt.request.params
ctl = ipoplib.CTL_CREATE_TUNNEL
ctl["IPOP"]["TransactionId"] = cbt.tag
req = ctl["IPOP"]["Request"]
req["StunServers"] = msg["StunServers"]
req["TurnServers"] = msg.get("TurnServers")
req["Type"] = msg["Type"]
req["TapName"] = msg["TapName"]
req["OverlayId"] = msg["OverlayId"]
req["TunnelId"] = msg["TunnelId"]
req["NodeId"] = msg.get("NodeId")
req["IgnoredNetInterfaces"] = msg.get("IgnoredNetInterfaces")
self.send_control(json.dumps(ctl))
def req_handler_query_candidate_address_set(self, cbt):
msg = cbt.request.params
ctl = ipoplib.CTL_QUERY_CAS
ctl["IPOP"]["TransactionId"] = cbt.tag
req = ctl["IPOP"]["Request"]
req["OverlayId"] = msg["OverlayId"]
req["LinkId"] = msg["LinkId"]
self.send_control(json.dumps(ctl))
def req_handler_query_link_stats(self, cbt):
msg = cbt.request.params
ctl = ipoplib.CTL_QUERY_LINK_STATS
ctl["IPOP"]["TransactionId"] = cbt.tag
req = ctl["IPOP"]["Request"]
req["TunnelIds"] = msg
self.send_control(json.dumps(ctl))
def req_handler_query_tunnel_info(self, cbt):
msg = cbt.request.params
ctl = ipoplib.CTL_QUERY_TUNNEL_INFO
ctl["IPOP"]["TransactionId"] = cbt.tag
req = ctl["IPOP"]["Request"]
req["OverlayId"] = msg["OverlayId"]
self.send_control(json.dumps(ctl))
def req_handler_remove_tunnel(self, cbt):
msg = cbt.request.params
ctl = ipoplib.CTL_REMOVE_TUNNEL
ctl["IPOP"]["TransactionId"] = cbt.tag
req = ctl["IPOP"]["Request"]
req["OverlayId"] = msg["OverlayId"]
req["TunnelId"] = msg["TunnelId"]
self.send_control(json.dumps(ctl))
if "TapName" in msg and msg["TapName"]:
ipoplib.runshell([self.iptool, "link", "del", "dev", msg["TapName"]])
def req_handler_remove_link(self, cbt):
msg = cbt.request.params
ctl = ipoplib.CTL_REMOVE_LINK
ctl["IPOP"]["TransactionId"] = cbt.tag
req = ctl["IPOP"]["Request"]
req["OverlayId"] = msg["OverlayId"]
req["TunnelId"] = msg["TunnelId"]
req["LinkId"] = msg["LinkId"]
self.send_control(json.dumps(ctl))
def process_cbt(self, cbt):
if cbt.op_type == "Request":
if cbt.request.action == "TCI_CREATE_LINK":
self.req_handler_create_link(cbt)
elif cbt.request.action == "TCI_REMOVE_LINK":
self.req_handler_remove_link(cbt)
elif cbt.request.action == "TCI_CREATE_TUNNEL":
self.req_handler_create_tunnel(cbt)
elif cbt.request.action == "TCI_QUERY_CAS":
self.req_handler_query_candidate_address_set(cbt)
elif cbt.request.action == "TCI_QUERY_LINK_STATS":
self.req_handler_query_link_stats(cbt)
elif cbt.request.action == "TCI_QUERY_TUNNEL_INFO":
self.req_handler_query_tunnel_info(cbt)
elif cbt.request.action == "TCI_REMOVE_TUNNEL":
self.req_handler_remove_tunnel(cbt)
else:
self.req_handler_default(cbt)
elif cbt.op_type == "Response":
if cbt.request.action == "LOG_QUERY_CONFIG":
self.configure_tincan_logging(cbt.response.data,
not cbt.response.status)
elif cbt.request.action == "TCI_CREATE_CTRL_LINK":
self.resp_handler_create_control_link(cbt)
elif cbt.request.action == "TCI_CONFIGURE_LOGGING":
self.resp_handler_configure_tincan_logging(cbt)
self.free_cbt(cbt)
def send_control(self, msg):
return self._sock.sendto(bytes(msg.encode("utf-8")), self._dest)
def timer_method(self):
pass
def terminate(self):
pass
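
# --- Protocol sketch (illustrative, not part of the original module) ---------
# Shape of the JSON control envelope the UDP listener above expects. The field
# names mirror the checks in __tincan_listener; the destination address and
# port below are assumptions for illustration only.
if __name__ == "__main__":
    example_ctl = {
        "IPOP": {
            "ProtocolVersion": 5,            # any other value raises ValueError in the listener
            "ControlType": "TincanRequest",  # non-"TincanResponse" controls are published as notifications
            "Request": {"Command": "ExampleNotification"},
        }
    }
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.sendto(json.dumps(example_ctl).encode("utf-8"), ("127.0.0.1", 5801))
    sock.close()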
|
test_search.py
|
import threading
import time
import pytest
import random
import numpy as np
from base.client_base import TestcaseBase
from utils.util_log import test_log as log
from common import common_func as cf
from common import common_type as ct
from common.common_type import CaseLabel, CheckTasks
prefix = "search_collection"
search_num = 10
max_dim = ct.max_dim
epsilon = ct.epsilon
gracefulTime = ct.gracefulTime
default_nb = ct.default_nb
default_nb_medium = ct.default_nb_medium
default_nq = ct.default_nq
default_dim = ct.default_dim
default_limit = ct.default_limit
default_search_exp = "int64 >= 0"
default_search_field = ct.default_float_vec_field_name
default_search_params = ct.default_search_params
default_int64_field_name = ct.default_int64_field_name
default_float_field_name = ct.default_float_field_name
vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
class TestCollectionSearchInvalid(TestcaseBase):
""" Test case of search interface """
@pytest.fixture(scope="function", params=ct.get_invalid_vectors)
def get_invalid_vectors(self, request):
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_fields_type(self, request):
if isinstance(request.param, str):
pytest.skip("string is valid type for field")
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_fields_value(self, request):
if not isinstance(request.param, str):
pytest.skip("field value only support string")
if request.param == "":
pytest.skip("empty field is valid")
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_metric_type(self, request):
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_ints)
def get_invalid_limit(self, request):
if isinstance(request.param, int) and request.param >= 0:
pytest.skip("positive int is valid type for limit")
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_expr_type(self, request):
if isinstance(request.param, str):
pytest.skip("string is valid type for expr")
        if request.param is None:
pytest.skip("None is valid for expr")
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_expr_value(self, request):
if not isinstance(request.param, str):
pytest.skip("expression value only support string")
if request.param == "":
pytest.skip("empty field is valid")
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_partition(self, request):
if request.param == []:
pytest.skip("empty is valid for partition")
        if request.param is None:
pytest.skip("None is valid for partition")
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_output_fields(self, request):
if request.param == []:
pytest.skip("empty is valid for output_fields")
        if request.param is None:
pytest.skip("None is valid for output_fields")
yield request.param
"""
******************************************************************
    # The following are invalid cases
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L1)
def test_search_no_connection(self):
"""
target: test search without connection
method: create and delete connection, then search
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix)[0]
# 2. remove connection
log.info("test_search_no_connection: removing connection")
self.connection_wrap.remove_connection(alias='default')
log.info("test_search_no_connection: removed connection")
# 3. search without connection
log.info("test_search_no_connection: searching without connection")
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit,
default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "should create connect first"})
@pytest.mark.tags(CaseLabel.L1)
def test_search_no_collection(self):
"""
target: test the scenario which search the non-exist collection
method: 1. create collection
2. drop collection
3. search the dropped collection
expected: raise exception and report the error
"""
# 1. initialize without data
collection_w = self.init_collection_general(prefix)[0]
# 2. Drop collection
collection_w.drop()
# 3. Search without collection
log.info("test_search_no_collection: Searching without collection ")
collection_w.search(vectors, default_search_field,
default_search_params, default_limit, default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "collection %s doesn't exist!" % collection_w.name})
@pytest.mark.tags(CaseLabel.L1)
def test_search_param_missing(self):
"""
target: test search with incomplete parameters
method: search with incomplete parameters
expected: raise exception and report the error
"""
# 1. initialize without data
collection_w = self.init_collection_general(prefix)[0]
# 2. search collection with missing parameters
log.info("test_search_param_missing: Searching collection %s "
"with missing parameters" % collection_w.name)
try:
collection_w.search()
except TypeError as e:
assert "missing 4 required positional arguments: 'data', " \
"'anns_field', 'param', and 'limit'" in str(e)
@pytest.mark.tags(CaseLabel.L1)
def test_search_param_invalid_vectors(self, get_invalid_vectors):
"""
target: test search with invalid parameter values
method: search with invalid data
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix)[0]
# 2. search with invalid field
invalid_vectors = get_invalid_vectors
log.info("test_search_param_invalid_vectors: searching with "
"invalid vectors: {}".format(invalid_vectors))
collection_w.search(invalid_vectors, default_search_field, default_search_params,
default_limit, default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "`search_data` value {} is illegal".format(invalid_vectors)})
@pytest.mark.tags(CaseLabel.L1)
def test_search_param_invalid_dim(self):
"""
target: test search with invalid parameter values
method: search with invalid dim
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix, True)[0]
# 2. search with invalid dim
log.info("test_search_param_invalid_dim: searching with invalid dim")
wrong_dim = 129
vectors = [[random.random() for _ in range(wrong_dim)] for _ in range(default_nq)]
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit, default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "The dimension of query entities "
"is different from schema"})
@pytest.mark.tags(CaseLabel.L1)
def test_search_param_invalid_field_type(self, get_invalid_fields_type):
"""
target: test search with invalid parameter type
method: search with invalid field
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix)[0]
# 2. search with invalid field
invalid_search_field = get_invalid_fields_type
log.info("test_search_param_invalid_field_type: searching with "
"invalid field: %s" % invalid_search_field)
collection_w.search(vectors[:default_nq], invalid_search_field, default_search_params,
default_limit, default_search_exp,
check_task=CheckTasks.err_res,
check_items=
{"err_code": 1,
"err_msg": "`anns_field` value {} is illegal".format(invalid_search_field)})
@pytest.mark.tags(CaseLabel.L1)
def test_search_param_invalid_field_value(self, get_invalid_fields_value):
"""
target: test search with invalid parameter values
method: search with invalid field
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix)[0]
# 2. search with invalid field
invalid_search_field = get_invalid_fields_value
log.info("test_search_param_invalid_field_value: searching with "
"invalid field: %s" % invalid_search_field)
collection_w.search(vectors[:default_nq], invalid_search_field, default_search_params,
default_limit, default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "Field %s doesn't exist in schema"
% invalid_search_field})
@pytest.mark.tags(CaseLabel.L1)
def test_search_param_invalid_metric_type(self, get_invalid_metric_type):
"""
target: test search with invalid parameter values
method: search with invalid metric type
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix, True, 10)[0]
# 2. search with invalid metric_type
log.info("test_search_param_invalid_metric_type: searching with invalid metric_type")
invalid_metric = get_invalid_metric_type
search_params = {"metric_type": invalid_metric, "params": {"nprobe": 10}}
collection_w.search(vectors[:default_nq], default_search_field, search_params,
default_limit, default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "metric type not found"})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.xfail(reason="issue 6727")
@pytest.mark.parametrize("index, params",
zip(ct.all_index_types[:9],
ct.default_index_params[:9]))
def test_search_invalid_params_type(self, index, params):
"""
target: test search with invalid search params
method: test search with invalid params type
expected: raise exception and report the error
"""
if index == "FLAT":
pytest.skip("skip in FLAT index")
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, 5000,
is_index=True)
# 2. create index and load
default_index = {"index_type": index, "params": params, "metric_type": "L2"}
collection_w.create_index("float_vector", default_index)
collection_w.load()
# 3. search
invalid_search_params = cf.gen_invaild_search_params_type()
for invalid_search_param in invalid_search_params:
if index == invalid_search_param["index_type"]:
search_params = {"metric_type": "L2", "params": invalid_search_param["search_params"]}
collection_w.search(vectors[:default_nq], default_search_field,
search_params, default_limit,
default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 0,
"err_msg": "metric type not found"})
@pytest.mark.tags(CaseLabel.L1)
def test_search_param_invalid_limit_type(self, get_invalid_limit):
"""
target: test search with invalid limit type
method: search with invalid limit
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix)[0]
# 2. search with invalid field
invalid_limit = get_invalid_limit
log.info("test_search_param_invalid_limit_type: searching with "
"invalid limit: %s" % invalid_limit)
collection_w.search(vectors[:default_nq], default_search_field, default_search_params,
invalid_limit, default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "`limit` value %s is illegal" % invalid_limit})
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("limit", [0, 16385])
def test_search_param_invalid_limit_value(self, limit):
"""
target: test search with invalid limit value
method: search with invalid limit: 0 and maximum
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix)[0]
# 2. search with invalid limit (topK)
log.info("test_search_param_invalid_limit: searching with "
"invalid limit (topK) = %s" % limit)
err_msg = "limit %d is too large!" % limit
if limit == 0:
err_msg = "`limit` value 0 is illegal"
collection_w.search(vectors[:default_nq], default_search_field, default_search_params,
limit, default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": err_msg})
@pytest.mark.tags(CaseLabel.L1)
def test_search_param_invalid_expr_type(self, get_invalid_expr_type):
"""
target: test search with invalid parameter type
method: search with invalid search expressions
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix)[0]
# 2 search with invalid expr
invalid_search_expr = get_invalid_expr_type
log.info("test_search_param_invalid_expr_type: searching with "
"invalid expr: {}".format(invalid_search_expr))
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit, invalid_search_expr,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "The type of expr must be string ,"
"but {} is given".format(type(invalid_search_expr))})
@pytest.mark.tags(CaseLabel.L1)
def test_search_param_invalid_expr_value(self, get_invalid_expr_value):
"""
target: test search with invalid parameter values
method: search with invalid search expressions
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix)[0]
# 2 search with invalid expr
invalid_search_expr = get_invalid_expr_value
log.info("test_search_param_invalid_expr_value: searching with "
"invalid expr: %s" % invalid_search_expr)
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit, invalid_search_expr,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "invalid expression %s"
% invalid_search_expr})
@pytest.mark.tags(CaseLabel.L2)
def test_search_partition_invalid_type(self, get_invalid_partition):
"""
target: test search invalid partition
method: search with invalid partition type
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix)[0]
# 2. search the invalid partition
partition_name = get_invalid_partition
err_msg = "`partition_name_array` value {} is illegal".format(partition_name)
collection_w.search(vectors[:default_nq], default_search_field, default_search_params,
default_limit, default_search_exp, partition_name,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": err_msg})
@pytest.mark.tags(CaseLabel.L2)
def test_search_with_output_fields_invalid_type(self, get_invalid_output_fields):
"""
target: test search with output fields
method: search with invalid output_field
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix)[0]
# 2. search
log.info("test_search_with_output_fields_invalid_type: Searching collection %s" % collection_w.name)
output_fields = get_invalid_output_fields
err_msg = "`output_fields` value {} is illegal".format(output_fields)
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit,
default_search_exp, output_fields=output_fields,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1,
ct.err_msg: err_msg})
@pytest.mark.tags(CaseLabel.L1)
def test_search_release_collection(self):
"""
target: test the scenario which search the released collection
method: 1. create collection
2. release collection
3. search the released collection
expected: raise exception and report the error
"""
# 1. initialize without data
collection_w = self.init_collection_general(prefix, True, 10)[0]
# 2. release collection
collection_w.release()
# 3. Search the released collection
log.info("test_search_release_collection: Searching without collection ")
collection_w.search(vectors, default_search_field,
default_search_params, default_limit, default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "collection %s was not loaded "
"into memory" % collection_w.name})
@pytest.mark.tags(CaseLabel.L2)
def test_search_release_partition(self):
"""
target: test the scenario which search the released collection
method: 1. create collection
2. release partition
3. search with specifying the released partition
expected: raise exception and report the error
"""
# 1. initialize with data
partition_num = 1
collection_w = self.init_collection_general(prefix, True, 10, partition_num)[0]
par = collection_w.partitions
par_name = par[partition_num].name
# 2. release partition
conn = self.connection_wrap.get_connection()[0]
conn.release_partitions(collection_w.name, [par_name])
# 3. Search the released partition
log.info("test_search_release_partition: Searching specifying the released partition")
limit = 10
collection_w.search(vectors, default_search_field,
default_search_params, limit, default_search_exp,
[par_name],
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "partition has been released"})
@pytest.mark.tags(CaseLabel.L1)
def test_search_with_empty_collection(self):
"""
target: test search with empty connection
method: search the empty collection
expected: raise exception and report the error
"""
# 1. initialize without data
collection_w = self.init_collection_general(prefix)[0]
# 2. search collection without data before load
log.info("test_search_with_empty_collection: Searching empty collection %s"
% collection_w.name)
err_msg = "collection" + collection_w.name + "was not loaded into memory"
collection_w.search(vectors[:default_nq], default_search_field, default_search_params,
default_limit, default_search_exp, timeout=1,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": err_msg})
# 3. search collection without data after load
collection_w.load()
collection_w.search(vectors[:default_nq], default_search_field, default_search_params,
default_limit, default_search_exp,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": [],
"limit": 0})
@pytest.mark.tags(CaseLabel.L1)
def test_search_partition_deleted(self):
"""
target: test search deleted partition
method: 1. search the collection
2. delete a partition
3. search the deleted partition
expected: raise exception and report the error
"""
# 1. initialize with data
partition_num = 1
collection_w = self.init_collection_general(prefix, True, 1000, partition_num)[0]
# 2. delete partitions
log.info("test_search_partition_deleted: deleting a partition")
par = collection_w.partitions
deleted_par_name = par[partition_num].name
collection_w.drop_partition(deleted_par_name)
log.info("test_search_partition_deleted: deleted a partition")
collection_w.load()
# 3. search after delete partitions
log.info("test_search_partition_deleted: searching deleted partition")
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit, default_search_exp,
[deleted_par_name],
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "PartitonName: %s not found" % deleted_par_name})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.xfail(reason="issue 6731")
@pytest.mark.parametrize("index, params",
zip(ct.all_index_types[:9],
ct.default_index_params[:9]))
def test_search_different_index_invalid_params(self, nq, dim, index, params, auto_id, _async):
"""
target: test search with different index
method: test search with different index
expected: searched successfully
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, 5000,
partition_num=1,
auto_id=auto_id,
dim=dim, is_index=True)
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
# 2. create different index
if params.get("m"):
if (dim % params["m"]) != 0:
params["m"] = dim//4
log.info("test_search_different_index_invalid_params: Creating index-%s" % index)
default_index = {"index_type": index, "params": params, "metric_type": "L2"}
collection_w.create_index("float_vector", default_index)
log.info("test_search_different_index_invalid_params: Created index-%s" % index)
collection_w.load()
# 3. search
log.info("test_search_different_index_invalid_params: Searching after creating index-%s" % index)
collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})
@pytest.mark.tags(CaseLabel.L1)
def test_search_index_partition_not_existed(self):
"""
target: test search not existed partition
method: search with not existed partition
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix, True)[0]
# 2. create index
default_index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"}
collection_w.create_index("float_vector", default_index)
# 3. search the non exist partition
partition_name = "search_non_exist"
collection_w.search(vectors[:default_nq], default_search_field, default_search_params,
default_limit, default_search_exp, [partition_name],
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "PartitonName: %s not found" % partition_name})
@pytest.mark.tags(CaseLabel.L1)
def test_search_param_invalid_binary(self):
"""
target: test search within binary data (invalid parameter)
method: search with wrong metric type
expected: raise exception and report the error
"""
# 1. initialize with binary data
collection_w = self.init_collection_general(prefix, True, is_binary=True)[0]
# 2. create index
default_index = {"index_type": "BIN_IVF_FLAT", "params": {"nlist": 128}, "metric_type": "JACCARD"}
collection_w.create_index("binary_vector", default_index)
# 3. search with exception
binary_vectors = cf.gen_binary_vectors(3000, default_dim)[1]
wrong_search_params = {"metric_type": "L2", "params": {"nprobe": 10}}
collection_w.search(binary_vectors[:default_nq], "binary_vector", wrong_search_params,
default_limit, default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "unsupported"})
@pytest.mark.tags(CaseLabel.L2)
def test_search_binary_flat_with_L2(self):
"""
target: search binary collection using FlAT with L2
method: search binary collection using FLAT with L2
expected: raise exception and report error
"""
# 1. initialize with binary data
collection_w = self.init_collection_general(prefix, True, is_binary=True)[0]
# 2. search and assert
query_raw_vector, binary_vectors = cf.gen_binary_vectors(2, default_dim)
search_params = {"metric_type": "L2", "params": {"nprobe": 10}}
collection_w.search(binary_vectors[:default_nq], "binary_vector",
search_params, default_limit, "int64 >= 0",
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "Search failed"})
@pytest.mark.tags(CaseLabel.L1)
def test_search_with_output_fields_not_exist(self):
"""
target: test search with output fields
method: search with non-exist output_field
expected: search success
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True)
# 2. search
log.info("test_search_with_output_fields_not_exist: Searching collection %s" % collection_w.name)
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit,
default_search_exp, output_fields=["int63"],
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1,
ct.err_msg: 'Field int63 not exist'})
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("output_fields", [[default_search_field], ["%"]])
def test_search_output_field_vector(self, output_fields):
"""
target: test search with vector as output field
method: search with one vector output_field or
wildcard for vector
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix, True)[0]
# 2. search
log.info("test_search_output_field_vector: Searching collection %s" % collection_w.name)
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit,
default_search_exp, output_fields=output_fields,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "Search doesn't support "
"vector field as output_fields"})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("output_fields", [["*%"], ["**"], ["*", "@"]])
def test_search_output_field_invalid_wildcard(self, output_fields):
"""
target: test search with invalid output wildcard
method: search with invalid output_field wildcard
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix, True)[0]
# 2. search
log.info("test_search_output_field_vector: Searching collection %s" % collection_w.name)
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit,
default_search_exp, output_fields=output_fields,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": f"Field {output_fields[-1]} not exist"})
class TestCollectionSearch(TestcaseBase):
""" Test case of search interface """
@pytest.fixture(scope="function",
params=[default_nb, default_nb_medium])
def nb(self, request):
yield request.param
@pytest.fixture(scope="function", params=[2, 500])
def nq(self, request):
yield request.param
@pytest.fixture(scope="function", params=[8, 128])
def dim(self, request):
yield request.param
@pytest.fixture(scope="function", params=[False, True])
def auto_id(self, request):
yield request.param
@pytest.fixture(scope="function", params=[False, True])
def _async(self, request):
yield request.param
"""
******************************************************************
# The following are valid base cases
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L0)
def test_search_normal(self, nq, dim, auto_id):
"""
target: test search normal case
method: create connection, collection, insert and search
expected: search successfully with limit(topK)
"""
# 1. initialize with data
collection_w, _, _, insert_ids = \
self.init_collection_general(prefix, True, auto_id=auto_id, dim=dim)
# 2. search
log.info("test_search_normal: searching collection %s" % collection_w.name)
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
default_search_exp,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit})
    @pytest.mark.tags(CaseLabel.L0)
def test_search_with_hit_vectors(self, nq, dim, auto_id):
"""
target: test search with vectors in collections
method: create connections,collection insert and search vectors in collections
expected: search successfully with limit(topK) and can be hit at top 1 (min distance is 0)
"""
collection_w, _vectors, _, insert_ids = \
self.init_collection_general(prefix, True, auto_id=auto_id, dim=dim)
# get vectors that inserted into collection
vectors = np.array(_vectors[0]).tolist()
vectors = [vectors[i][-1] for i in range(nq)]
search_res, _ = collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
default_search_exp,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit})
for hits in search_res:
            # verify that the top-1 hit is the query vector itself, so the min distance is 0
assert hits.distances[0] == 0.0
@pytest.mark.tags(CaseLabel.L1)
def test_search_with_empty_vectors(self, dim, auto_id, _async):
"""
target: test search with empty query vector
method: search using empty query vector
expected: search successfully with 0 results
"""
# 1. initialize without data
collection_w = self.init_collection_general(prefix, True,
auto_id=auto_id, dim=dim)[0]
# 2. search collection without data
log.info("test_search_with_empty_vectors: Searching collection %s "
"using empty vector" % collection_w.name)
collection_w.search([], default_search_field, default_search_params,
default_limit, default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": 0,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("search_params", [{}, {"params": {}}, {"params": {"nprobe": 10}}])
def test_search_normal_default_params(self, dim, auto_id, search_params, _async):
"""
target: test search normal case
method: create connection, collection, insert and search
expected: search successfully with limit(topK)
"""
# 1. initialize with data
collection_w, _, _, insert_ids = \
self.init_collection_general(prefix, True, auto_id=auto_id, dim=dim)
# 2. search
log.info("test_search_normal: searching collection %s" % collection_w.name)
vectors = [[random.random() for _ in range(dim)] for _ in range(default_nq)]
collection_w.search(vectors[:default_nq], default_search_field,
search_params, default_limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})
@pytest.mark.tags(CaseLabel.L1)
def test_search_before_after_delete(self, nq, dim, auto_id, _async):
"""
target: test search function before and after deletion
method: 1. search the collection
2. delete a partition
3. search the collection
expected: the deleted entities should not be searched
"""
# 1. initialize with data
nb = 1000
limit = 1000
partition_num = 1
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
partition_num,
auto_id=auto_id,
dim=dim)
# 2. search all the partitions before partition deletion
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
log.info("test_search_before_after_delete: searching before deleting partitions")
collection_w.search(vectors[:nq], default_search_field,
default_search_params, limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": limit,
"_async": _async})
# 3. delete partitions
log.info("test_search_before_after_delete: deleting a partition")
par = collection_w.partitions
deleted_entity_num = par[partition_num].num_entities
entity_num = nb - deleted_entity_num
collection_w.drop_partition(par[partition_num].name)
log.info("test_search_before_after_delete: deleted a partition")
collection_w.load()
# 4. search non-deleted part after delete partitions
log.info("test_search_before_after_delete: searching after deleting partitions")
collection_w.search(vectors[:nq], default_search_field,
default_search_params, limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids[:entity_num],
"limit": limit-deleted_entity_num,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
def test_search_partition_after_release_one(self, nq, dim, auto_id, _async):
"""
target: test search function before and after release
method: 1. search the collection
2. release a partition
3. search the collection
expected: the deleted entities should not be searched
"""
# 1. initialize with data
nb = 1000
limit = 1000
partition_num = 1
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
partition_num,
auto_id=auto_id,
dim=dim)
# 2. search all the partitions before partition deletion
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
log.info("test_search_partition_after_release_one: searching before deleting partitions")
collection_w.search(vectors[:nq], default_search_field,
default_search_params, limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": limit,
"_async": _async})
# 3. release one partition
log.info("test_search_partition_after_release_one: releasing a partition")
par = collection_w.partitions
deleted_entity_num = par[partition_num].num_entities
entity_num = nb - deleted_entity_num
conn = self.connection_wrap.get_connection()[0]
conn.release_partitions(collection_w.name, [par[partition_num].name])
log.info("test_search_partition_after_release_one: released a partition")
# 4. search collection after release one partition
log.info("test_search_partition_after_release_one: searching after deleting partitions")
collection_w.search(vectors[:nq], default_search_field,
default_search_params, limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids[:entity_num],
"limit": limit - deleted_entity_num,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
def test_search_partition_after_release_all(self, nq, dim, auto_id, _async):
"""
target: test search function before and after release
method: 1. search the collection
2. release a partition
3. search the collection
expected: the deleted entities should not be searched
"""
# 1. initialize with data
nb = 1000
limit = 1000
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
1, auto_id=auto_id,
dim=dim)
# 2. search all the partitions before partition deletion
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
log.info("test_search_partition_after_release_all: searching before deleting partitions")
collection_w.search(vectors[:nq], default_search_field,
default_search_params, limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": limit,
"_async": _async})
# 3. release all partitions
log.info("test_search_partition_after_release_all: releasing a partition")
par = collection_w.partitions
conn = self.connection_wrap.get_connection()[0]
conn.release_partitions(collection_w.name, [par[0].name, par[1].name])
log.info("test_search_partition_after_release_all: released a partition")
# 4. search collection after release all partitions
collection_w.search(vectors[:nq], default_search_field,
default_search_params, limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": [],
"limit": 0,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
def test_search_collection_after_release_load(self, nb, nq, dim, auto_id, _async):
"""
target: search the pre-released collection after load
method: 1. create collection
2. release collection
3. load collection
4. search the pre-released collection
expected: search successfully
"""
# 1. initialize without data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
1, auto_id=auto_id,
dim=dim)
# 2. release collection
collection_w.release()
# 3. Search the pre-released collection after load
collection_w.load()
log.info("test_search_collection_after_release_load: searching after load")
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
collection_w.search(vectors[:nq], default_search_field, default_search_params,
default_limit, default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.xfail(reason="issue 6997")
def test_search_partition_after_release_load(self, nb, nq, dim, auto_id, _async):
"""
target: search the pre-released collection after load
method: 1. create collection
2. release a partition
3. load partition
4. search the pre-released partition
expected: search successfully
"""
# 1. initialize without data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
1, auto_id=auto_id,
dim=dim)
# 2. release collection
log.info("test_search_partition_after_release_load: releasing a partition")
par = collection_w.partitions
conn = self.connection_wrap.get_connection()[0]
conn.release_partitions(collection_w.name, [par[1].name])
log.info("test_search_partition_after_release_load: released a partition")
# 3. Search the collection after load
limit = 1000
collection_w.load()
log.info("test_search_partition_after_release_load: searching after load")
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
collection_w.search(vectors[:nq], default_search_field, default_search_params,
limit, default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": limit,
"_async": _async})
# 4. Search the pre-released partition after load
if limit > par[1].num_entities:
limit_check = par[1].num_entities
else:
limit_check = limit
collection_w.search(vectors[:nq], default_search_field, default_search_params,
limit, default_search_exp,
[par[1].name], _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids[par[0].num_entities:],
"limit": limit_check,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
def test_search_load_flush_load(self, nb, nq, dim, auto_id, _async):
"""
target: test search when load before flush
method: 1. search the collection
2. insert data and load
3. flush, and load
expected: search success with limit(topK)
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix, auto_id=auto_id, dim=dim)[0]
# 2. insert data
insert_ids = cf.insert_data(collection_w, nb, auto_id=auto_id, dim=dim)[3]
# 3. load data
collection_w.load()
# 4. flush and load
collection_w.num_entities
collection_w.load()
# 5. search for new data without load
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
def test_search_new_data(self, nq, dim, auto_id, _async):
"""
target: test search new inserted data without load
method: 1. search the collection
2. insert new data
3. search the collection without load again
expected: new data should be searched
"""
# 1. initialize with data
limit = 1000
nb_old = 500
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb_old,
auto_id=auto_id,
dim=dim)
# 2. search for original data after load
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
log.info("test_search_new_data: searching for original data after load")
collection_w.search(vectors[:nq], default_search_field,
default_search_params, limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": nb_old,
"_async": _async})
# 3. insert new data
nb_new = 300
insert_ids_new = cf.insert_data(collection_w, nb_new,
auto_id=auto_id, dim=dim)[3]
insert_ids.extend(insert_ids_new)
# gracefulTime is default as 1s which allows data
# could not be searched instantly in gracefulTime
time.sleep(gracefulTime)
# 4. search for new data without load
collection_w.search(vectors[:nq], default_search_field,
default_search_params, limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": nb_old+nb_new,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
def test_search_max_dim(self, nq, auto_id, _async):
"""
target: test search normal case
method: create connection, collection, insert and search
expected: search successfully with limit(topK)
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, default_nb,
auto_id=auto_id,
dim=max_dim)
# 2. search
log.info("test_search_max_dim: searching collection %s" % collection_w.name)
vectors = [[random.random() for _ in range(max_dim)] for _ in range(nq)]
collection_w.search(vectors[:nq], default_search_field,
default_search_params, 2,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": 2,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("index, params",
zip(ct.all_index_types[:9],
ct.default_index_params[:9]))
def test_search_after_different_index_with_params(self, dim, index, params, auto_id, _async):
"""
        target: test search after creating different indexes with their index params
        method: create a collection, build each supported index with its params, then search
        expected: search successfully with limit(topK)
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, 5000,
partition_num=1,
auto_id=auto_id,
dim=dim, is_index=True)
# 2. create index and load
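        # PQ-style indexes require dim to be divisible by m (or PQM), so fall
        # back to dim // 4 when the parametrized value does not divide dim evenly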
if params.get("m"):
if (dim % params["m"]) != 0:
params["m"] = dim//4
if params.get("PQM"):
if (dim % params["PQM"]) != 0:
params["PQM"] = dim//4
default_index = {"index_type": index, "params": params, "metric_type": "L2"}
collection_w.create_index("float_vector", default_index)
collection_w.load()
# 3. search
search_params = cf.gen_search_param(index)
vectors = [[random.random() for _ in range(dim)] for _ in range(default_nq)]
for search_param in search_params:
log.info("Searching with search params: {}".format(search_param))
collection_w.search(vectors[:default_nq], default_search_field,
search_param, default_limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("index, params",
zip(ct.all_index_types[:9],
ct.default_index_params[:9]))
def test_search_after_index_different_metric_type(self, dim, index, params, auto_id, _async):
"""
target: test search with different metric type
method: test search with different metric type
expected: searched successfully
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, 5000,
partition_num=1,
auto_id=auto_id,
dim=dim, is_index=True)
# 2. create different index
if params.get("m"):
if (dim % params["m"]) != 0:
params["m"] = dim//4
if params.get("PQM"):
if (dim % params["PQM"]) != 0:
params["PQM"] = dim//4
log.info("test_search_after_index_different_metric_type: Creating index-%s" % index)
default_index = {"index_type": index, "params": params, "metric_type": "IP"}
collection_w.create_index("float_vector", default_index)
log.info("test_search_after_index_different_metric_type: Created index-%s" % index)
collection_w.load()
# 3. search
search_params = cf.gen_search_param(index, "IP")
vectors = [[random.random() for _ in range(dim)] for _ in range(default_nq)]
for search_param in search_params:
log.info("Searching with search params: {}".format(search_param))
collection_w.search(vectors[:default_nq], default_search_field,
search_param, default_limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
def test_search_collection_multiple_times(self, nb, nq, dim, auto_id, _async):
"""
target: test search for multiple times
method: search for multiple times
expected: searched successfully
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
auto_id=auto_id,
dim=dim)
# 2. search for multiple times
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
for i in range(search_num):
log.info("test_search_collection_multiple_times: searching round %d" % (i+1))
collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
def test_search_sync_async_multiple_times(self, nb, nq, dim, auto_id):
"""
target: test async search after sync search case
method: create connection, collection, insert,
sync search and async search
expected: search successfully with limit(topK)
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
auto_id=auto_id,
dim=dim)
# 2. search
log.info("test_search_sync_async_multiple_times: searching collection %s" % collection_w.name)
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
for i in range(search_num):
log.info("test_search_sync_async_multiple_times: searching round %d" % (i + 1))
for _async in [False, True]:
collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
def test_search_multiple_vectors(self, nb, nq, dim, auto_id, _async):
"""
target: test search with multiple vectors
method: create connection, collection with multiple
vectors, insert and search
expected: search successfully with limit(topK)
"""
# 1. connect
self._connect()
# 2. create collection with multiple vectors
c_name = cf.gen_unique_str(prefix)
fields = [cf.gen_int64_field(is_primary=True), cf.gen_float_field(),
cf.gen_float_vec_field(dim=dim), cf.gen_float_vec_field(name="tmp", dim=dim)]
schema = cf.gen_collection_schema(fields=fields, auto_id=auto_id)
collection_w = self.collection_wrap.init_collection(c_name, schema=schema,
check_task=CheckTasks.check_collection_property,
check_items={"name": c_name, "schema": schema})[0]
# 3. insert
vectors = [[random.random() for _ in range(dim)] for _ in range(nb)]
vectors_tmp = [[random.random() for _ in range(dim)] for _ in range(nb)]
data = [[i for i in range(nb)], [np.float32(i) for i in range(nb)], vectors, vectors_tmp]
if auto_id:
data = [[np.float32(i) for i in range(nb)], vectors, vectors_tmp]
res = collection_w.insert(data)
insert_ids = res.primary_keys
assert collection_w.num_entities == nb
# 4. load
collection_w.load()
# 5. search all the vectors
log.info("test_search_multiple_vectors: searching collection %s" % collection_w.name)
collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})
collection_w.search(vectors[:nq], "tmp",
default_search_params, default_limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})
@pytest.mark.tags(CaseLabel.L1)
def test_search_index_one_partition(self, nb, auto_id, _async):
"""
target: test search from partition
method: search from one partition
expected: searched successfully
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
partition_num=1,
auto_id=auto_id,
is_index=True)
# 2. create index
default_index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"}
collection_w.create_index("float_vector", default_index)
collection_w.load()
# 3. search in one partition
log.info("test_search_index_one_partition: searching (1000 entities) through one partition")
limit = 1000
par = collection_w.partitions
if limit > par[1].num_entities:
limit_check = par[1].num_entities
else:
limit_check = limit
search_params = {"metric_type": "L2", "params": {"nprobe": 128}}
collection_w.search(vectors[:default_nq], default_search_field,
search_params, limit, default_search_exp,
[par[1].name], _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids[par[0].num_entities:],
"limit": limit_check,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
def test_search_index_partitions(self, nb, nq, dim, auto_id, _async):
"""
target: test search from partitions
method: search from partitions
expected: searched successfully
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
partition_num=1,
auto_id=auto_id,
dim=dim,
is_index=True)
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
# 2. create index
default_index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"}
collection_w.create_index("float_vector", default_index)
collection_w.load()
# 3. search through partitions
log.info("test_search_index_partitions: searching (1000 entities) through partitions")
par = collection_w.partitions
log.info("test_search_index_partitions: partitions: %s" % par)
limit = 1000
collection_w.search(vectors[:nq], default_search_field,
default_search_params, limit, default_search_exp,
[par[0].name, par[1].name], _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": limit,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("partition_names",
[["(.*)"], ["search(.*)"]])
def test_search_index_partitions_fuzzy(self, nb, nq, dim, partition_names, auto_id, _async):
"""
target: test search from partitions
method: search from partitions with fuzzy
partition name
expected: searched successfully
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
partition_num=1,
auto_id=auto_id,
dim=dim)
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
# 2. create index
default_index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"}
collection_w.create_index("float_vector", default_index)
# 3. search through partitions
log.info("test_search_index_partitions_fuzzy: searching through partitions")
limit = 1000
limit_check = limit
par = collection_w.partitions
if partition_names == ["search(.*)"]:
insert_ids = insert_ids[par[0].num_entities:]
if limit > par[1].num_entities:
limit_check = par[1].num_entities
collection_w.search(vectors[:nq], default_search_field,
default_search_params, limit, default_search_exp,
partition_names, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": limit_check,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
def test_search_index_partition_empty(self, nq, dim, auto_id, _async):
"""
target: test search the empty partition
method: search from the empty partition
expected: searched successfully with 0 results
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix, True, auto_id=auto_id,
dim=dim, is_index=True)[0]
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
# 2. create empty partition
partition_name = "search_partition_empty"
collection_w.create_partition(partition_name=partition_name, description="search partition empty")
par = collection_w.partitions
log.info("test_search_index_partition_empty: partitions: %s" % par)
collection_w.load()
# 3. create index
default_index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"}
collection_w.create_index("float_vector", default_index)
# 4. search the empty partition
log.info("test_search_index_partition_empty: searching %s "
"entities through empty partition" % default_limit)
collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
default_search_exp, [partition_name],
_async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": [],
"limit": 0,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
def test_search_binary_jaccard_flat_index(self, nq, dim, auto_id, _async):
"""
target: search binary_collection, and check the result: distance
method: compare the return distance value with value computed with JACCARD
expected: the return distance equals to the computed value
"""
# 1. initialize with binary data
collection_w, _, binary_raw_vector, insert_ids = self.init_collection_general(prefix, True, 2,
is_binary=True,
auto_id=auto_id,
dim=dim,
is_index=True)
# 2. create index
default_index = {"index_type": "BIN_IVF_FLAT", "params": {"nlist": 128}, "metric_type": "JACCARD"}
collection_w.create_index("binary_vector", default_index)
collection_w.load()
# 3. compute the distance
query_raw_vector, binary_vectors = cf.gen_binary_vectors(3000, dim)
distance_0 = cf.jaccard(query_raw_vector[0], binary_raw_vector[0])
distance_1 = cf.jaccard(query_raw_vector[0], binary_raw_vector[1])
# 4. search and compare the distance
search_params = {"metric_type": "JACCARD", "params": {"nprobe": 10}}
res = collection_w.search(binary_vectors[:nq], "binary_vector",
search_params, default_limit, "int64 >= 0",
_async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": 2,
"_async": _async})[0]
if _async:
res.done()
res = res.result()
assert abs(res[0]._distances[0] - min(distance_0, distance_1)) <= epsilon
@pytest.mark.tags(CaseLabel.L2)
def test_search_binary_hamming_flat_index(self, nq, dim, auto_id, _async):
"""
target: search binary_collection, and check the result: distance
method: compare the return distance value with value computed with HAMMING
expected: the return distance equals to the computed value
"""
# 1. initialize with binary data
collection_w, _, binary_raw_vector, insert_ids = self.init_collection_general(prefix, True, 2,
is_binary=True,
auto_id=auto_id,
dim=dim,
is_index=True)
# 2. create index
default_index = {"index_type": "BIN_IVF_FLAT", "params": {"nlist": 128}, "metric_type": "HAMMING"}
collection_w.create_index("binary_vector", default_index)
# 3. compute the distance
collection_w.load()
query_raw_vector, binary_vectors = cf.gen_binary_vectors(3000, dim)
distance_0 = cf.hamming(query_raw_vector[0], binary_raw_vector[0])
distance_1 = cf.hamming(query_raw_vector[0], binary_raw_vector[1])
# 4. search and compare the distance
search_params = {"metric_type": "HAMMING", "params": {"nprobe": 10}}
res = collection_w.search(binary_vectors[:nq], "binary_vector",
search_params, default_limit, "int64 >= 0",
_async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": 2,
"_async": _async})[0]
if _async:
res.done()
res = res.result()
assert abs(res[0]._distances[0] - min(distance_0, distance_1)) <= epsilon
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.xfail(reason="issue 6843")
def test_search_binary_tanimoto_flat_index(self, nq, dim, auto_id, _async):
"""
target: search binary_collection, and check the result: distance
method: compare the return distance value with value computed with TANIMOTO
expected: the return distance equals to the computed value
"""
# 1. initialize with binary data
collection_w, _, binary_raw_vector, insert_ids = self.init_collection_general(prefix, True, 2,
is_binary=True,
auto_id=auto_id,
dim=dim,
is_index=True)
log.info("auto_id= %s, _async= %s" % (auto_id, _async))
# 2. create index
default_index = {"index_type": "BIN_IVF_FLAT", "params": {"nlist": 128}, "metric_type": "TANIMOTO"}
collection_w.create_index("binary_vector", default_index)
collection_w.load()
# 3. compute the distance
query_raw_vector, binary_vectors = cf.gen_binary_vectors(3000, dim)
distance_0 = cf.tanimoto(query_raw_vector[0], binary_raw_vector[0])
distance_1 = cf.tanimoto(query_raw_vector[0], binary_raw_vector[1])
# 4. search and compare the distance
search_params = {"metric_type": "TANIMOTO", "params": {"nprobe": 10}}
res = collection_w.search(binary_vectors[:nq], "binary_vector",
search_params, default_limit, "int64 >= 0",
_async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": 2,
"_async": _async})[0]
if _async:
res.done()
res = res.result()
assert abs(res[0]._distances[0] - min(distance_0, distance_1)) <= epsilon
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("expression, limit",
zip(cf.gen_normal_expressions(),
[1000, 999, 898, 997, 2, 3]))
def test_search_with_expression(self, dim, expression, limit, _async):
"""
target: test search with different expressions
method: test search with different expressions
expected: searched successfully with correct limit(topK)
"""
# 1. initialize with data
nb = 1000
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True,
nb, dim=dim,
is_index=True)
# 2. create index
index_param = {"index_type": "IVF_FLAT", "metric_type": "L2", "params": {"nlist": 100}}
collection_w.create_index("float_vector", index_param)
collection_w.load()
# 3. search with different expressions
log.info("test_search_with_expression: searching with expression: %s" % expression)
vectors = [[random.random() for _ in range(dim)] for _ in range(default_nq)]
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, nb, expression,
_async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids,
"limit": limit,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("expression, limit",
zip(cf.gen_normal_expressions_field(default_float_field_name),
[1000, 999, 898, 997, 2, 3]))
def test_search_with_expression_auto_id(self, dim, expression, limit, _async):
"""
target: test search with different expressions
method: test search with different expressions
expected: searched successfully with correct limit(topK)
"""
# 1. initialize with data
nb = 1000
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
auto_id=True,
dim=dim,
is_index=True)
# 2. create index
index_param = {"index_type": "IVF_FLAT", "metric_type": "L2", "params": {"nlist": 100}}
collection_w.create_index("float_vector", index_param)
collection_w.load()
# 3. search with different expressions
log.info("test_search_with_expression: searching with expression: %s" % expression)
vectors = [[random.random() for _ in range(dim)] for _ in range(default_nq)]
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, nb, expression,
_async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids,
"limit": limit,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
def test_search_expression_all_data_type(self, nb, nq, dim, auto_id, _async):
"""
target: test search using different supported data type
method: search using different supported data type
expected: search success
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
is_all_data_type=True,
auto_id=auto_id,
dim=dim)
# 2. search
log.info("test_search_expression_all_data_type: Searching collection %s" % collection_w.name)
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
search_exp = "int64 >= 0 && int32 >= 0 && int16 >= 0 " \
"&& int8 >= 0 && float >= 0 && double >= 0"
res = collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
search_exp, _async=_async,
output_fields=[default_int64_field_name,
default_float_field_name],
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})[0]
if _async:
res.done()
res = res.result()
assert len(res[0][0].entity._row_data) != 0
        assert default_int64_field_name in res[0][0].entity._row_data
        assert default_float_field_name in res[0][0].entity._row_data
@pytest.mark.tags(CaseLabel.L2)
def test_search_with_output_fields_empty(self, nb, nq, dim, auto_id, _async):
"""
target: test search with output fields
method: search with empty output_field
expected: search success
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
auto_id=auto_id,
dim=dim)
# 2. search
log.info("test_search_with_output_fields_empty: Searching collection %s" % collection_w.name)
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
res = collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
default_search_exp, _async=_async,
output_fields=[],
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})[0]
if _async:
res.done()
res = res.result()
assert len(res[0][0].entity._row_data) == 0
@pytest.mark.tags(CaseLabel.L1)
def test_search_with_output_field(self, auto_id, _async):
"""
target: test search with output fields
method: search with one output_field
expected: search success
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True,
auto_id=auto_id)
# 2. search
log.info("test_search_with_output_field: Searching collection %s" % collection_w.name)
res = collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit,
default_search_exp, _async=_async,
output_fields=[default_int64_field_name],
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})[0]
if _async:
res.done()
res = res.result()
assert len(res[0][0].entity._row_data) != 0
assert default_int64_field_name in res[0][0].entity._row_data
@pytest.mark.tags(CaseLabel.L2)
def test_search_with_output_fields(self, nb, nq, dim, auto_id, _async):
"""
target: test search with output fields
method: search with multiple output_field
expected: search success
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
is_all_data_type=True,
auto_id=auto_id,
dim=dim)
# 2. search
log.info("test_search_with_output_fields: Searching collection %s" % collection_w.name)
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
res = collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
default_search_exp, _async=_async,
output_fields=[default_int64_field_name,
default_float_field_name],
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})[0]
if _async:
res.done()
res = res.result()
assert len(res[0][0].entity._row_data) != 0
        assert default_int64_field_name in res[0][0].entity._row_data
        assert default_float_field_name in res[0][0].entity._row_data
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("output_fields", [["*"], ["*", default_float_field_name]])
def test_search_with_output_field_wildcard(self, output_fields, auto_id, _async):
"""
target: test search with output fields using wildcard
method: search with one output_field (wildcard)
expected: search success
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True,
auto_id=auto_id)
# 2. search
log.info("test_search_with_output_field_wildcard: Searching collection %s" % collection_w.name)
res = collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit,
default_search_exp, _async=_async,
output_fields=output_fields,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})[0]
if _async:
res.done()
res = res.result()
assert len(res[0][0].entity._row_data) != 0
        assert default_int64_field_name in res[0][0].entity._row_data
        assert default_float_field_name in res[0][0].entity._row_data
@pytest.mark.tags(CaseLabel.L2)
def test_search_multi_collections(self, nb, nq, dim, auto_id, _async):
"""
target: test search multi collections of L2
method: add vectors into 10 collections, and search
expected: search status ok, the length of result
"""
self._connect()
collection_num = 10
for i in range(collection_num):
# 1. initialize with data
log.info("test_search_multi_collections: search round %d" % (i + 1))
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
auto_id=auto_id,
dim=dim)
# 2. search
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
log.info("test_search_multi_collections: searching %s entities (nq = %s) from collection %s" %
(default_limit, nq, collection_w.name))
collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
def test_search_concurrent_multi_threads(self, nb, nq, dim, auto_id, _async):
"""
        target: test concurrent search with multiple threads
        method: search with 10 threads, all sharing the same connection
expected: status ok and the returned vectors should be query_records
"""
# 1. initialize with data
threads_num = 10
threads = []
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
auto_id=auto_id,
dim=dim)
def search(collection_w):
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})
        # 2. search with multiple threads
        log.info("test_search_concurrent_multi_threads: searching with %s threads" % threads_num)
for i in range(threads_num):
t = threading.Thread(target=search, args=(collection_w,))
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
|
clang_format.py
|
#!/usr/bin/env python
"""
A script that provides:
1. Ability to grab binaries where possible from LLVM.
2. Ability to download binaries from MongoDB cache for clang-format.
3. Validates clang-format is the right version.
4. Has support for checking which files are to be checked.
5. Supports validating and updating a set of files to the right coding style.
"""
from __future__ import print_function, absolute_import
import Queue
import difflib
import glob
import itertools
import os
import re
import shutil
import string
import subprocess
import sys
import tarfile
import tempfile
import threading
import time
import urllib2
from distutils import spawn
from optparse import OptionParser
from multiprocessing import cpu_count
# Get relative imports to work when the package is not installed on the PYTHONPATH.
if __name__ == "__main__" and __package__ is None:
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(os.path.realpath(__file__)))))
from buildscripts import moduleconfig
##############################################################################
#
# Constants for clang-format
#
#
# Expected version of clang-format
CLANG_FORMAT_VERSION = "3.8.0"
CLANG_FORMAT_SHORT_VERSION = "3.8"
# Name of clang-format as a binary
CLANG_FORMAT_PROGNAME = "clang-format"
# URL location of the "cached" copy of clang-format to download
# for users which do not have clang-format installed
CLANG_FORMAT_HTTP_LINUX_CACHE = "https://s3.amazonaws.com/boxes.10gen.com/build/clang-format-3.8-rhel55.tar.gz"
CLANG_FORMAT_HTTP_DARWIN_CACHE = "https://s3.amazonaws.com/boxes.10gen.com/build/clang%2Bllvm-3.8.0-x86_64-apple-darwin.tar.xz"
# Path in the tarball to the clang-format binary
CLANG_FORMAT_SOURCE_TAR_BASE = string.Template("clang+llvm-$version-$tar_path/bin/" + CLANG_FORMAT_PROGNAME)
# Path to the modules in the mongodb source tree
# Has to match the string in SConstruct
MODULE_DIR = "src/mongo/db/modules"
##############################################################################
# Copied from python 2.7 version of subprocess.py
# Exception classes used by this module.
class CalledProcessError(Exception):
"""This exception is raised when a process run by check_call() or
check_output() returns a non-zero exit status.
The exit status will be stored in the returncode attribute;
check_output() will also store the output in the output attribute.
"""
def __init__(self, returncode, cmd, output=None):
self.returncode = returncode
self.cmd = cmd
self.output = output
def __str__(self):
return ("Command '%s' returned non-zero exit status %d with output %s" %
(self.cmd, self.returncode, self.output))
# Copied from python 2.7 version of subprocess.py
def check_output(*popenargs, **kwargs):
r"""Run command with arguments and return its output as a byte string.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> check_output(["ls", "-l", "/dev/null"])
'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
The stdout argument is not allowed as it is used internally.
To capture standard error in the result, use stderr=STDOUT.
>>> check_output(["/bin/sh", "-c",
... "ls -l non_existent_file ; exit 0"],
... stderr=STDOUT)
'ls: non_existent_file: No such file or directory\n'
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise CalledProcessError(retcode, cmd, output)
return output
def callo(args):
"""Call a program, and capture its output
"""
return check_output(args)
def get_tar_path(version, tar_path):
""" Get the path to clang-format in the llvm tarball
"""
return CLANG_FORMAT_SOURCE_TAR_BASE.substitute(
version=version,
tar_path=tar_path)
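# For illustration, with the constants defined above the substitution yields:
#
#     get_tar_path("3.8.0", "x86_64-apple-darwin")
#     -> "clang+llvm-3.8.0-x86_64-apple-darwin/bin/clang-format"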
def extract_clang_format(tar_path):
# Extract just the clang-format binary
# On OSX, we shell out to tar because tarfile doesn't support xz compression
if sys.platform == 'darwin':
subprocess.call(['tar', '-xzf', tar_path, '*clang-format*'])
# Otherwise we use tarfile because some versions of tar don't support wildcards without
# a special flag
else:
tarfp = tarfile.open(tar_path)
for name in tarfp.getnames():
if name.endswith('clang-format'):
tarfp.extract(name)
tarfp.close()
def get_clang_format_from_cache_and_extract(url, tarball_ext):
"""Get clang-format from mongodb's cache
and extract the tarball
"""
dest_dir = tempfile.gettempdir()
temp_tar_file = os.path.join(dest_dir, "temp.tar" + tarball_ext)
# Download from file
print("Downloading clang-format %s from %s, saving to %s" % (CLANG_FORMAT_VERSION,
url, temp_tar_file))
# Retry download up to 5 times.
num_tries = 5
for attempt in range(num_tries):
try:
resp = urllib2.urlopen(url)
with open(temp_tar_file, 'wb') as f:
f.write(resp.read())
break
except urllib2.URLError:
if attempt == num_tries - 1:
raise
continue
extract_clang_format(temp_tar_file)
def get_clang_format_from_darwin_cache(dest_file):
"""Download clang-format from llvm.org, unpack the tarball,
and put clang-format in the specified place
"""
get_clang_format_from_cache_and_extract(CLANG_FORMAT_HTTP_DARWIN_CACHE, ".xz")
# Destination Path
shutil.move(get_tar_path(CLANG_FORMAT_VERSION, "x86_64-apple-darwin"), dest_file)
def get_clang_format_from_linux_cache(dest_file):
"""Get clang-format from mongodb's cache
"""
get_clang_format_from_cache_and_extract(CLANG_FORMAT_HTTP_LINUX_CACHE, ".gz")
# Destination Path
shutil.move("build/bin/clang-format", dest_file)
class ClangFormat(object):
"""Class encapsulates finding a suitable copy of clang-format,
    and linting/formatting an individual file
"""
def __init__(self, path, cache_dir):
self.path = None
clang_format_progname_ext = ""
if sys.platform == "win32":
clang_format_progname_ext += ".exe"
# Check the clang-format the user specified
if path is not None:
if os.path.isfile(path):
self.path = path
else:
print("WARNING: Could not find clang-format %s" % (path))
# Check the environment variable
if "MONGO_CLANG_FORMAT" in os.environ:
self.path = os.environ["MONGO_CLANG_FORMAT"]
if self.path and not self._validate_version():
self.path = None
# Check the users' PATH environment variable now
if self.path is None:
            # Check for various versions, starting with binaries that have version-specific
            # suffixes, in the user's path
programs = [
CLANG_FORMAT_PROGNAME + "-" + CLANG_FORMAT_VERSION,
CLANG_FORMAT_PROGNAME + "-" + CLANG_FORMAT_SHORT_VERSION,
CLANG_FORMAT_PROGNAME,
]
if sys.platform == "win32":
for i in range(len(programs)):
programs[i] += '.exe'
for program in programs:
self.path = spawn.find_executable(program)
if self.path:
if not self._validate_version():
self.path = None
else:
break
# If Windows, try to grab it from Program Files
# Check both native Program Files and WOW64 version
if sys.platform == "win32":
programfiles = [
os.environ["ProgramFiles"],
os.environ["ProgramFiles(x86)"],
]
for programfile in programfiles:
win32bin = os.path.join(programfile, "LLVM\\bin\\clang-format.exe")
if os.path.exists(win32bin):
self.path = win32bin
break
# Have not found it yet, download it from the web
if self.path is None:
if not os.path.isdir(cache_dir):
os.makedirs(cache_dir)
self.path = os.path.join(cache_dir, CLANG_FORMAT_PROGNAME + "-" + CLANG_FORMAT_VERSION + clang_format_progname_ext)
# Download a new version if the cache is empty or stale
if not os.path.isfile(self.path) or not self._validate_version():
if sys.platform.startswith("linux"):
get_clang_format_from_linux_cache(self.path)
elif sys.platform == "darwin":
get_clang_format_from_darwin_cache(self.path)
else:
print("ERROR: clang-format.py does not support downloading clang-format " +
" on this platform, please install clang-format " + CLANG_FORMAT_VERSION)
# Validate we have the correct version
        # We can only fail here if the user specified a clang-format binary and it is the wrong
# version
if not self._validate_version():
print("ERROR: exiting because of previous warning.")
sys.exit(1)
self.print_lock = threading.Lock()
def _validate_version(self):
"""Validate clang-format is the expected version
"""
cf_version = callo([self.path, "--version"])
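        # clang-format typically prints something like
        # "clang-format version 3.8.0 (tags/...)", so a substring check
        # against the expected version string is sufficient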
if CLANG_FORMAT_VERSION in cf_version:
return True
print("WARNING: clang-format found in path, but incorrect version found at " +
self.path + " with version: " + cf_version)
return False
def _lint(self, file_name, print_diff):
"""Check the specified file has the correct format
"""
with open(file_name, 'rb') as original_text:
original_file = original_text.read()
# Get formatted file as clang-format would format the file
formatted_file = callo([self.path, "--style=file", file_name])
if original_file != formatted_file:
if print_diff:
original_lines = original_file.splitlines()
formatted_lines = formatted_file.splitlines()
result = difflib.unified_diff(original_lines, formatted_lines)
# Take a lock to ensure diffs do not get mixed when printed to the screen
with self.print_lock:
print("ERROR: Found diff for " + file_name)
print("To fix formatting errors, run %s --style=file -i %s" %
(self.path, file_name))
for line in result:
print(line.rstrip())
return False
return True
def lint(self, file_name):
"""Check the specified file has the correct format
"""
return self._lint(file_name, print_diff=True)
def format(self, file_name):
"""Update the format of the specified file
"""
if self._lint(file_name, print_diff=False):
return True
# Update the file with clang-format
formatted = not subprocess.call([self.path, "--style=file", "-i", file_name])
# Version 3.8 generates files like foo.cpp~RF83372177.TMP when it formats foo.cpp
# on Windows, we must clean these up
if sys.platform == "win32":
glob_pattern = file_name + "*.TMP"
for fglob in glob.glob(glob_pattern):
os.unlink(fglob)
return formatted
def parallel_process(items, func):
"""Run a set of work items to completion
"""
try:
cpus = cpu_count()
except NotImplementedError:
cpus = 1
task_queue = Queue.Queue()
# Use a list so that worker function will capture this variable
pp_event = threading.Event()
pp_result = [True]
pp_lock = threading.Lock()
def worker():
"""Worker thread to process work items in parallel
"""
while not pp_event.is_set():
try:
item = task_queue.get_nowait()
except Queue.Empty:
# if the queue is empty, exit the worker thread
pp_event.set()
return
try:
ret = func(item)
finally:
# Tell the queue we finished with the item
task_queue.task_done()
# Return early if we fail, and signal we are done
if not ret:
with pp_lock:
pp_result[0] = False
pp_event.set()
return
# Enqueue all the work we want to process
for item in items:
task_queue.put(item)
# Process all the work
threads = []
for cpu in range(cpus):
thread = threading.Thread(target=worker)
thread.daemon = True
thread.start()
threads.append(thread)
# Wait for the threads to finish
# Loop with a timeout so that we can process Ctrl-C interrupts
# Note: On Python 2.6 wait always returns None so we check is_set also,
# This works because we only set the event once, and never reset it
while not pp_event.wait(1) and not pp_event.is_set():
time.sleep(1)
for thread in threads:
thread.join()
return pp_result[0]
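# Example of how this helper is used further below (see _lint_files/_format_files):
#
#     parallel_process([os.path.abspath(f) for f in files], clang_format.lint)
#
# It returns True only if every work item succeeded.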
def get_base_dir():
"""Get the base directory for mongo repo.
This script assumes that it is running in buildscripts/, and uses
that to find the base directory.
"""
try:
return subprocess.check_output(['git', 'rev-parse', '--show-toplevel']).rstrip()
except:
# We are not in a valid git directory. Use the script path instead.
return os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
def get_repos():
"""Get a list of Repos to check clang-format for
"""
base_dir = get_base_dir()
# Get a list of modules
# TODO: how do we filter rocks, does it matter?
mongo_modules = moduleconfig.discover_module_directories(
os.path.join(base_dir, MODULE_DIR), None)
paths = [os.path.join(base_dir, MODULE_DIR, m) for m in mongo_modules]
paths.append(base_dir)
return [Repo(p) for p in paths]
class Repo(object):
"""Class encapsulates all knowledge about a git repository, and its metadata
to run clang-format.
"""
def __init__(self, path):
self.path = path
self.root = self._get_root()
def _callgito(self, args):
"""Call git for this repository, and return the captured output
"""
        # These two flags are the equivalent of -C in newer versions of Git,
        # but we use them to support versions before 1.8.5; their effect depends
        # on the command and on what the current directory is
return callo(['git', '--git-dir', os.path.join(self.path, ".git"),
'--work-tree', self.path] + args)
def _callgit(self, args):
"""Call git for this repository without capturing output
This is designed to be used when git returns non-zero exit codes.
"""
        # These two flags are the equivalent of -C in newer versions of Git,
        # but we use them to support versions before 1.8.5; their effect depends
        # on the command and on what the current directory is
return subprocess.call(['git', '--git-dir', os.path.join(self.path, ".git"),
'--work-tree', self.path] + args)
def _get_local_dir(self, path):
"""Get a directory path relative to the git root directory
"""
if os.path.isabs(path):
return os.path.relpath(path, self.root)
return path
def get_candidates(self, candidates):
"""Get the set of candidate files to check by querying the repository
Returns the full path to the file for clang-format to consume.
"""
if candidates is not None and len(candidates) > 0:
candidates = [self._get_local_dir(f) for f in candidates]
valid_files = list(set(candidates).intersection(self.get_candidate_files()))
else:
valid_files = list(self.get_candidate_files())
# Get the full file name here
valid_files = [os.path.normpath(os.path.join(self.root, f)) for f in valid_files]
return valid_files
def get_root(self):
"""Get the root directory for this repository
"""
return self.root
def _get_root(self):
"""Gets the root directory for this repository from git
"""
gito = self._callgito(['rev-parse', '--show-toplevel'])
return gito.rstrip()
def _git_ls_files(self, cmd):
"""Run git-ls-files and filter the list of files to a valid candidate list
"""
gito = self._callgito(cmd)
# This allows us to pick all the interesting files
# in the mongo and mongo-enterprise repos
file_list = [line.rstrip()
for line in gito.splitlines()
if (line.startswith("jstests") or line.startswith("src"))
and not line.startswith("src/third_party/")
and not line.startswith("src/mongo/gotools/")]
files_match = re.compile('\\.(h|cpp|js)$')
file_list = [a for a in file_list if files_match.search(a)]
return file_list
def get_candidate_files(self):
"""Query git to get a list of all files in the repo to consider for analysis
"""
return self._git_ls_files(["ls-files", "--cached"])
def get_working_tree_candidate_files(self):
"""Query git to get a list of all files in the working tree to consider for analysis
"""
return self._git_ls_files(["ls-files", "--cached", "--others"])
def get_working_tree_candidates(self):
"""Get the set of candidate files to check by querying the repository
Returns the full path to the file for clang-format to consume.
"""
valid_files = list(self.get_working_tree_candidate_files())
# Get the full file name here
valid_files = [os.path.normpath(os.path.join(self.root, f)) for f in valid_files]
# Filter out files that git thinks exist but were removed.
valid_files = [f for f in valid_files if os.path.exists(f)]
return valid_files
def is_detached(self):
"""Is the current working tree in a detached HEAD state?
"""
# symbolic-ref returns 1 if the repo is in a detached HEAD state
return self._callgit(["symbolic-ref", "--quiet", "HEAD"])
def is_ancestor(self, parent, child):
"""Is the specified parent hash an ancestor of child hash?
"""
# merge base returns 0 if parent is an ancestor of child
return not self._callgit(["merge-base", "--is-ancestor", parent, child])
def is_commit(self, sha1):
"""Is the specified hash a valid git commit?
"""
# cat-file -e returns 0 if it is a valid hash
return not self._callgit(["cat-file", "-e", "%s^{commit}" % sha1])
def is_working_tree_dirty(self):
"""Does the current working tree have changes?
"""
# diff returns 1 if the working tree has local changes
return self._callgit(["diff", "--quiet"])
def does_branch_exist(self, branch):
"""Does the branch exist?
"""
# rev-parse returns 0 if the branch exists
return not self._callgit(["rev-parse", "--verify", branch])
def get_merge_base(self, commit):
"""Get the merge base between 'commit' and HEAD
"""
return self._callgito(["merge-base", "HEAD", commit]).rstrip()
def get_branch_name(self):
"""Get the current branch name, short form
This returns "master", not "refs/head/master"
Will not work if the current branch is detached
"""
branch = self.rev_parse(["--abbrev-ref", "HEAD"])
if branch == "HEAD":
raise ValueError("Branch is currently detached")
return branch
def add(self, command):
"""git add wrapper
"""
return self._callgito(["add"] + command)
def checkout(self, command):
"""git checkout wrapper
"""
return self._callgito(["checkout"] + command)
def commit(self, command):
"""git commit wrapper
"""
return self._callgito(["commit"] + command)
def diff(self, command):
"""git diff wrapper
"""
return self._callgito(["diff"] + command)
def log(self, command):
"""git log wrapper
"""
return self._callgito(["log"] + command)
def rev_parse(self, command):
"""git rev-parse wrapper
"""
return self._callgito(["rev-parse"] + command).rstrip()
def rm(self, command):
"""git rm wrapper
"""
return self._callgito(["rm"] + command)
def show(self, command):
"""git show wrapper
"""
return self._callgito(["show"] + command)
def get_list_from_lines(lines):
""""Convert a string containing a series of lines into a list of strings
"""
return [line.rstrip() for line in lines.splitlines()]
def get_files_to_check_working_tree():
"""Get a list of files to check form the working tree.
This will pick up files not managed by git.
"""
repos = get_repos()
valid_files = list(itertools.chain.from_iterable([r.get_working_tree_candidates() for r in repos]))
return valid_files
def get_files_to_check():
"""Get a list of files that need to be checked
based on which files are managed by git.
"""
repos = get_repos()
valid_files = list(itertools.chain.from_iterable([r.get_candidates(None) for r in repos]))
return valid_files
def get_files_to_check_from_patch(patches):
"""Take a patch file generated by git diff, and scan the patch for a list of files to check.
"""
candidates = []
# Get a list of candidate_files
check = re.compile(r"^diff --git a\/([\w\/\.\-]+) b\/[\w\/\.\-]+")
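    # For example, a header line such as
    #   "diff --git a/src/mongo/db/commands.cpp b/src/mongo/db/commands.cpp"
    # (path purely illustrative) yields "src/mongo/db/commands.cpp" via group(1)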
lines = []
for patch in patches:
with open(patch, "rb") as infile:
lines += infile.readlines()
candidates = [check.match(line).group(1) for line in lines if check.match(line)]
repos = get_repos()
valid_files = list(itertools.chain.from_iterable([r.get_candidates(candidates) for r in repos]))
return valid_files
def _get_build_dir():
"""Get the location of the scons' build directory in case we need to download clang-format
"""
return os.path.join(get_base_dir(), "build")
def _lint_files(clang_format, files):
"""Lint a list of files with clang-format
"""
clang_format = ClangFormat(clang_format, _get_build_dir())
lint_clean = parallel_process([os.path.abspath(f) for f in files], clang_format.lint)
if not lint_clean:
print("ERROR: Code Style does not match coding style")
sys.exit(1)
def lint_patch(clang_format, infile):
"""Lint patch command entry point
"""
files = get_files_to_check_from_patch(infile)
# Patch may have files that we do not want to check which is fine
if files:
_lint_files(clang_format, files)
def lint(clang_format):
"""Lint files command entry point
"""
files = get_files_to_check()
_lint_files(clang_format, files)
return True
def lint_all(clang_format):
"""Lint files command entry point based on working tree
"""
files = get_files_to_check_working_tree()
_lint_files(clang_format, files)
return True
def _format_files(clang_format, files):
"""Format a list of files with clang-format
"""
clang_format = ClangFormat(clang_format, _get_build_dir())
format_clean = parallel_process([os.path.abspath(f) for f in files], clang_format.format)
if not format_clean:
print("ERROR: failed to format files")
sys.exit(1)
def format_func(clang_format):
"""Format files command entry point
"""
files = get_files_to_check()
_format_files(clang_format, files)
def reformat_branch(clang_format, commit_prior_to_reformat, commit_after_reformat):
"""Reformat a branch made before a clang-format run
"""
clang_format = ClangFormat(clang_format, _get_build_dir())
if os.getcwd() != get_base_dir():
raise ValueError("reformat-branch must be run from the repo root")
if not os.path.exists("buildscripts/clang_format.py"):
raise ValueError("reformat-branch is only supported in the mongo repo")
repo = Repo(get_base_dir())
# Validate that user passes valid commits
if not repo.is_commit(commit_prior_to_reformat):
raise ValueError("Commit Prior to Reformat '%s' is not a valid commit in this repo" %
commit_prior_to_reformat)
if not repo.is_commit(commit_after_reformat):
raise ValueError("Commit After Reformat '%s' is not a valid commit in this repo" %
commit_after_reformat)
if not repo.is_ancestor(commit_prior_to_reformat, commit_after_reformat):
raise ValueError(("Commit Prior to Reformat '%s' is not a valid ancestor of Commit After" +
" Reformat '%s' in this repo") % (commit_prior_to_reformat, commit_after_reformat))
# Validate the user is on a local branch that has the right merge base
if repo.is_detached():
raise ValueError("You must not run this script in a detached HEAD state")
# Validate the user has no pending changes
if repo.is_working_tree_dirty():
raise ValueError("Your working tree has pending changes. You must have a clean working tree before proceeding.")
merge_base = repo.get_merge_base(commit_prior_to_reformat)
if not merge_base == commit_prior_to_reformat:
raise ValueError("Please rebase to '%s' and resolve all conflicts before running this script" % (commit_prior_to_reformat))
    # We assume the target branch is master; it could be a different branch if needed for testing
merge_base = repo.get_merge_base("master")
if not merge_base == commit_prior_to_reformat:
raise ValueError("This branch appears to already have advanced too far through the merge process")
# Everything looks good so lets start going through all the commits
branch_name = repo.get_branch_name()
new_branch = "%s-reformatted" % branch_name
if repo.does_branch_exist(new_branch):
raise ValueError("The branch '%s' already exists. Please delete the branch '%s', or rename the current branch." % (new_branch, new_branch))
commits = get_list_from_lines(repo.log(["--reverse", "--pretty=format:%H", "%s..HEAD" % commit_prior_to_reformat]))
previous_commit_base = commit_after_reformat
files_match = re.compile('\\.(h|cpp|js)$')
# Go through all the commits the user made on the local branch and migrate to a new branch
# that is based on post_reformat commits instead
for commit_hash in commits:
repo.checkout(["--quiet", commit_hash])
deleted_files = []
# Format each of the files by checking out just a single commit from the user's branch
commit_files = get_list_from_lines(repo.diff(["HEAD~", "--name-only"]))
for commit_file in commit_files:
# Format each file needed if it was not deleted
if not os.path.exists(commit_file):
print("Skipping file '%s' since it has been deleted in commit '%s'" % (
commit_file, commit_hash))
deleted_files.append(commit_file)
continue
if files_match.search(commit_file):
clang_format.format(commit_file)
else:
print("Skipping file '%s' since it is not a file clang_format should format" %
commit_file)
# Check if anything needed reformatting, and if so amend the commit
if not repo.is_working_tree_dirty():
print ("Commit %s needed no reformatting" % commit_hash)
else:
repo.commit(["--all", "--amend", "--no-edit"])
# Rebase our new commit on top the post-reformat commit
previous_commit = repo.rev_parse(["HEAD"])
# Checkout the new branch with the reformatted commits
# Note: we will not name as a branch until we are done with all commits on the local branch
repo.checkout(["--quiet", previous_commit_base])
# Copy each file from the reformatted commit on top of the post reformat
diff_files = get_list_from_lines(repo.diff(["%s~..%s" % (previous_commit, previous_commit),
"--name-only"]))
for diff_file in diff_files:
# If the file was deleted in the commit we are reformatting, we need to delete it again
if diff_file in deleted_files:
repo.rm([diff_file])
continue
# The file has been added or modified, continue as normal
file_contents = repo.show(["%s:%s" % (previous_commit, diff_file)])
root_dir = os.path.dirname(diff_file)
if root_dir and not os.path.exists(root_dir):
os.makedirs(root_dir)
with open(diff_file, "w+") as new_file:
new_file.write(file_contents)
repo.add([diff_file])
# Create a new commit onto clang-formatted branch
repo.commit(["--reuse-message=%s" % previous_commit])
previous_commit_base = repo.rev_parse(["HEAD"])
# Create a new branch to mark the hashes we have been using
repo.checkout(["-b", new_branch])
print("reformat-branch is done running.\n")
print("A copy of your branch has been made named '%s', and formatted with clang-format.\n" % new_branch)
print("The original branch has been left unchanged.")
print("The next step is to rebase the new branch on 'master'.")
def usage():
"""Print usage
"""
print("clang-format.py supports 5 commands [ lint, lint-all, lint-patch, format, reformat-branch].")
def main():
"""Main entry point
"""
parser = OptionParser()
parser.add_option("-c", "--clang-format", type="string", dest="clang_format")
(options, args) = parser.parse_args(args=sys.argv)
if len(args) > 1:
command = args[1]
if command == "lint":
lint(options.clang_format)
elif command == "lint-all":
lint_all(options.clang_format)
elif command == "lint-patch":
lint_patch(options.clang_format, args[2:])
elif command == "format":
format_func(options.clang_format)
elif command == "reformat-branch":
            if len(args) < 4:
print("ERROR: reformat-branch takes two parameters: commit_prior_to_reformat commit_after_reformat")
return
reformat_branch(options.clang_format, args[2], args[3])
else:
usage()
else:
usage()
if __name__ == "__main__":
main()
|
session.py
|
"""API for communicating with twitch"""
from __future__ import absolute_import
import functools
import logging
import os
import threading
import m3u8
import oauthlib.oauth2
import requests
import requests.utils
import requests_oauthlib
from pytwitcherapi.chat import client
from . import constants, exceptions, models, oauth
__all__ = ['needs_auth', 'TwitchSession']
log = logging.getLogger(__name__)
TWITCH_KRAKENURL = 'https://api.twitch.tv/kraken/'
"""The baseurl for the twitch api"""
TWITCH_HEADER_ACCEPT = 'application/vnd.twitchtv.v3+json'
"""The header for the ``Accept`` key to tell twitch which api version it should use"""
TWITCH_USHERURL = 'http://usher.twitch.tv/api/'
"""The baseurl for the twitch usher api"""
TWITCH_APIURL = 'http://api.twitch.tv/api/'
"""The baseurl for the old twitch api"""
TWITCH_STATUSURL = 'http://twitchstatus.com/api/status?type=chat'
AUTHORIZATION_BASE_URL = 'https://api.twitch.tv/kraken/oauth2/authorize'
"""Authorisation Endpoint"""
CLIENT_ID = os.environ.get("PYTWITCHER_CLIENT_ID") or '642a2vtmqfumca8hmfcpkosxlkmqifb'
"""The client id of pytwitcher on twitch.
Use environment variable ``PYTWITCHER_CLIENT_ID`` or pytwitcher default value.
"""
SCOPES = ['user_read', 'chat_login']
"""The scopes that PyTwitcher needs"""
def needs_auth(meth):
"""Wraps a method of :class:`TwitchSession` and
raises an :class:`exceptions.NotAuthorizedError`
if before calling the method, the session isn't authorized.
:param meth:
:type meth:
:returns: the wrapped method
:rtype: Method
:raises: None
"""
@functools.wraps(meth)
def wrapped(*args, **kwargs):
if not args[0].authorized:
raise exceptions.NotAuthorizedError('Please login first!')
return meth(*args, **kwargs)
return wrapped
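# A minimal usage sketch (``follow`` is a hypothetical method name; in this
# module the decorator is meant for TwitchSession methods that need a login):
#
#     @needs_auth
#     def follow(self, channel):
#         ...
#
# Calling such a method on an unauthorized session raises NotAuthorizedError.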
class OAuthSession(requests_oauthlib.OAuth2Session):
"""Session with oauth2 support.
You can still use http requests.
"""
def __init__(self):
"""Initialize a new oauth session
:raises: None
"""
client = oauth.TwitchOAuthClient(client_id=CLIENT_ID)
super(OAuthSession, self).__init__(client_id=CLIENT_ID,
client=client,
scope=SCOPES,
redirect_uri=constants.REDIRECT_URI)
self.login_server = None
"""The server that handles the login redirect"""
self.login_thread = None
"""The thread that serves the login server"""
def request(self, method, url, **kwargs):
"""Constructs a :class:`requests.Request`, prepares it and sends it.
Raises HTTPErrors by default.
:param method: method for the new :class:`Request` object.
:type method: :class:`str`
:param url: URL for the new :class:`Request` object.
:type url: :class:`str`
:param kwargs: keyword arguments of :meth:`requests.Session.request`
        :returns: a response object
:rtype: :class:`requests.Response`
:raises: :class:`requests.HTTPError`
"""
        # For non-https urls, bypass the OAuth2Session request (which refuses
        # insecure transport) and use the plain requests.Session.request instead
        if oauthlib.oauth2.is_secure_transport(url):
            m = super(OAuthSession, self).request
        else:
            m = super(requests_oauthlib.OAuth2Session, self).request
log.debug("%s \"%s\" with %s", method, url, kwargs)
response = m(method, url, **kwargs)
response.raise_for_status()
return response
def start_login_server(self, ):
"""Start a server that will get a request from a user logging in.
This uses the Implicit Grant Flow of OAuth2. The user is asked
to login to twitch and grant PyTwitcher authorization.
        Once the user agrees, they are redirected to a URL.
        This server will respond to that URL and get the oauth token.
        The server serves in another thread. To shut it down, call
:meth:`TwitchSession.shutdown_login_server`.
This sets the :data:`TwitchSession.login_server`,
:data:`TwitchSession.login_thread` variables.
:returns: The created server
:rtype: :class:`BaseHTTPServer.HTTPServer`
:raises: None
"""
self.login_server = oauth.LoginServer(session=self)
target = self.login_server.serve_forever
self.login_thread = threading.Thread(target=target)
self.login_thread.setDaemon(True)
log.debug('Starting login server thread.')
self.login_thread.start()
def shutdown_login_server(self, ):
"""Shutdown the login server and thread
:returns: None
:rtype: None
:raises: None
"""
log.debug('Shutting down the login server thread.')
self.login_server.shutdown()
self.login_server.server_close()
self.login_thread.join()
def get_auth_url(self, ):
"""Return the url for the user to authorize PyTwitcher
:returns: The url the user should visit to authorize PyTwitcher
:rtype: :class:`str`
:raises: None
"""
return self.authorization_url(AUTHORIZATION_BASE_URL)[0]
class TwitchSession(OAuthSession):
"""Session for making requests to the twitch api
Use :meth:`TwitchSession.kraken_request`,
:meth:`TwitchSession.usher_request`,
:meth:`TwitchSession.oldapi_request` to make easier calls to the api
directly.
To get authorization, the user has to grant PyTwitcher access.
The workflow goes like this:
1. Start the login server with :meth:`TwitchSession.start_login_server`.
2. User should visit :meth:`TwitchSession.get_auth_url` in his
       browser and follow instructions (e.g. Login and Allow PyTwitcher).
3. Check if the session is authorized with :meth:`TwitchSession.authorized`.
4. Shut the login server down with :meth:`TwitchSession.shutdown_login_server`.
Now you can use methods that need authorization.
"""
def __init__(self):
"""Initialize a new TwitchSession
:raises: None
"""
super(TwitchSession, self).__init__()
self.baseurl = ''
"""The baseurl that gets prepended to every request url"""
self.current_user = None
"""The currently logined user."""
self._token = None
"""The oauth token"""
@property
def token(self, ):
"""Return the oauth token
:returns: the token
:rtype: :class:`dict`
:raises: None
"""
return self._token
@token.setter
def token(self, token):
"""Set the oauth token and the current_user
:param token: the oauth token
:type token: :class:`dict`
:returns: None
:rtype: None
:raises: None
"""
self._token = token
if token:
self.current_user = self.query_login_user()
def kraken_request(self, method, endpoint, **kwargs):
"""Make a request to one of the kraken api endpoints.
Headers are automatically set to accept :data:`TWITCH_HEADER_ACCEPT`.
Also the client id from :data:`CLIENT_ID` will be set.
The url will be constructed from :data:`TWITCH_KRAKENURL` and
the given endpoint.
:param method: the request method
:type method: :class:`str`
:param endpoint: the endpoint of the kraken api.
The base url is automatically provided.
:type endpoint: :class:`str`
:param kwargs: keyword arguments of :meth:`requests.Session.request`
:returns: a response object
:rtype: :class:`requests.Response`
:raises: :class:`requests.HTTPError`
"""
url = TWITCH_KRAKENURL + endpoint
headers = kwargs.setdefault('headers', {})
headers['Accept'] = TWITCH_HEADER_ACCEPT
headers['Client-ID'] = CLIENT_ID # https://github.com/justintv/Twitch-API#rate-limits
return self.request(method, url, **kwargs)
def usher_request(self, method, endpoint, **kwargs):
"""Make a request to one of the usher api endpoints.
The url will be constructed from :data:`TWITCH_USHERURL` and
the given endpoint.
:param method: the request method
:type method: :class:`str`
:param endpoint: the endpoint of the usher api.
The base url is automatically provided.
:type endpoint: :class:`str`
:param kwargs: keyword arguments of :meth:`requests.Session.request`
:returns: a response object
:rtype: :class:`requests.Response`
:raises: :class:`requests.HTTPError`
"""
url = TWITCH_USHERURL + endpoint
return self.request(method, url, **kwargs)
def oldapi_request(self, method, endpoint, **kwargs):
"""Make a request to one of the old api endpoints.
The url will be constructed from :data:`TWITCH_APIURL` and
the given endpoint.
:param method: the request method
:type method: :class:`str`
:param endpoint: the endpoint of the old api.
The base url is automatically provided.
:type endpoint: :class:`str`
:param kwargs: keyword arguments of :meth:`requests.Session.request`
:returns: a response object
:rtype: :class:`requests.Response`
:raises: :class:`requests.HTTPError`
"""
headers = kwargs.setdefault('headers', {})
headers['Client-ID'] = CLIENT_ID # https://github.com/justintv/Twitch-API#rate-limits
url = TWITCH_APIURL + endpoint
return self.request(method, url, **kwargs)
def fetch_viewers(self, game):
"""Query the viewers and channels of the given game and
set them on the object
:returns: the given game
:rtype: :class:`models.Game`
:raises: None
"""
r = self.kraken_request('GET', 'streams/summary',
params={'game': game.name}).json()
game.viewers = r['viewers']
game.channels = r['channels']
return game
def search_games(self, query, live=True):
"""Search for games that are similar to the query
:param query: the query string
:type query: :class:`str`
:param live: If true, only returns games that are live on at least one
channel
:type live: :class:`bool`
:returns: A list of games
:rtype: :class:`list` of :class:`models.Game` instances
:raises: None
"""
r = self.kraken_request('GET', 'search/games',
params={'query': query,
'type': 'suggest',
'live': live})
games = models.Game.wrap_search(r)
for g in games:
self.fetch_viewers(g)
return games
def top_games(self, limit=10, offset=0):
"""Return the current top games
:param limit: the maximum amount of top games to query
:type limit: :class:`int`
:param offset: the offset in the top games
:type offset: :class:`int`
:returns: a list of top games
:rtype: :class:`list` of :class:`models.Game`
:raises: None
"""
r = self.kraken_request('GET', 'games/top',
params={'limit': limit,
'offset': offset})
return models.Game.wrap_topgames(r)
def get_game(self, name):
"""Get the game instance for a game name
:param name: the name of the game
:type name: :class:`str`
:returns: the game instance
:rtype: :class:`models.Game` | None
:raises: None
"""
games = self.search_games(query=name, live=False)
for g in games:
if g.name == name:
return g
def get_channel(self, name):
"""Return the channel for the given name
:param name: the channel name
:type name: :class:`str`
:returns: the model instance
:rtype: :class:`models.Channel`
:raises: None
"""
r = self.kraken_request('GET', 'channels/' + name)
return models.Channel.wrap_get_channel(r)
def search_channels(self, query, limit=25, offset=0):
"""Search for channels and return them
:param query: the query string
:type query: :class:`str`
:param limit: maximum number of results
:type limit: :class:`int`
:param offset: offset for pagination
:type offset: :class:`int`
:returns: A list of channels
:rtype: :class:`list` of :class:`models.Channel` instances
:raises: None
"""
r = self.kraken_request('GET', 'search/channels',
params={'query': query,
'limit': limit,
'offset': offset})
return models.Channel.wrap_search(r)
def get_stream(self, channel):
"""Return the stream of the given channel
:param channel: the channel that is broadcasting.
Either name or models.Channel instance
:type channel: :class:`str` | :class:`models.Channel`
:returns: the stream or None, if the channel is offline
:rtype: :class:`models.Stream` | None
:raises: None
"""
if isinstance(channel, models.Channel):
channel = channel.name
r = self.kraken_request('GET', 'streams/' + channel)
return models.Stream.wrap_get_stream(r)
def get_streams(self, game=None, channels=None, limit=25, offset=0):
"""Return a list of streams queried by a number of parameters
sorted by number of viewers descending
:param game: the game or name of the game
:type game: :class:`str` | :class:`models.Game`
:param channels: list of models.Channels or channel names (can be mixed)
:type channels: :class:`list` of :class:`models.Channel` or :class:`str`
:param limit: maximum number of results
:type limit: :class:`int`
:param offset: offset for pagination
:type offset: :class:`int`
:returns: A list of streams
:rtype: :class:`list` of :class:`models.Stream`
:raises: None
"""
if isinstance(game, models.Game):
game = game.name
channelnames = []
cparam = None
if channels:
for c in channels:
if isinstance(c, models.Channel):
c = c.name
channelnames.append(c)
cparam = ','.join(channelnames)
params = {'limit': limit,
'offset': offset,
'game': game,
'channel': cparam}
r = self.kraken_request('GET', 'streams', params=params)
return models.Stream.wrap_search(r)
def search_streams(self, query, hls=False, limit=25, offset=0):
"""Search for streams and return them
:param query: the query string
:type query: :class:`str`
:param hls: If true, only return streams that have hls stream
:type hls: :class:`bool`
:param limit: maximum number of results
:type limit: :class:`int`
:param offset: offset for pagination
:type offset: :class:`int`
:returns: A list of streams
:rtype: :class:`list` of :class:`models.Stream` instances
:raises: None
"""
r = self.kraken_request('GET', 'search/streams',
params={'query': query,
'hls': hls,
'limit': limit,
'offset': offset})
return models.Stream.wrap_search(r)
@needs_auth
def followed_streams(self, limit=25, offset=0):
"""Return the streams the current user follows.
Needs authorization ``user_read``.
:param limit: maximum number of results
:type limit: :class:`int`
:param offset: offset for pagination
:type offset: :class:`int`
:returns: A list of streams
:rtype: :class:`list` of :class:`models.Stream` instances
:raises: :class:`exceptions.NotAuthorizedError`
"""
r = self.kraken_request('GET', 'streams/followed',
params={'limit': limit,
'offset': offset})
return models.Stream.wrap_search(r)
def get_user(self, name):
"""Get the user for the given name
:param name: The username
:type name: :class:`str`
:returns: the user instance
:rtype: :class:`models.User`
:raises: None
"""
r = self.kraken_request('GET', 'user/' + name)
return models.User.wrap_get_user(r)
@needs_auth
def query_login_user(self, ):
"""Query and return the currently logined user
:returns: The user instance
:rtype: :class:`models.User`
:raises: :class:`exceptions.NotAuthorizedError`
"""
r = self.kraken_request('GET', 'user')
return models.User.wrap_get_user(r)
def get_playlist(self, channel):
"""Return the playlist for the given channel
:param channel: the channel
:type channel: :class:`models.Channel` | :class:`str`
:returns: the playlist
:rtype: :class:`m3u8.M3U8`
:raises: :class:`requests.HTTPError` if channel is offline.
"""
if isinstance(channel, models.Channel):
channel = channel.name
token, sig = self.get_channel_access_token(channel)
params = {'token': token, 'sig': sig,
'allow_audio_only': True,
'allow_source': True}
r = self.usher_request(
'GET', 'channel/hls/%s.m3u8' % channel, params=params)
playlist = m3u8.loads(r.text)
return playlist
def get_quality_options(self, channel):
"""Get the available quality options for streams of the given channel
Possible values in the list:
* source
* high
* medium
* low
* mobile
* audio
:param channel: the channel or channel name
:type channel: :class:`models.Channel` | :class:`str`
:returns: list of quality options
:rtype: :class:`list` of :class:`str`
:raises: :class:`requests.HTTPError` if channel is offline.
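Example (a sketch; ``'somechannel'`` is a hypothetical channel name)::

    options = session.get_quality_options('somechannel')
    # e.g. ['source', 'high', 'medium', 'low', 'mobile', 'audio']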
"""
optionmap = {'chunked': 'source',
'high': 'high',
'medium': 'medium',
'low': 'low',
'mobile': 'mobile',
'audio_only': 'audio'}
p = self.get_playlist(channel)
options = []
for pl in p.playlists:
q = pl.media[0].group_id
options.append(optionmap[q])
return options
def get_channel_access_token(self, channel):
"""Return the token and sig for the given channel
:param channel: the channel or channel name to get the access token for
:type channel: :class:`models.Channel` | :class:`str`
:returns: The token and sig for the given channel
:rtype: (:class:`unicode`, :class:`unicode`)
:raises: None
"""
if isinstance(channel, models.Channel):
channel = channel.name
r = self.oldapi_request(
'GET', 'channels/%s/access_token' % channel).json()
return r['token'], r['sig']
def get_chat_server(self, channel):
"""Get an appropriate chat server for the given channel
Usually the server is irc.twitch.tv, but because the twitch chat
infrastructure is delicate, chat is spread over many servers. Big events
run on special event servers. This method tries to find a good one.
:param channel: the channel with the chat
:type channel: :class:`models.Channel`
:returns: the server address and port
:rtype: (:class:`str`, :class:`int`)
:raises: None
"""
r = self.oldapi_request(
'GET', 'channels/%s/chat_properties' % channel.name)
json = r.json()
servers = json['chat_servers']
try:
r = self.get(TWITCH_STATUSURL)
except requests.HTTPError:
log.debug('Error getting chat server status. Using random one.')
address = servers[0]
else:
stats = [client.ChatServerStatus(**d) for d in r.json()]
address = self._find_best_chat_server(servers, stats)
server, port = address.split(':')
return server, int(port)
@staticmethod
def _find_best_chat_server(servers, stats):
"""Find the best from servers by comparing with the stats
:param servers: a list of server addresses, e.g. ['0.0.0.0:80']
:type servers: :class:`list` of :class:`str`
:param stats: list of server statuses
:type stats: :class:`list` of :class:`chat.ChatServerStatus`
:returns: the best server address
:rtype: :class:`str`
:raises: None
"""
best = servers[0] # in case we find no match with any status
stats.sort() # sorted by performance, best first
for stat in stats:
for server in servers:
if server == stat:
# found a chat server that has the same address
# as one of the chat server statuses.
# since the stats are sorted by performance,
# the first hit is the best, thus break
best = server
break
if best:
# already found one, so no need to check the other
# statuses, which are worse
break
return best
def get_emote_picture(self, emote, size=1.0):
"""Return the picture for the given emote
:param emote: the emote object
:type emote: :class:`pytwitcherapi.chat.message.Emote`
:param size: the size of the picture.
Choices are: 1.0, 2.0, 3.0
:type size: :class:`float`
:returns: the raw picture data of the emote
:rtype: :class:`str`
:raises: None
"""
r = self.get('http://static-cdn.jtvnw.net/emoticons/v1/%s/%s' %
(emote.emoteid, size))
return r.content
|
__init__.py
|
############################################################################
#
# Copyright (c) Mamba Developers. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
############################################################################
""" Component for handling socket TMTC """
import os
import threading
import socketserver
from typing import Optional
from mamba.core.component_base import Component
from mamba.core.context import Context
from mamba.core.msg import Raw, Empty
from mamba.core.exceptions import ComponentConfigException
class TcpSingleSocketServer(Component):
""" Plugin base class """
def __init__(self,
context: Context,
local_config: Optional[dict] = None) -> None:
super().__init__(os.path.dirname(__file__), context, local_config)
# Define custom variables
self._server: Optional[ThreadedTCPServer] = None
self._server_thread: Optional[threading.Thread] = None
# Initialize observers
self._register_observers()
def _register_observers(self) -> None:
# Quit is sent to command App finalization
self._context.rx['quit'].subscribe(on_next=self._close)
def initialize(self) -> None:
if not all(key in self._configuration for key in ['host', 'port']):
raise ComponentConfigException(
"Missing required elements in component configuration")
# Create the socket server, binding to host and port
socketserver.TCPServer.allow_reuse_address = True
self._server = ThreadedTCPServer(
(self._configuration['host'], self._configuration['port']),
ThreadedTCPRequestHandler, self)
# Start a thread with the server -- that thread will then start one
# more thread for each request
self._server_thread = threading.Thread(
target=self._server.serve_forever)
# Exit the server thread when the main thread terminates
self._server_thread.daemon = True
self._server_thread.start()
print("Server loop running in thread:", self._server_thread.name)
def _close(self, rx_value: Optional[Empty] = None) -> None:
""" Entry point for closing application
Args:
rx_value (Empty): The value published by the subject.
"""
if self._server is not None:
self._server.shutdown()
self._server = None
class ThreadedTCPRequestHandler(socketserver.BaseRequestHandler):
"""
The request handler class for the socket server.
It is instantiated once per connection to the server, and must
override the handle() method to implement communication to the
client.
"""
def handle(self):
# Register observer for raw_tm
observer = self.server.raw_tm.subscribe(on_next=self.send_tm)
# Send incoming data to raw_tc
while True:
# self.request is the TCP socket connected to the client
try:
data = str(self.request.recv(1024), 'utf-8')
if not data:
break
self.server.log_dev(fr' -> Received socket TC: {data}')
self.server.raw_tc.on_next(Raw(data))
except ConnectionResetError:
break
# Dispose observer when connection is closed
observer.dispose()
self.server.log_info('Remote socket connection has been closed')
def send_tm(self, raw_tm: Raw):
""" Send msg telemetry over the socket connection """
self.server.log_dev( # type: ignore
fr' <- Published socket TM: {raw_tm.msg}')
try:
self.request.sendall(raw_tm.msg.encode('utf-8'))
except BrokenPipeError:
pass
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
def __init__(self, server_address, request_handler_class,
parent: TcpSingleSocketServer) -> None:
super().__init__(server_address, request_handler_class)
self.raw_tc = parent._context.rx['raw_tc']
self.raw_tm = parent._context.rx['raw_tm']
self.log_dev = parent._log_dev
self.log_info = parent._log_info
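# Usage sketch (assumption: the component was configured with
# host='localhost', port=8080). A client exchanges TC/TM over a plain TCP
# connection: received text is published on rx['raw_tc'], and every Raw
# message published on rx['raw_tm'] is written back to the client.
#
#     import socket
#     with socket.create_connection(('localhost', 8080)) as sock:
#         sock.sendall(b'my_telecommand')   # forwarded to rx['raw_tc']
#         print(sock.recv(1024))            # telemetry from rx['raw_tm']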
|
spot_finder_backend.py
|
"""
Reference:
Python Multiprocessing with ZeroMQ
http://taotetek.net/2011/02/02/python-multiprocessing-with-zeromq/
"""
import iotbx.phil
import libtbx.phil
import os
import stat
import time
import datetime
import getpass
import zmq
import re
import Queue
import collections
#import sqlite3
import pysqlite2.dbapi2 as sqlite3
import threading
import traceback
import numpy
import cPickle as pickle
import hashlib
from PIL import Image
from multiprocessing import Process
#import inotify.adapters # use yamtbx.python -mpip install inotify
from yamtbx.dataproc.myspotfinder import shikalog
from yamtbx.dataproc.myspotfinder import config_manager
from yamtbx.dataproc.myspotfinder import spot_finder_for_grid_scan
from yamtbx.dataproc import bl_logfiles
from yamtbx.dataproc import eiger
from yamtbx import util
#DEBUG for inotify
#import logging
#logger = logging.getLogger("inotify.adapters")
#logger.setLevel(logging.DEBUG)
#handlers = logging.StreamHandler()
#handlers.setLevel(logging.DEBUG)
#handlers.setFormatter(logging.Formatter("%(asctime)-15s %(levelname)s : %(message)s"))
#logger.addHandler(handlers)
master_params_str = """\
topdir = None
.type = path
.help = Root directory
bl = 32xu 41xu 26b2 44xu 45xu
.type = choice(multi=False)
.help = Choose beamline where you start SHIKA
date = "today"
.type = str
.help = Data collection date ("today" or %Y-%d-%m format)
blconfig = None
.type = path
.help = Override default blconfig path (/isilon/blconfig/bl$bl/)
nproc = 4
.type = int
.help = Number of processors used for spot finding
ports = 5557,5558,5559
.type = ints(size=3,value_min=1024,value_max=49151)
.help = Port numbers used by ZeroMQ.
dbdir = /isilon/cluster/log/shika/db
.type = path
.help = location to write sqlite3 db file.
logroot = /isilon/cluster/log/shika/
.type = path
mode = *eiger_streaming bsslog zoo watch_ramdisk
.type = choice(multi=False)
env = *oys ppu
.type = choice(multi=False)
.help = Execution environment
eiger_host = "192.168.163.204"
.type = str
.help = "EIGER hostname or ip-address"
#incomplete_file_workaround = 0
# .type = float
# .help = wait given seconds after detecting new image
force_ssh_from = None
.type = str
.help = Users must not change this parameter.
only_check_in_last_hours = 1
.type = float
.help = "Only check diffscan.log modified during the last specified hours"
ramdisk_walk_interval = 2
.type = float
"""
params = None
def retry_until_success(f, arg=None):
args = (arg,) if arg is not None else ()
return util.retry_until_noexc(f, args, ntry=30, outf=shikalog.warning)
class DiffScanManager:
def __init__(self):
self.clear()
# __init__()
def clear(self):
self.scanlog = {} # directory: BssDiffscanLog object
self.found_imgs = set()
self.finished = {} # {filename:timestamp}; list of filenames of which analysis was completed
# clear()
def add_scanlog(self, slog):
slog = os.path.abspath(slog)
self.scanlog[os.path.dirname(slog)] = bl_logfiles.BssDiffscanLog(slog)
# add_scanlog()
def add_dir(self, slogdir):
self.add_scanlog(os.path.join(slogdir, "diffscan.log"))
# add_dir()
def update_scanlogs(self):
for logdir, slog in self.scanlog.items():
if os.path.isfile(slog.scanlog):
slog.parse()
else:
shikalog.error("diffraction scan log is not found!: %s" %slog.scanlog)
continue
# if no update since all images processed
mtime = os.path.getmtime(slog.scanlog)
if mtime == self.finished.get(slog.scanlog, -1): continue
# Update 'processed files' using database
dbfile = os.path.join(logdir, "_spotfinder", "shika.db")
for _ in xrange(10):
try:
if not os.path.exists(os.path.dirname(dbfile)): os.mkdir(os.path.dirname(dbfile))
con = sqlite3.connect(dbfile, timeout=30)
break
except sqlite3.OperationalError:
shikalog.warning("Connecting to %s failed. Retrying" % dbfile)
cur = con.cursor()
# status TABLE
#c = cur.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='status';")
c = retry_until_success(cur.execute, "SELECT name FROM sqlite_master WHERE type='table' AND name='status';")
if c.fetchone() is not None:
#cur.execute("select filename from status")
retry_until_success(cur.execute, "select filename from status")
processed_files = map(lambda x:os.path.join(logdir, x[0]), cur.fetchall())
print "debug::",processed_files
self.found_imgs.update(processed_files)
# check if all images are processed
if len(processed_files) == sum(map(lambda x: len(x.filename_idxes), slog.scans)):
shikalog.info("All %d images in %s are already processed. Will not check unless diffscan.log updated"%(len(processed_files), slog.scanlog))
self.finished[slog.scanlog] = mtime
#con.commit()
retry_until_success(con.commit)
con.close()
# update_scanlogs()
def get_unprocessed_images(self, env=None):
ret = []
self.update_scanlogs()
for slogdir, slog in self.scanlog.items():
for scan in slog.scans:
fcs = map(lambda x: (os.path.join(slogdir, x[0]), x[1]), scan.filename_idxes)
#print "fix=", fcs
#if env == "ppu": fcs_proxy = map(lambda x: (re.sub("^/isilon/users/", "/ramdisk/", x[0]), x[1]), fcs)
if env == "ppu": f_mod = lambda x: re.sub("^/isilon/users/", "/ramdisk/", x)
else: f_mod = lambda x: x
unproc = filter(lambda x: x[0] not in self.found_imgs and os.path.isfile(f_mod(x[0])), fcs)
ret.extend(map(lambda x:x+(scan,), unproc))
self.found_imgs.update(map(lambda x: x[0], ret))
return ret # (filename, idx, scan object)
# get_unprocessed_images()
def remove_found(self, files): # when user wants to recalculate..
self.found_imgs.difference_update(files)
# remove_found()
def needs_to_be_processed(self, filename):
"""
Check if the given file needs to be processed.
No need to process if
- not included in diffscan.log
- first image in row (only if BSS creates such non-sense files)
"""
scaninfo = self.get_scan_info(filename)
if scaninfo is None:
return False
# return True here *if* BSS no longer creates such non-sense files.
# this should be an option.
if scaninfo.is_shutterless():
r = scaninfo.get_file_number_based_on_template(filename)
num = int(r.group(1))
if scaninfo.hpoints > 1:
return num%(scaninfo.hpoints+1) != 0 # if remainder is 0, discard the image.
else:
return num != 0 # discard 000.img
else:
return True
# needs_to_be_processed()
def get_grid_coord(self, filename):
dirname = os.path.dirname(filename)
if dirname not in self.scanlog:
shikalog.warning("get_grid_coord(): directory is not found: %s" % dirname)
return None
return self.scanlog[dirname].get_grid_coord(os.path.basename(filename))
# get_grid_coord()
def get_scan_info(self, filename):
dirname = os.path.dirname(filename)
basename = os.path.basename(filename)
if not dirname in self.scanlog:
shikalog.warning("get_scan_info(): directory is not found: %s" % dirname)
return None
for scan in reversed(self.scanlog[dirname].scans):
if scan.match_file_with_template(filename):
return scan
shikalog.warning("get_scan_info(): Not in scans: %s" % dirname)
return None
# get_scan_info()
def get_gonio_xyz_phi(self, filename):
dirname = os.path.dirname(filename)
basename = os.path.basename(filename)
if not dirname in self.scanlog:
return None
for scan in reversed(self.scanlog[dirname].scans):
for f, c in scan.filename_coords:
if basename == f:
if scan.is_shutterless():
return list(c[0]) + [scan.fixed_spindle]
else:
return list(c[0]) + [scan.osc_start]
return None
# get_gonio_xyz_phi()
# class DiffScanManager
class WatchScanlogThread:
def __init__(self, queue, topdir, beamline=None, expdate="today"):
self.queue = queue
self.topdir = topdir
self.interval = 5
self.thread = None
#self.latest_dir = None
self.keep_going = True
self.running = True
self.beamline = beamline
self.last_bsslog = None
self.last_bsslog_line = 0
self.expdate = None
if expdate != "today": self.expdate = datetime.datetime.strptime(expdate, "%Y-%m-%d")
self.thread = threading.Thread(None, self.run)
self.thread.daemon = True
self._cached_dirs = {}
#self.thread.start()
def start(self, interval=None):
# Thread should be already started.
# Just start to notify the latest directory.
self.notify_latest_dir = True
#wx.PostEvent(self.parent, EventLogWatcherStarted())
if interval is not None:
self.interval = interval
# If accidentally stopped
if not self.is_running():
self.keep_going = True
self.running = True
self.thread = threading.Thread(None, self.run)
self.thread.daemon = True
self.thread.start()
def stop(self):
pass
def is_running(self): return self.thread is not None and self.thread.is_alive()
def find_in_directory(self, topdir):
scanlogs = [] # (filename, date)
for root, dirnames, filenames in os.walk(topdir):
if "diffscan.log" in filenames:
scanlog = os.path.join(root, "diffscan.log")
scanlogs.append((scanlog, os.path.getmtime(scanlog)))
return scanlogs
# find_in_directory()
def find_in_bsslog(self, topdir):
def read_bsslog_line(l):
if self.beamline in ("41xu","45xu") and "/ramdisk/" in l: l = l.replace("/ramdisk/","/isilon/users/", 1) # XXX should check if Pilatus or not!!
if topdir not in l: return
l = l[l.index(topdir):]
if " " in l: l = l[:l.index(" ")]
if ",.img" in l: l = l[:l.index(",.img")]
return os.path.dirname(l)
basedate = datetime.datetime.today() if self.expdate is None else self.expdate
if self.last_bsslog is None:
shikalog.debug("checking yesterday's bss log")
self.last_bsslog = os.path.join(params.blconfig, "log",
(basedate - datetime.timedelta(days=1)).strftime("bss_%Y%m%d.log"))
if not os.path.isfile(self.last_bsslog):
shikalog.info("Yesterday's log not found: %s"%self.last_bsslog)
current_bsslog = os.path.join(params.blconfig, "log", basedate.strftime("bss_%Y%m%d.log"))
if self.last_bsslog is not None and self.last_bsslog != current_bsslog and os.path.isfile(self.last_bsslog):
shikalog.debug("reading last-log %s from %d" % (os.path.basename(self.last_bsslog), self.last_bsslog_line))
for i, l in enumerate(open(self.last_bsslog)):
if i <= self.last_bsslog_line: continue
# read last log!
found = read_bsslog_line(l)
if found is not None: self._cached_dirs[found] = time.time()
# reset for reading current log
self.last_bsslog_line = 0
if os.path.isfile(current_bsslog):
shikalog.debug("reading curr-log %s from %d" % (os.path.basename(current_bsslog), self.last_bsslog_line))
i = -1 # in case empty file
for i, l in enumerate(open(current_bsslog)):
if i <= self.last_bsslog_line: continue
# read current log!
found = read_bsslog_line(l)
if found is not None: self._cached_dirs[found] = time.time()
# set for next reading
self.last_bsslog_line = i
else:
shikalog.info("bsslog not found: %s"%current_bsslog)
self.last_bsslog = current_bsslog
scanlogs = map(lambda x: os.path.join(x, "diffscan.log"), self._cached_dirs)
uid = os.getuid()
scanlogs = filter(lambda x: os.path.isfile(x) and os.stat(x).st_uid==uid, scanlogs)
if params.only_check_in_last_hours is not None and params.only_check_in_last_hours > 0:
now = time.time()
last_seconds = params.only_check_in_last_hours*60*60
scanlogs = filter(lambda x: (now-os.path.getmtime(x))<last_seconds, scanlogs)
if scanlogs: shikalog.debug("found diffscan.log in bsslog: %s" % scanlogs)
for k in self._cached_dirs.keys():
# clear old cache
if time.time() - self._cached_dirs[k] > 60*5: del self._cached_dirs[k]
return map(lambda x: (x, os.path.getmtime(x)), scanlogs)
# find_in_bsslog()
def run_inner(self, method="bsslog"):
assert method in ("bsslog", "os.walk")
startt = time.time()
if method == "bsslog":
scanlogs = self.find_in_bsslog(self.topdir)
else:
scanlogs = self.find_in_directory(self.topdir)
shikalog.debug("WatchScanlogThread.run_inner(method=%s) took %.3f sec for finding" % (method,
time.time()-startt))
if len(scanlogs) > 0:
scanlogs.sort(key=lambda x:x[1], reverse=True)
for x in scanlogs: self.queue.put(x)
# run_inner()
def run(self):
def mysleep():
if self.interval < 1:
time.sleep(self.interval)
else:
for i in xrange(int(self.interval/.5)):
if self.keep_going:
time.sleep(.5)
# mysleep()
shikalog.info("WatchScanlogThread loop STARTED")
while self.keep_going:
#shikalog.debug("in WatchScanlogThread timer")
try:
self.run_inner()
except:
shikalog.error("Error in WatchScanlogThread\n%s" % (traceback.format_exc()))
mysleep()
shikalog.info("WatchScanlogThread loop FINISHED")
self.running = False
# run()
# class WatchScanlogThread
class WatchDirThread:
def __init__(self, queue, pushport):
self.interval = 5
self.thread = None
self.queue = queue
self.dirs = set()
self.diffscan_manager = DiffScanManager()
self.zmq_context = zmq.Context()
self.ventilator_send = self.zmq_context.socket(zmq.PUSH)
self.ventilator_send.bind("tcp://*:%d"%pushport)
def start(self, interval=None):
self.stop()
self.keep_going = True
self.running = True
if interval is not None:
self.interval = interval
self.thread = threading.Thread(None, self.run)
self.thread.daemon = True
self.thread.start()
def stop(self):
if self.is_running():
#shikalog.info("Stopping WatchDirThread.. Wait.")
self.keep_going = False
self.thread.join()
else:
pass
#shikalog.info("WatchDirThread already stopped.")
def is_running(self): return self.thread is not None and self.thread.is_alive()
def run_inner(self):
#shikalog.debug("in WatchDirThread timer")
startt = time.time()
while not self.queue.empty():
scanlog, scanlogmtime = self.queue.get()
self.diffscan_manager.add_scanlog(scanlog)
if params.mode == "eiger_streaming":
# Not sure this is needed, but clearly *not* needed when mode != "eiger_streaming"
# because get_unprocessed_images() calls update_scanlogs()
self.diffscan_manager.update_scanlogs()
else:
new_imgs = self.diffscan_manager.get_unprocessed_images(env=params.env)
#print "new_imgs=", new_imgs
for img, idx, scan in new_imgs:
img_actual = img
if params.env=="ppu": img_actual = re.sub("^/isilon/users/", "/ramdisk/", img)
header = dict(file_prefix=scan.get_prefix()[:-1],
frame=idx-1,
raster_horizontal_number=scan.hpoints, raster_vertical_number=scan.vpoints,
raster_horizontal_step=scan.hstep, raster_vertical_step=scan.vstep,
raster_scan_direction=scan.scan_direction, raster_scan_path=scan.scan_path,
)
shikalog.debug("Sending %s,%s" % (img, idx))
msg = dict(imgfile=img, header=header,
cbf_data=open(img_actual, "rb").read())
self.ventilator_send.send_pyobj(msg)
shikalog.debug("WatchDirThread.run_inner took %.3f sec" % (time.time()-startt))
def run(self):
#shikalog.info("WatchDirThread loop STARTED")
while self.keep_going:
try:
self.run_inner()
except:
shikalog.error("Error in WatchDirThread\n%s" % (traceback.format_exc()))
if self.interval < 1:
time.sleep(self.interval)
else:
for i in xrange(int(self.interval/.5)):
if self.keep_going:
time.sleep(.5)
shikalog.info("WatchDirThread loop FINISHED")
self.running = False
#wx.PostEvent(self.parent, EventDirWatcherStopped()) # Ensure the checkbox unchecked when accidentally exited.
# run()
# class WatchDirThread
def walk_nolink(top, topdown=True, onerror=None):
# Original /misc/oys/xtal/cctbx/snapshots/dials-v1-8-3/base/lib/python2.7/os.py
try:
names = os.listdir(top)
except os.error, err:
if onerror is not None:
onerror(err)
return
dirs, nondirs = [], []
for name in names:
try: st = os.lstat(os.path.join(top, name))
except: continue # ignore deleted file
if stat.S_ISDIR(st.st_mode):
dirs.append((name,st))
else:
nondirs.append((name,st))
if topdown:
yield top, dirs, nondirs
for name,_ in dirs:
new_path = os.path.join(top, name)
for x in walk_nolink(new_path, topdown, onerror):
yield x
if not topdown:
yield top, dirs, nondirs
# walk_nolink()
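# Like os.walk(), but walk_nolink() yields (name, lstat) tuples for dirs and
# nondirs and does not descend into symlinked directories. For example:
#   for root, dirs, nondirs in walk_nolink("/ramdisk"):
#       cbfs = [name for name, st in nondirs if name.endswith(".cbf")]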
class WatchRamdiskThread:
def __init__(self, pushport, interval):
self.interval = interval
self.thread = None
self.zmq_context = zmq.Context()
self.ventilator_send = self.zmq_context.socket(zmq.PUSH)
self.ventilator_send.bind("tcp://*:%d"%pushport)
def start(self, interval=None):
self.stop()
self.keep_going = True
self.running = True
if interval is not None:
self.interval = interval
self.thread = threading.Thread(None, self.run)
self.thread.daemon = True
self.thread.start()
def stop(self):
if self.is_running():
#shikalog.info("Stopping WatchRamdiskThread.. Wait.")
self.keep_going = False
self.thread.join()
else:
pass
#shikalog.info("WatchRamdiskThread already stopped.")
def is_running(self): return self.thread is not None and self.thread.is_alive()
def run_inner_inotify(self):
#shikalog.debug("in WatchRamdiskThread timer")
startt = time.time()
def is_processed(path, touch):
if not os.path.exists(touch): return False
mtime_touch = os.path.getmtime(touch)
mtime_path = os.path.getmtime(path)
return mtime_touch > mtime_path
itree = inotify.adapters.InotifyTree('/ramdisk',
mask=inotify.constants.IN_MOVED_TO|inotify.constants.IN_CLOSE_WRITE)
for header, type_names, path, filename in itree.event_gen(yield_nones=False):
print type_names, path, filename
if "IN_ISDIR" in type_names: continue
#if "IN_MOVED_TO" not in type_names: continue
if not filename.endswith(".cbf"): continue
imgf = os.path.join(path, filename)
# Check if already processed
touch = imgf+".touch"
if is_processed(imgf, touch): continue
# Send it!
img_isilon = re.sub("^/ramdisk/", "/isilon/users/", imgf)
header = dict(file_prefix=os.path.basename(img_isilon[:img_isilon.rindex("_")]),
frame=int(imgf[imgf.rindex("_")+1:imgf.rindex(".cbf")])-1)
shikalog.debug("Sending %s,%s" % (header["file_prefix"], header["frame"]+1))
msg = dict(imgfile=img_isilon, header=header,
cbf_data=open(imgf, "rb").read())
self.ventilator_send.send_pyobj(msg)
util.touch_file(touch) # mark as processed
shikalog.debug("WatchRamdiskThread.run_inner stopped in %.3f sec" % (time.time()-startt))
# run_inner_inotify()
def run_inner_walk(self):
def need_process(root, path_lst, filename_dict):
# filenames is list of (filename, lstat); directory name not included
# path_lst is (path, lstat(path)); directory name not included
path = os.path.join(root, path_lst[0])
lst = path_lst[1]
# don't process if link
if stat.S_ISLNK(lst.st_mode): return False
touch = path_lst[0] + ".touch"
# process if .touch not exists
mtime_touch = filename_dict.get(touch, None)
if mtime_touch is None: return lst.st_size >= 1000 and "Comment" in open(path).read(1000)
mtime_path = lst.st_mtime
if mtime_touch > mtime_path:
return False
else:
shikalog.debug("Newer than touch (%s; %s <= %s)"%(path, mtime_touch, mtime_path))
return lst.st_size >= 1000 and "Comment" in open(path).read(1000)
# need_process()
uid = os.getuid()
start_time = time.time()
n_dir = 0
for root, dirnames, filenames in walk_nolink("/ramdisk"):
n_dir += 1
if os.stat(root).st_uid != uid: continue
cbf_files = filter(lambda x: x[0].endswith(".cbf"), filenames)
filename_dict = dict(filenames)
new_files = filter(lambda x: need_process(root, x, filename_dict), cbf_files)
new_files = map(lambda x: os.path.join(root, x[0]), new_files)
for imgf in new_files:
img_isilon = re.sub("^/ramdisk/", "/isilon/users/", imgf)
header = dict(file_prefix=os.path.basename(img_isilon[:img_isilon.rindex("_")]),
frame=int(imgf[imgf.rindex("_")+1:imgf.rindex(".cbf")])-1)
shikalog.debug("Sending %s" % img_isilon)
msg = dict(imgfile=img_isilon, header=header,
cbf_data=open(imgf, "rb").read())
self.ventilator_send.send_pyobj(msg)
util.touch_file(imgf+".touch") # mark as processed
shikalog.debug("WatchRamdiskThread.run_inner_walk finished in %.3f sec (%d dirs)" % (time.time()-start_time, n_dir))
# run_inner_walk()
def run(self):
shikalog.info("WatchRamdiskThread loop STARTED")
while self.keep_going:
try:
self.run_inner_walk()
except:
shikalog.error("Error in WatchRamdiskThread\n%s" % (traceback.format_exc()))
if self.interval < 1:
time.sleep(self.interval)
else:
for i in xrange(int(self.interval/.5)):
if self.keep_going:
time.sleep(.5)
shikalog.info("WatchRamdiskThread loop FINISHED")
self.running = False
# run()
# class WatchRamdiskThread
class ResultsManager:
def __init__(self, rqueue, dbdir):
self.thread = threading.Thread(None, self.run)
self.thread.daemon = True
self.interval = 3
self.dbdir = dbdir
self.rqueue = rqueue
self._diffscan_params = {}
# __init__()
def start(self):
if not self.is_running():
self.keep_going = True
self.running = True
self.thread.start()
# start()
def is_running(self): return self.thread is not None and self.thread.is_alive()
def update_diffscan_params(self, msg):
wdir = str(msg["data_directory"])
prefix = str(msg["file_prefix"])
key = os.path.join(wdir, "_spotfinder", prefix)
if key in self._diffscan_params: del self._diffscan_params[key]
hstep = float(msg["raster_horizontal_step"])
# raster_horizontal_number was raster_horizotal_number until bss_jul04_2017
hpoint = int(msg.get("raster_horizotal_number", msg.get("raster_horizontal_number")))
vstep = float(msg["raster_vertical_step"])
vpoint = int(msg["raster_vertical_number"])
if "raster_scan_direction" in msg and "raster_scan_path" in msg:
scan_direction = str(msg["raster_scan_direction"])
scan_path = str(msg["raster_scan_path"])
else:
scanlog = os.path.join(wdir, "diffscan.log")
if not os.path.isfile(scanlog): return
slog = bl_logfiles.BssDiffscanLog(scanlog)
slog.remove_overwritten_scans()
matched = filter(lambda x: x.get_prefix()==prefix+"_", slog.scans)
if matched:
scan_direction = matched[-1].scan_direction
scan_path = matched[-1].scan_path
else:
return
self._diffscan_params[key] = (vpoint, vstep, hpoint, hstep, scan_direction, scan_path)
shikalog.info("_diffscan_params %s = %s" % (key, self._diffscan_params[key]))
# update_diffscan_params()
def get_raster_grid_coordinate(self, msg):
try:
header = msg["header"]
hstep = float(header["raster_horizontal_step"])
# raster_horizontal_number was raster_horizotal_number until bss_jul04_2017
hpoint = int(header.get("raster_horizotal_number", header.get("raster_horizontal_number")))
vstep = float(header["raster_vertical_step"])
vpoint = int(header["raster_vertical_number"])
scan_direction = str(header["raster_scan_direction"])
scan_path = str(header["raster_scan_path"])
return bl_logfiles.BssDiffscanLog.get_grid_coord_internal(vpoint, vstep, hpoint, hstep, msg["idx"], False, scan_direction, scan_path)
except (KeyError, TypeError):
shikalog.warning("not bringing info for grid coord: %s %s" % (msg.get("file_prefix"), msg.get("idx")))
#pass
wdir = str(msg["work_dir"])
key = os.path.join(wdir, str(msg["file_prefix"]))
if key in self._diffscan_params:
vpoint, vstep, hpoint, hstep, scan_direction, scan_path = self._diffscan_params[key]
return bl_logfiles.BssDiffscanLog.get_grid_coord_internal(vpoint, vstep, hpoint, hstep, msg["idx"], False, scan_direction, scan_path)
else:
shikalog.warning("_diffscan_params not available for %s" % key)
scanlog = os.path.join(wdir, "..", "diffscan.log")
gcxy = None
if os.path.isfile(scanlog):
slog = bl_logfiles.BssDiffscanLog(scanlog)
slog.remove_overwritten_scans()
gcxy = slog.calc_grid_coord(prefix=str(msg["file_prefix"])+"_", num=msg["idx"]) # may return None
if gcxy is None: gcxy = [float("nan")]*2
return gcxy
# get_raster_grid_coordinate()
def run(self):
shikalog.info("ResultsManager loop STARTED")
dbfile, summarydat, con, cur = None, None, None, None
rcon = sqlite3.connect(os.path.join(self.dbdir, "%s.db"%getpass.getuser()), timeout=10)
rcur = rcon.cursor()
#c = rcur.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='updates';")
c = retry_until_success(rcur.execute, "SELECT name FROM sqlite_master WHERE type='table' AND name='updates';")
if c.fetchone() is None:
#rcur.execute("""create table updates (dirname text primary key,
# time real);""")
retry_until_success(rcur.execute, """create table updates (dirname text primary key,
time real);""")
while self.keep_going:
try:
messages = collections.OrderedDict()
while not self.rqueue.empty():
msg = self.rqueue.get()
if "bss_job_mode" in msg:
shikalog.info("bssinfo= %s"%msg)
try:
self.update_diffscan_params(msg)
except:
shikalog.error("Error in update_diffscan_params%s" % (traceback.format_exc()))
else:
messages.setdefault(os.path.normpath(str(msg["work_dir"])), []).append(msg)
for wdir in messages:
tmp = os.path.join(wdir, "shika.db")
if dbfile != tmp:
dbfile = tmp
con, cur = None, None
for _ in xrange(10):
try:
con = sqlite3.connect(dbfile, timeout=30)
break
except sqlite3.OperationalError:
shikalog.warning("Connecting to %s failed. Retrying" % dbfile)
if con is None:
shikalog.error("Could not connect to %s." % dbfile)
else:
cur = con.cursor()
summarydat = os.path.join(wdir, "summary.dat")
if not os.path.isfile(summarydat) or not os.path.getsize(summarydat):
open(summarydat, "w").write("prefix x y kind data filename\n")
canvas_data = {}
for msg in messages[wdir]:
imgf = os.path.basename(str(msg["imgfile"]))
spots_is = map(lambda x: x[2], msg["spots"])
if cur is not None:
# 'status'
c = cur.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='status';")
if c.fetchone() is None:
cur.execute("""create table status (filename text primary key);""")
cur.execute("insert or replace into status values (?)", (imgf,)) # for backward
# 'stats'
c = cur.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='stats';")
if c.fetchone() is None:
cur.execute("""create table stats (imgf text primary key, nspot real, total real, mean real);""")
cur.execute("insert or replace into stats values (?, ?,?,?)",
(imgf, len(msg["spots"]),
sum(spots_is),
sum(spots_is) / len(msg["spots"]) if len(msg["spots"])>0 else 0
))
# 'spots'
c = cur.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='spots';")
if c.fetchone() is None:
cur.execute("create table spots (filename text primary key, spots blob);")
# save jpg
if "jpgdata" in msg and msg["jpgdata"]:
jpgdir = os.path.join(wdir,
"thumb_%s_%.3d" % (str(msg["file_prefix"]), msg["idx"]//1000))
try: os.mkdir(jpgdir)
except: pass
jpgout = os.path.join(jpgdir, imgf+".jpg")
open(jpgout, "wb").write(msg["jpgdata"])
del msg["jpgdata"]
elif "thumbdata" in msg and msg["thumbdata"]:
#assert len(msg["thumbdata"])==600*600*3
thumbw = int(numpy.sqrt(len(msg["thumbdata"])/3))
assert len(msg["thumbdata"]) == 3*thumbw*thumbw
prefix = str(msg["file_prefix"])
idx = (msg["idx"]-1)//100
tmpdir = os.path.join(os.path.expanduser("~"), ".shikatmp")
if not os.path.exists(tmpdir): os.mkdir(tmpdir)
tmpfile = os.path.join(tmpdir, "%s_%s_%.3d.pkl" % (hashlib.sha256(wdir).hexdigest(), prefix, idx))
if (prefix, idx) in canvas_data:
canvas, ninc, _ = canvas_data[(prefix, idx)]
elif os.path.isfile(tmpfile):
shikalog.debug("loading thumbnail data from %s" % tmpfile)
canvas, ninc, _ = pickle.load(open(tmpfile))
else:
canvas, ninc = Image.new("RGB", (thumbw*10, thumbw*10), (0, 0, 0)), 0
thumbimg = Image.frombytes("RGB", (thumbw, thumbw), msg["thumbdata"])
idx2 = (msg["idx"]-1)%100
x, y = idx2%10, idx2//10
canvas.paste(thumbimg, (x*thumbw, y*thumbw))
ninc += 1
#canvas.save(jpgout, "JPEG", quality=90, optimize=True) # IT MAY COST TOO MUCH TIME (SHOULD RUN THIS JUST ONCE?)
# calc max frame number
hpoint = int(msg["header"].get("raster_horizotal_number", msg["header"].get("raster_horizontal_number"))) # raster_horizontal_number was raster_horizotal_number until bss_jul04_2017
vpoint = int(msg["header"]["raster_vertical_number"])
n_max = hpoint * vpoint
idx_max = (n_max-1)//100
n_in_last = n_max - idx_max*100
canvas_data[(prefix, idx)] = (canvas, ninc,
ninc == 100 or (idx_max==idx and ninc == n_in_last) # jpeg-completed flag
)
del msg["thumbdata"]
if cur is not None:
cur.execute("insert or replace into spots values (?, ?)",
(imgf, sqlite3.Binary(pickle.dumps(msg, -1))))
# summary.dat
try:
#tmptmp = time.time()
gcxy = self.get_raster_grid_coordinate(msg)
with open(summarydat, "a") as ofs:
kinds = ("n_spots", "total_integrated_signal","median_integrated_signal")
data = (len(msg["spots"]), sum(spots_is), numpy.median(spots_is))
for k, d in zip(kinds, data):
ofs.write("%s_ % .4f % .4f %s %s %s\n" % (str(msg["file_prefix"]),
gcxy[0], gcxy[1], k, d, imgf))
#shikalog.info("##DEBUG time: %f" %( time.time()-tmptmp))
except:
shikalog.error("Error in summary.dat generation at %s\n%s" % (wdir, traceback.format_exc()))
for prefix, idx in canvas_data:
tmpdir = os.path.join(os.path.expanduser("~"), ".shikatmp")
jpgdir = os.path.join(wdir, "thumb_%s" % prefix)
if not os.path.exists(tmpdir): os.mkdir(tmpdir)
if not os.path.exists(jpgdir): os.mkdir(jpgdir)
tmpfile = os.path.join(tmpdir, "%s_%s_%.3d.pkl" % (hashlib.sha256(wdir).hexdigest(), prefix, idx))
jpgout = os.path.join(jpgdir, "%s_%.6d-%.6d.jpg" % (prefix, idx*100+1, (idx+1)*100))
jpgtmp = os.path.join(jpgdir, ".tmp-%s_%.6d-%.6d.jpg" % (prefix, idx*100+1, (idx+1)*100))
shikalog.info("saving thumbnail jpeg as %s" % jpgout)
canvas_data[(prefix, idx)][0].save(jpgtmp, "JPEG", quality=50, optimize=True)
os.rename(jpgtmp, jpgout) # as it may take time
if canvas_data[(prefix, idx)][2]:
if os.path.isfile(tmpfile): os.remove(tmpfile)
else:
shikalog.info("saving thumbnail data to %s" % tmpfile)
pickle.dump(canvas_data[(prefix, idx)], open(tmpfile, "w"), -1)
while True:
try: con.commit()
except sqlite3.OperationalError:
shikalog.warning("sqlite3.OperationalError. Retrying.")
time.sleep(1)
continue
break
rcur.execute("insert or replace into updates values (?,?)", (wdir, time.time()))
rcon.commit()
shikalog.info("%4d results updated in %s" % (len(messages[wdir]), wdir))
except:
shikalog.error("Exception: %s" % traceback.format_exc())
time.sleep(self.interval)
self.running = False
shikalog.info("ResultsManager loop FINISHED")
# run()
# ResultsManager
def results_receiver(rqueue, pullport, results_manager):
zmq_context = zmq.Context()
receiver = zmq_context.socket(zmq.PULL)
receiver.bind("tcp://*:%d"%pullport)
last_times = []
while True:
msg = receiver.recv_pyobj()
if "bss_job_mode" not in msg:
last_times.append(time.time())
if len(last_times) > 50: last_times = last_times[-50:]
last_times = filter(lambda x: last_times[-1]-x < 50, last_times)
hz = float(len(last_times))/(last_times[-1]-last_times[0]) if len(last_times)>1 else 0
shikalog.info("%s %6.2f Hz (last %2d)" % (msg["imgfile"], hz, len(last_times)))
rqueue.put(msg)
#results_manager.start()
# results_receiver()
def worker(wrk_num, params):
context = zmq.Context()
# Set up a channel to receive work from the ventilator
work_receiver = context.socket(zmq.PULL)
work_receiver.connect("tcp://127.0.0.1:%d"%params.ports[0])
eiger_receiver = context.socket(zmq.PULL)
eiger_receiver.connect("tcp://%s:9999"%params.eiger_host)
# Set up a channel to send result of work to the results reporter
results_sender = context.socket(zmq.PUSH)
results_sender.connect("tcp://127.0.0.1:%d"%params.ports[1])
# Set up a channel to receive control messages over
control_receiver = context.socket(zmq.SUB)
control_receiver.connect("tcp://127.0.0.1:%d"%params.ports[2])
control_receiver.setsockopt(zmq.SUBSCRIBE, "")
# Set up a poller to multiplex the work receiver and control receiver channels
poller = zmq.Poller()
poller.register(work_receiver, zmq.POLLIN)
poller.register(control_receiver, zmq.POLLIN)
if params.mode == "eiger_streaming":
poller.register(eiger_receiver, zmq.POLLIN)
params_dict = {}
for key in config_manager.sp_params_strs:
params_str = config_manager.sp_params_strs[key] + config_manager.get_common_params_str()
master_params = libtbx.phil.parse(spot_finder_for_grid_scan.master_params_str)
working_params = master_params.fetch(sources=[libtbx.phil.parse(params_str)])
params_dict[key] = working_params.extract()
shikalog.info("worker %d ready" % wrk_num)
# Loop and accept messages from both channels, acting accordingly
while True:
socks = dict(poller.poll())
# the message came from work_receiver channel
if socks.get(work_receiver) == zmq.POLLIN:
msg = work_receiver.recv_json()
imgfile = str(msg["imgfile"])
pkey = config_manager.get_key_by_img(imgfile)
#params_str = config_manager.sp_params_strs[pkey] + config_manager.get_common_params_str()
shikalog.info("wrker%.2d: %s"%(wrk_num, msg))
#master_params = libtbx.phil.parse(spot_finder_for_grid_scan.master_params_str)
#working_params = master_params.fetch(sources=[libtbx.phil.parse(params_str)])
#working_params.show()
#dparams = working_params.extract()
dparams = params_dict[pkey]
dparams.work_dir = os.path.join(os.path.dirname(imgfile), "_spotfinder") # PPU-case!!?
if os.path.exists(dparams.work_dir): assert os.path.isdir(dparams.work_dir)
else:
try: os.mkdir(dparams.work_dir)
except: pass
result = spot_finder_for_grid_scan.run(imgfile, dparams)
result.update(msg)
result["work_dir"] = dparams.work_dir
result["params"] = dparams
results_sender.send_pyobj(result)
# the message from EIGER
if socks.get(eiger_receiver) == zmq.POLLIN:
frames = eiger_receiver.recv_multipart(copy = False)
header, data = eiger.read_stream_data(frames)
if util.None_in(header, data): continue
#params_str = config_manager.sp_params_strs[("BL32XU", "EIGER9M", None, None)] + config_manager.get_common_params_str()
#master_params = libtbx.phil.parse(spot_finder_for_grid_scan.master_params_str)
#working_params = master_params.fetch(sources=[libtbx.phil.parse(params_str)])
#working_params.show()
dparams = params_dict[("BL32XU", "EIGER9M", None, None)] #working_params.extract()
dparams.work_dir = os.path.join(str(header["data_directory"]), "_spotfinder")
if os.path.exists(dparams.work_dir): assert os.path.isdir(dparams.work_dir)
else:
try: os.mkdir(dparams.work_dir)
except: pass
shikalog.info("Got data: %s"%header)
imgfile = os.path.join(header["data_directory"],
"%s_%.6d.img"%(str(header["file_prefix"]), header["frame"]+1))
result = spot_finder_for_grid_scan.run(imgfile, dparams, data_and_header=(data, header))
result["work_dir"] = dparams.work_dir
result["params"] = dparams
result["imgfile"] = imgfile
result["template"] = "%s_%s.img"%(str(header["file_prefix"]), "?"*6)
result["idx"] = header["frame"]+1
results_sender.send_pyobj(result)
#os.remove(imgfile)
# the message for control
if socks.get(control_receiver) == zmq.POLLIN:
msg = control_receiver.recv_pyobj()
if "params" in msg:
params_dict = msg["params"]
shikalog.info("worker %d: Parameters updated" % wrk_num)
shikalog.info("Worker %d finished." % wrk_num)
# worker()
def run_from_args(argv):
if "-h" in argv or "--help" in argv:
print "All parameters:\n"
iotbx.phil.parse(master_params_str).show(prefix=" ", attributes_level=1)
return
cmdline = iotbx.phil.process_command_line(args=argv,
master_string=master_params_str)
global params
params = cmdline.work.extract()
args = cmdline.remaining_args
if params.topdir is None: params.topdir = os.getcwd()
if params.mode == "zoo":
if params.bl == "32xu": params.blconfig = "/isilon/BL32XU/BLsoft/PPPP/10.Zoo/ZooConfig/"
elif params.bl == "26b2": params.blconfig = "/isilon/users/rikenbl/rikenbl/Zoo/ZooConfig/"
if params.blconfig is None:
params.blconfig = "/isilon/blconfig/bl%s" % params.bl
shikalog.config(params.bl, "backend", params.logroot)
if params.force_ssh_from is not None:
shikalog.info("SSH_CONNECTION= %s" % os.environ.get("SSH_CONNECTION", ""))
if "SSH_CONNECTION" not in os.environ:
print
print "ERROR!! Cannot get host information! SHIKA cannot start."
print "Please contact staff. Need to use ssh or change parameter."
print
return
ssh_from = os.environ["SSH_CONNECTION"].split()[0]
if ssh_from != params.force_ssh_from:
print
print "ERROR!! Your host is not allowed! SHIKA cannot start here."
if ssh_from[:ssh_from.rindex(".")] == "10.10.126" and 162 <= int(ssh_from[ssh_from.rindex(".")+1:]) <= 170:
shikalog.debug("Access from Oyster computer.")
print "You appear to be in Oyster. Please *LOGOUT* from Oyster, and try to start SHIKA from your local computer!!"
else:
print "Please contact staff. Need to access from the allowed host."
print
return
#import subprocess
#import sys
#pickle.dump(params, open("/tmp/params.pkl","w"),-1)
#pp = []
for i in xrange(params.nproc):
Process(target=worker, args=(i,params)).start()
#p = subprocess.Popen(["%s -"%sys.executable], shell=True, stdin=subprocess.PIPE)
#p.stdin.write("from yamtbx.dataproc.myspotfinder.command_line.spot_finder_backend import worker\nimport pickle\nworker(%d, pickle.load(open('/tmp/params.pkl')))"%i)
#p.stdin.close()
#pp.append(p)
rqueue = Queue.Queue()
results_manager = ResultsManager(rqueue=rqueue, dbdir=params.dbdir)
if params.mode == "watch_ramdisk":
ramdisk_watcher = WatchRamdiskThread(pushport=params.ports[0],
interval=params.ramdisk_walk_interval)
ramdisk_watcher.start()
elif params.mode != "eiger_streaming":
queue = Queue.Queue()
scanlog_watcher = WatchScanlogThread(queue, topdir=params.topdir,
beamline=params.bl, expdate=params.date)
dir_watcher = WatchDirThread(queue, pushport=params.ports[0])
scanlog_watcher.start()
dir_watcher.start()
results_manager.start() # this blocks!??!
results_receiver(rqueue=rqueue, pullport=params.ports[1], results_manager=results_manager)
#for p in pp: p.wait()
if params.nproc==0:
while True:
time.sleep(1)
# run_from_args()
if __name__ == "__main__":
import sys
run_from_args(sys.argv[1:])
|
app.py
|
import os
import multiprocessing
from datetime import datetime
from bot import Bot
from db import DB
def startBot(username, password, copy):
b = Bot(username, password, copy)
b.run()
if __name__ == "__main__":
db = None
jobs = []
while True:
print("\nSelect an Option:\n"
"1. List Active Accounts\n"
"2: Add User\n"
"3: Pause User\n"
"4: Remove User\n"
"5: Exit\n")
inp = raw_input("Selection: ")
if inp == '1':
if len(jobs) == 0:
print("\n\nNo accounts have been added.")
else:
print("\n\nActive Accounts:")
for i in range(0, len(jobs)):
currentJob = jobs[i]
db = DB("accounts/" + currentJob[0] + "/data.db")
status = ""
if currentJob[1].is_alive():
expires = db.expired()
status = "ACTIVE (Expires " + str(expires) + ")"
else:
if os.path.exists('accounts/' + currentJob[0]):
expires = datetime.strptime(db.expired(), "%Y-%m-%d %H:%M:%S.%f")
if expires <= datetime.now():
status = "DEAD"
else:
status = "DEAD | NOT EXPIRED - RELOAD ACCOUNT"
print(str(i + 1) + ": " + currentJob[0] + " - " + status)
db = None
print("\n")
elif inp == '2':
copy = []
username = raw_input("Username: ")
password = raw_input("Password: ")
for i in range(0, 10):
a = raw_input("Copy " + str(i + 1) + ": ")
if a != "":
copy.append(a)
else:
break
p = multiprocessing.Process(target=startBot, args=(username, password, copy,))
jobs.append([username, p])
p.start()
elif inp == '3':
print("T")
elif inp == '4':
userToTerminate = raw_input("User To Remove: ")
userToTerminate = int(userToTerminate) - 1
if userToTerminate >= len(jobs) or userToTerminate < 0:
print("Invalid Selection")
else:
user = jobs[userToTerminate][0]
jobToTerminate = jobs[userToTerminate][1]
jobToTerminate.terminate()
jobs.remove(jobs[userToTerminate])
print("\n" + user + ": Account has been removed.\n")
elif inp == '5':
confirmation = raw_input("Are you sure you want to exit? All accounts will be shut down (Y/N): ")
if confirmation == 'y' or confirmation == 'Y':
for i in range(0, len(jobs)):
currentJob = jobs[i][1]
currentJob.terminate()
exit()
elif confirmation == 'n' or confirmation == 'N':
continue
else:
print("\nInvalid option\n")
|
dx_operations_vdb_orig.py
|
#!/usr/bin/env python
# Corey Brune - Oct 2016
#This script starts or stops a VDB
#requirements
#pip install docopt delphixpy
#The below doc follows the POSIX compliant standards and allows us to use
#this doc to also define our arguments for the script.
"""List all VDBs or Start, stop, enable, disable a VDB
Usage:
dx_operations_vdb.py (--vdb <name> [--stop | --start | --enable | --disable] | --list | --all_dbs <name>)
[-d <identifier> | --engine <identifier> | --all]
[--debug] [--parallel <n>] [--poll <n>]
[--config <path_to_file>] [--logdir <path_to_file>]
dx_operations_vdb.py -h | --help | -v | --version
List all VDBs, start, stop, enable, disable a VDB
Examples:
dx_operations_vdb.py --engine landsharkengine --vdb testvdb --stop
dx_operations_vdb.py --vdb testvdb --start
dx_operations_vdb.py --all_dbs enable
dx_operations_vdb.py --all_dbs disable
dx_operations_vdb.py --list
Options:
--vdb <name> Name of the VDB to stop or start
--start Start the VDB
--stop Stop the VDB
--all_dbs <name> Enable or disable all dSources and VDBs
--list List all databases from an engine
--enable Enable the VDB
--disable Disable the VDB
-d <identifier> Identifier of Delphix engine in dxtools.conf.
--engine <identifier> Alt Identifier of Delphix engine in dxtools.conf.
--all Run against all engines.
--debug Enable debug logging
--parallel <n> Limit number of jobs to maxjob
--poll <n> The number of seconds to wait between job polls
[default: 10]
--config <path_to_file> The path to the dxtools.conf file
[default: ./dxtools.conf]
--logdir <path_to_file> The path to the logfile you want to use.
[default: ./dx_operations_vdb.log]
-h --help Show this screen.
-v --version Show version.
"""
VERSION = 'v.0.2.301'
import sys
from os.path import basename
from time import sleep, time
from docopt import docopt
import re
from delphixpy.exceptions import HttpError
from delphixpy.exceptions import JobError
from delphixpy.exceptions import RequestError
from delphixpy.web import database
from delphixpy.web import job
from delphixpy.web import source
from delphixpy.web import sourceconfig
from delphixpy.web import repository
from delphixpy.web import environment
from delphixpy.web.capacity import consumer
from lib.DlpxException import DlpxException
from lib.DxLogging import logging_est
from lib.DxLogging import print_debug
from lib.DxLogging import print_info
from lib.DxLogging import print_exception
from lib.GetReferences import find_obj_by_name
from lib.GetSession import GetSession
def vdb_operation(vdb_name, operation):
"""
Function to start, stop, enable or disable a VDB
"""
print_debug('Searching for %s reference.\n' % (vdb_name))
vdb_obj = find_obj_by_name(dx_session_obj.server_session, source, vdb_name)
try:
if vdb_obj:
if operation == 'start':
source.start(dx_session_obj.server_session, vdb_obj.reference)
elif operation == 'stop':
source.stop(dx_session_obj.server_session, vdb_obj.reference)
elif operation == 'enable':
source.enable(dx_session_obj.server_session, vdb_obj.reference)
elif operation == 'disable':
source.disable(dx_session_obj.server_session,
vdb_obj.reference)
except (RequestError, HttpError, JobError, AttributeError) as e:
raise DlpxException('An error occurred while performing %s on %s:\n%s\n' %
(operation, vdb_name, e))
def all_databases(operation):
"""
Enable or disable all dSources and VDBs on an engine
operation: enable or disable dSources and VDBs
"""
for db in database.get_all(dx_session_obj.server_session):
print '%s %s\n' % (operation, db.name)
vdb_operation(db.name, operation)
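# Short pause between databases, presumably to avoid flooding the engine with back-to-back operations.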
sleep(2)
def list_databases():
"""
Function to list all databases for a given engine
"""
try:
for db_stats in consumer.get_all(dx_session_obj.server_session):
db_stats_env = repository.get(dx_session_obj.server_session,
find_obj_by_name(
dx_session_obj.server_session,sourceconfig,
db_stats.name).repository)
env_obj = environment.get(dx_session_obj.server_session,
db_stats_env.environment)
source_stats = find_obj_by_name(dx_session_obj.server_session,
source, db_stats.name)
if db_stats.parent is None:
db_stats.parent = 'dSource'
print('Name = %s\nProvision Container Reference= %s\n'
'Virtualized Database Disk Usage: %.2f GB\n'
'Unvirtualized Database Disk Usage: %.2f GB\n'
'Size of Snapshots: %.2f GB\nEnabled: %s\n'
'Status:%s\nEnvironment: %s\n' % (str(db_stats.name),
str(db_stats.parent),
db_stats.breakdown.active_space / 1024 / 1024 / 1024,
source_stats.runtime.database_size / 1024 / 1024 / 1024,
db_stats.breakdown.sync_space / 1024 / 1024 / 1024,
source_stats.runtime.enabled, source_stats.runtime.status,
env_obj.name))
except (RequestError, DlpxException, JobError, AttributeError) as e:
print_exception('An error occurred while listing databases:'
' \n{}\n'.format((e)))
def run_async(func):
"""
http://code.activestate.com/recipes/576684-simple-threading-decorator/
run_async(func)
function decorator, intended to make "func" run in a separate
thread (asynchronously).
Returns the created Thread object
E.g.:
@run_async
def task1():
do_something
@run_async
def task2():
do_something_too
t1 = task1()
t2 = task2()
...
t1.join()
t2.join()
"""
from threading import Thread
from functools import wraps
@wraps(func)
def async_func(*args, **kwargs):
func_hl = Thread(target = func, args = args, kwargs = kwargs)
func_hl.start()
return func_hl
return async_func
@run_async
def main_workflow(engine):
"""
This function actually runs the jobs.
Use the @run_async decorator to run this function asynchronously.
This allows us to run against multiple Delphix Engines simultaneously
engine: Dictionary of engines
"""
jobs = {}
try:
#Setup the connection to the Delphix Engine
dx_session_obj.serversess(engine['ip_address'], engine['username'],
engine['password'])
except DlpxException as e:
print_exception('\nERROR: Engine %s encountered an error while '
'connecting:\n%s\n' % (engine['hostname'], e))
sys.exit(1)
thingstodo = ["thingtodo"]
#reset the running job count before we begin
i = 0
with dx_session_obj.job_mode(single_thread):
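# Poll loop: perform the requested operation once (the single "thingtodo"),
# then keep polling any jobs recorded in the jobs dict until they reach a terminal state.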
while (len(jobs) > 0 or len(thingstodo) > 0):
if len(thingstodo) > 0:
try:
if arguments['--start']:
vdb_operation(database_name, 'start')
elif arguments['--stop']:
vdb_operation(database_name, 'stop')
elif arguments['--enable']:
vdb_operation(database_name, 'enable')
elif arguments['--disable']:
vdb_operation(database_name, 'disable')
elif arguments['--list']:
list_databases()
elif arguments['--all_dbs']:
if not re.match('disable|enable',
arguments['--all_dbs'].lower()):
raise DlpxException('--all_dbs should be either '
'enable or disable')
except DlpxException as e:
print('\nERROR: Could not perform action on the VDB(s)'
'\n%s\n' % e.message)
thingstodo.pop()
#get all the jobs, then inspect them
i = 0
for j in jobs.keys():
job_obj = job.get(dx_session_obj.server_session, jobs[j])
print_debug(job_obj)
print_info(engine["hostname"] + ": VDB Operations: " +
job_obj.job_state)
if job_obj.job_state in ["CANCELED", "COMPLETED", "FAILED"]:
#If the job is in a non-running state, remove it from the
# running jobs list.
del jobs[j]
else:
#If the job is in a running state, increment the running
# job count.
i += 1
print_info(engine["hostname"] + ": " + str(i) + " jobs running. ")
#If we have running jobs, pause before repeating the checks.
if len(jobs) > 0:
sleep(float(arguments['--poll']))
def run_job():
"""
This function runs the main_workflow asynchronously against all the servers
specified
"""
#Create an empty list to store threads we create.
threads = []
#If the --all argument was given, run against every engine in dxtools.conf
engine = None
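# Engine selection: --all runs against every engine in dxtools.conf, --engine picks
# a named entry, otherwise fall back to the engine flagged as default in the config.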
if arguments['--all']:
print_info("Executing against all Delphix Engines in the dxtools.conf")
try:
#For each server in the dxtools.conf...
for delphix_engine in dx_session_obj.dlpx_engines:
engine = dx_session_obj.dlpx_engines[delphix_engine]
#Create a new thread and add it to the list.
threads.append(main_workflow(engine))
except DlpxException as e:
print 'Error encountered in run_job():\n%s' % (e)
sys.exit(1)
elif arguments['--all'] is False:
#Else if the --engine argument was given, test to see if the engine
# exists in dxtools.conf
if arguments['--engine']:
try:
engine = dx_session_obj.dlpx_engines[arguments['--engine']]
print_info('Executing against Delphix Engine: %s\n' %
(arguments['--engine']))
except (DlpxException, RequestError, KeyError) as e:
raise DlpxException('\nERROR: Delphix Engine %s cannot be '
'found in %s. Please check your value '
'and try again. Exiting.\n' % (
arguments['--engine'], config_file_path))
else:
#Else search for a default engine in the dxtools.conf
for delphix_engine in dx_session_obj.dlpx_engines:
if dx_session_obj.dlpx_engines[delphix_engine]['default'] == \
'true':
engine = dx_session_obj.dlpx_engines[delphix_engine]
print_info('Executing against the default Delphix Engine '
'in the dxtools.conf: %s' % (
dx_session_obj.dlpx_engines[delphix_engine]['hostname']))
break
if engine is None:
raise DlpxException("\nERROR: No default engine found. Exiting")
#run the job against the engine
threads.append(main_workflow(engine))
#For each thread in the list...
for each in threads:
#join them back together so that we wait for all threads to complete
# before moving on
each.join()
def time_elapsed():
"""
This function calculates the time elapsed since the beginning of the script.
Call this anywhere you want to note the progress in terms of time
"""
elapsed_minutes = round((time() - time_start) / 60, 1)
return elapsed_minutes
def main(argv):
#We want to be able to call on these variables anywhere in the script.
global single_thread
global usebackup
global time_start
global config_file_path
global database_name
global dx_session_obj
global debug
if arguments['--debug']:
debug = True
try:
dx_session_obj = GetSession()
logging_est(arguments['--logdir'])
print_debug(arguments)
time_start = time()
engine = None
single_thread = False
config_file_path = arguments['--config']
#Parse the dxtools.conf and put it into a dictionary
dx_session_obj.get_config(config_file_path)
database_name = arguments['--vdb']
#This is the function that will handle processing main_workflow for
# all the servers.
run_job()
elapsed_minutes = time_elapsed()
print_info("script took " + str(elapsed_minutes) +
" minutes to get this far.")
#Here we handle what we do when the unexpected happens
except SystemExit as e:
"""
This is what we use to handle our sys.exit(#)
"""
sys.exit(e)
except HttpError as e:
"""
We use this exception handler when our connection to Delphix fails
"""
print_exception('Connection failed to the Delphix Engine. '
'Please check the ERROR message below.')
sys.exit(1)
except JobError as e:
"""
We use this exception handler when a job fails in Delphix so that
we have actionable data
"""
elapsed_minutes = time_elapsed()
print_exception('A job failed in the Delphix Engine')
print_info('%s took %s minutes to get this far\n' %
(basename(__file__), str(elapsed_minutes)))
sys.exit(3)
except DlpxException as e:
print_exception('A DlpxException occurred:\n%s' % (e))
elapsed_minutes = time_elapsed()
print_info('%s took %s minutes to get this far\n' %
(basename(__file__), str(elapsed_minutes)))
sys.exit(1)
except KeyboardInterrupt:
"""
We use this exception handler to gracefully handle ctrl+c exits
"""
print_debug("You sent a CTRL+C to interrupt the process")
elapsed_minutes = time_elapsed()
print_info('%s took %s minutes to get this far\n' %
(basename(__file__), str(elapsed_minutes)))
except:
"""
Everything else gets caught here
"""
print_exception(sys.exc_info()[0])
elapsed_minutes = time_elapsed()
print_info('%s took %s minutes to get this far\n' %
(basename(__file__), str(elapsed_minutes)))
sys.exit(1)
if __name__ == "__main__":
#Grab our arguments from the doc at the top of the script
arguments = docopt(__doc__, version=basename(__file__) + " " + VERSION)
#Feed our arguments to the main function, and off we go!
main(arguments)
|
destination.py
|
#!/usr/bin/env python
import socket # for python Socket API
import time # used to generate timestamps
import struct # used in decoding messages
from threading import Thread # used for multithreaded structure
# Destination node
if __name__ == "__main__":
# host1 = listen address for destination from r1
host1 = ("10.10.3.2", 10003)
# host2 = listen address for destination from r2
host2 = ("10.10.5.2", 10003)
# Creates an IPv4 socket with UDP to listen from r1
sock1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Set SO_REUSEADDR so the OS lets the socket rebind this port right away (avoids "address already in use" errors on restart)
sock1.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Binds listening socket to listen address from r1 (host1)
sock1.bind(host1)
# Creates an IPv4 socket with UDP to listen from r2
sock2 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Set SO_REUSEADDR so the OS lets the socket rebind this port right away (avoids "address already in use" errors on restart)
sock2.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Binds listening socket to listen address from r2 (host2)
sock2.bind(host2)
print "Listening on", host1
print "Listening on", host2
# Two lists used for calculating avg e2e delay between source and destination
delta1 = [] # for packets that came from r1
delta2 = [] # for packets that came from r2
# Two lists used for storing incoming data temporarily
data1 = [] # for packets that came from r1
data2 = [] # for packets that came from r2
# Handler function that listens for incoming packets on the given socket.
# When a message arrives, it computes the difference between the current time and the
# timestamp carried in the packet, appends that delay to the 'delta' list, and appends the payload to the 'data' list
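# Packet layout as decoded below: bytes 0-3 hold the sequence number ('i'),
# bytes 4-11 hold the sender's timestamp as a double ('d'), and the rest is payload.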
def handler(sock, delta, data):
while True:
try:
msg = sock.recv(1024)
if msg == "" or msg is None: # terminate connection
break
# extract sequence number
seq = struct.unpack("i", msg[:4])[0]
# check if whole packet came (1024 bytes)
if not (seq + 1) % 8:
# calculate e2e delay and append it to the delta list
delta.append(time.time() - struct.unpack("d", msg[4:12])[0])
print "Received DATA within {}ms".format(delta[-1] * 1000)
# append data to the data list
data.append([seq, msg[12:]])
except KeyboardInterrupt:
print "Exitting..."
break
# Since the destination needs to listen to 2 nodes (r1 and r2) at the same time,
# a thread is created to run 'handler' for incoming messages from r1,
# with the matching socket and lists passed as arguments
r1_handler = Thread(target=handler,args=(sock1, delta1, data1))
r1_handler.start()
# The main thread handles incoming messages from r2
handler(sock2, delta2, data2)
# Wait for r1_handler to finish before computing statistics
r1_handler.join()
# Calculate the average end-to-end delay as the mean of all recorded delays
print "Avg e2e:", (sum(delta1 + delta2) / len(delta1 + delta2)) * 1000 , "ms"
# Combine arrived data from 2 lists
data = data1 + data2
# and order them according to their sequence numbers
data.sort()
# Save them to a file named "sensor_readings.txt"
with open("sensor_readings.txt", "wb") as output_file:
for i in data:
output_file.write(i[1])
print "Closing connection..."
|
test_basic.py
|
import gc
import re
import sys
import time
import uuid
import weakref
from datetime import datetime
from platform import python_implementation
from threading import Thread
import pytest
import werkzeug.serving
from werkzeug.exceptions import BadRequest
from werkzeug.exceptions import Forbidden
from werkzeug.exceptions import NotFound
from werkzeug.http import parse_date
from werkzeug.routing import BuildError
import flask
require_cpython_gc = pytest.mark.skipif(
python_implementation() != "CPython",
reason="Requires CPython GC behavior",
)
def test_options_work(app, client):
@app.route("/", methods=["GET", "POST"])
def index():
return "Hello World"
rv = client.open("/", method="OPTIONS")
assert sorted(rv.allow) == ["GET", "HEAD", "OPTIONS", "POST"]
assert rv.data == b""
def test_options_on_multiple_rules(app, client):
@app.route("/", methods=["GET", "POST"])
def index():
return "Hello World"
@app.route("/", methods=["PUT"])
def index_put():
return "Aha!"
rv = client.open("/", method="OPTIONS")
assert sorted(rv.allow) == ["GET", "HEAD", "OPTIONS", "POST", "PUT"]
def test_provide_automatic_options_attr():
app = flask.Flask(__name__)
def index():
return "Hello World!"
index.provide_automatic_options = False
app.route("/")(index)
rv = app.test_client().open("/", method="OPTIONS")
assert rv.status_code == 405
app = flask.Flask(__name__)
def index2():
return "Hello World!"
index2.provide_automatic_options = True
app.route("/", methods=["OPTIONS"])(index2)
rv = app.test_client().open("/", method="OPTIONS")
assert sorted(rv.allow) == ["OPTIONS"]
def test_provide_automatic_options_kwarg(app, client):
def index():
return flask.request.method
def more():
return flask.request.method
app.add_url_rule("/", view_func=index, provide_automatic_options=False)
app.add_url_rule(
"/more",
view_func=more,
methods=["GET", "POST"],
provide_automatic_options=False,
)
assert client.get("/").data == b"GET"
rv = client.post("/")
assert rv.status_code == 405
assert sorted(rv.allow) == ["GET", "HEAD"]
rv = client.open("/", method="OPTIONS")
assert rv.status_code == 405
rv = client.head("/")
assert rv.status_code == 200
assert not rv.data # head truncates
assert client.post("/more").data == b"POST"
assert client.get("/more").data == b"GET"
rv = client.delete("/more")
assert rv.status_code == 405
assert sorted(rv.allow) == ["GET", "HEAD", "POST"]
rv = client.open("/more", method="OPTIONS")
assert rv.status_code == 405
def test_request_dispatching(app, client):
@app.route("/")
def index():
return flask.request.method
@app.route("/more", methods=["GET", "POST"])
def more():
return flask.request.method
assert client.get("/").data == b"GET"
rv = client.post("/")
assert rv.status_code == 405
assert sorted(rv.allow) == ["GET", "HEAD", "OPTIONS"]
rv = client.head("/")
assert rv.status_code == 200
assert not rv.data # head truncates
assert client.post("/more").data == b"POST"
assert client.get("/more").data == b"GET"
rv = client.delete("/more")
assert rv.status_code == 405
assert sorted(rv.allow) == ["GET", "HEAD", "OPTIONS", "POST"]
def test_disallow_string_for_allowed_methods(app):
with pytest.raises(TypeError):
@app.route("/", methods="GET POST")
def index():
return "Hey"
def test_url_mapping(app, client):
random_uuid4 = "7eb41166-9ebf-4d26-b771-ea3f54f8b383"
def index():
return flask.request.method
def more():
return flask.request.method
def options():
return random_uuid4
app.add_url_rule("/", "index", index)
app.add_url_rule("/more", "more", more, methods=["GET", "POST"])
# Issue 1288: Test that automatic options are not added
# when non-uppercase 'options' in methods
app.add_url_rule("/options", "options", options, methods=["options"])
assert client.get("/").data == b"GET"
rv = client.post("/")
assert rv.status_code == 405
assert sorted(rv.allow) == ["GET", "HEAD", "OPTIONS"]
rv = client.head("/")
assert rv.status_code == 200
assert not rv.data # head truncates
assert client.post("/more").data == b"POST"
assert client.get("/more").data == b"GET"
rv = client.delete("/more")
assert rv.status_code == 405
assert sorted(rv.allow) == ["GET", "HEAD", "OPTIONS", "POST"]
rv = client.open("/options", method="OPTIONS")
assert rv.status_code == 200
assert random_uuid4 in rv.data.decode("utf-8")
def test_werkzeug_routing(app, client):
from werkzeug.routing import Submount, Rule
app.url_map.add(
Submount("/foo", [Rule("/bar", endpoint="bar"), Rule("/", endpoint="index")])
)
def bar():
return "bar"
def index():
return "index"
app.view_functions["bar"] = bar
app.view_functions["index"] = index
assert client.get("/foo/").data == b"index"
assert client.get("/foo/bar").data == b"bar"
def test_endpoint_decorator(app, client):
from werkzeug.routing import Submount, Rule
app.url_map.add(
Submount("/foo", [Rule("/bar", endpoint="bar"), Rule("/", endpoint="index")])
)
@app.endpoint("bar")
def bar():
return "bar"
@app.endpoint("index")
def index():
return "index"
assert client.get("/foo/").data == b"index"
assert client.get("/foo/bar").data == b"bar"
def test_session(app, client):
@app.route("/set", methods=["POST"])
def set():
assert not flask.session.accessed
assert not flask.session.modified
flask.session["value"] = flask.request.form["value"]
assert flask.session.accessed
assert flask.session.modified
return "value set"
@app.route("/get")
def get():
assert not flask.session.accessed
assert not flask.session.modified
v = flask.session.get("value", "None")
assert flask.session.accessed
assert not flask.session.modified
return v
assert client.post("/set", data={"value": "42"}).data == b"value set"
assert client.get("/get").data == b"42"
def test_session_using_server_name(app, client):
app.config.update(SERVER_NAME="example.com")
@app.route("/")
def index():
flask.session["testing"] = 42
return "Hello World"
rv = client.get("/", "http://example.com/")
assert "domain=.example.com" in rv.headers["set-cookie"].lower()
assert "httponly" in rv.headers["set-cookie"].lower()
def test_session_using_server_name_and_port(app, client):
app.config.update(SERVER_NAME="example.com:8080")
@app.route("/")
def index():
flask.session["testing"] = 42
return "Hello World"
rv = client.get("/", "http://example.com:8080/")
assert "domain=.example.com" in rv.headers["set-cookie"].lower()
assert "httponly" in rv.headers["set-cookie"].lower()
def test_session_using_server_name_port_and_path(app, client):
app.config.update(SERVER_NAME="example.com:8080", APPLICATION_ROOT="/foo")
@app.route("/")
def index():
flask.session["testing"] = 42
return "Hello World"
rv = client.get("/", "http://example.com:8080/foo")
assert "domain=example.com" in rv.headers["set-cookie"].lower()
assert "path=/foo" in rv.headers["set-cookie"].lower()
assert "httponly" in rv.headers["set-cookie"].lower()
def test_session_using_application_root(app, client):
class PrefixPathMiddleware:
def __init__(self, app, prefix):
self.app = app
self.prefix = prefix
def __call__(self, environ, start_response):
environ["SCRIPT_NAME"] = self.prefix
return self.app(environ, start_response)
app.wsgi_app = PrefixPathMiddleware(app.wsgi_app, "/bar")
app.config.update(APPLICATION_ROOT="/bar")
@app.route("/")
def index():
flask.session["testing"] = 42
return "Hello World"
rv = client.get("/", "http://example.com:8080/")
assert "path=/bar" in rv.headers["set-cookie"].lower()
def test_session_using_session_settings(app, client):
app.config.update(
SERVER_NAME="www.example.com:8080",
APPLICATION_ROOT="/test",
SESSION_COOKIE_DOMAIN=".example.com",
SESSION_COOKIE_HTTPONLY=False,
SESSION_COOKIE_SECURE=True,
SESSION_COOKIE_SAMESITE="Lax",
SESSION_COOKIE_PATH="/",
)
@app.route("/")
def index():
flask.session["testing"] = 42
return "Hello World"
rv = client.get("/", "http://www.example.com:8080/test/")
cookie = rv.headers["set-cookie"].lower()
assert "domain=.example.com" in cookie
assert "path=/" in cookie
assert "secure" in cookie
assert "httponly" not in cookie
assert "samesite" in cookie
def test_session_using_samesite_attribute(app, client):
@app.route("/")
def index():
flask.session["testing"] = 42
return "Hello World"
app.config.update(SESSION_COOKIE_SAMESITE="invalid")
with pytest.raises(ValueError):
client.get("/")
app.config.update(SESSION_COOKIE_SAMESITE=None)
rv = client.get("/")
cookie = rv.headers["set-cookie"].lower()
assert "samesite" not in cookie
app.config.update(SESSION_COOKIE_SAMESITE="Strict")
rv = client.get("/")
cookie = rv.headers["set-cookie"].lower()
assert "samesite=strict" in cookie
app.config.update(SESSION_COOKIE_SAMESITE="Lax")
rv = client.get("/")
cookie = rv.headers["set-cookie"].lower()
assert "samesite=lax" in cookie
def test_session_localhost_warning(recwarn, app, client):
app.config.update(SERVER_NAME="localhost:5000")
@app.route("/")
def index():
flask.session["testing"] = 42
return "testing"
rv = client.get("/", "http://localhost:5000/")
assert "domain" not in rv.headers["set-cookie"].lower()
w = recwarn.pop(UserWarning)
assert "'localhost' is not a valid cookie domain" in str(w.message)
def test_session_ip_warning(recwarn, app, client):
app.config.update(SERVER_NAME="127.0.0.1:5000")
@app.route("/")
def index():
flask.session["testing"] = 42
return "testing"
rv = client.get("/", "http://127.0.0.1:5000/")
assert "domain=127.0.0.1" in rv.headers["set-cookie"].lower()
w = recwarn.pop(UserWarning)
assert "cookie domain is an IP" in str(w.message)
def test_missing_session(app):
app.secret_key = None
def expect_exception(f, *args, **kwargs):
e = pytest.raises(RuntimeError, f, *args, **kwargs)
assert e.value.args and "session is unavailable" in e.value.args[0]
with app.test_request_context():
assert flask.session.get("missing_key") is None
expect_exception(flask.session.__setitem__, "foo", 42)
expect_exception(flask.session.pop, "foo")
def test_session_expiration(app, client):
permanent = True
@app.route("/")
def index():
flask.session["test"] = 42
flask.session.permanent = permanent
return ""
@app.route("/test")
def test():
return str(flask.session.permanent)
rv = client.get("/")
assert "set-cookie" in rv.headers
match = re.search(r"(?i)\bexpires=([^;]+)", rv.headers["set-cookie"])
expires = parse_date(match.group())
expected = datetime.utcnow() + app.permanent_session_lifetime
assert expires.year == expected.year
assert expires.month == expected.month
assert expires.day == expected.day
rv = client.get("/test")
assert rv.data == b"True"
permanent = False
rv = client.get("/")
assert "set-cookie" in rv.headers
match = re.search(r"\bexpires=([^;]+)", rv.headers["set-cookie"])
assert match is None
def test_session_stored_last(app, client):
@app.after_request
def modify_session(response):
flask.session["foo"] = 42
return response
@app.route("/")
def dump_session_contents():
return repr(flask.session.get("foo"))
assert client.get("/").data == b"None"
assert client.get("/").data == b"42"
def test_session_special_types(app, client):
now = datetime.utcnow().replace(microsecond=0)
the_uuid = uuid.uuid4()
@app.route("/")
def dump_session_contents():
flask.session["t"] = (1, 2, 3)
flask.session["b"] = b"\xff"
flask.session["m"] = flask.Markup("<html>")
flask.session["u"] = the_uuid
flask.session["d"] = now
flask.session["t_tag"] = {" t": "not-a-tuple"}
flask.session["di_t_tag"] = {" t__": "not-a-tuple"}
flask.session["di_tag"] = {" di": "not-a-dict"}
return "", 204
with client:
client.get("/")
s = flask.session
assert s["t"] == (1, 2, 3)
assert type(s["b"]) == bytes
assert s["b"] == b"\xff"
assert type(s["m"]) == flask.Markup
assert s["m"] == flask.Markup("<html>")
assert s["u"] == the_uuid
assert s["d"] == now
assert s["t_tag"] == {" t": "not-a-tuple"}
assert s["di_t_tag"] == {" t__": "not-a-tuple"}
assert s["di_tag"] == {" di": "not-a-dict"}
def test_session_cookie_setting(app):
is_permanent = True
@app.route("/bump")
def bump():
rv = flask.session["foo"] = flask.session.get("foo", 0) + 1
flask.session.permanent = is_permanent
return str(rv)
@app.route("/read")
def read():
return str(flask.session.get("foo", 0))
def run_test(expect_header):
with app.test_client() as c:
assert c.get("/bump").data == b"1"
assert c.get("/bump").data == b"2"
assert c.get("/bump").data == b"3"
rv = c.get("/read")
set_cookie = rv.headers.get("set-cookie")
assert (set_cookie is not None) == expect_header
assert rv.data == b"3"
is_permanent = True
app.config["SESSION_REFRESH_EACH_REQUEST"] = True
run_test(expect_header=True)
is_permanent = True
app.config["SESSION_REFRESH_EACH_REQUEST"] = False
run_test(expect_header=False)
is_permanent = False
app.config["SESSION_REFRESH_EACH_REQUEST"] = True
run_test(expect_header=False)
is_permanent = False
app.config["SESSION_REFRESH_EACH_REQUEST"] = False
run_test(expect_header=False)
def test_session_vary_cookie(app, client):
@app.route("/set")
def set_session():
flask.session["test"] = "test"
return ""
@app.route("/get")
def get():
return flask.session.get("test")
@app.route("/getitem")
def getitem():
return flask.session["test"]
@app.route("/setdefault")
def setdefault():
return flask.session.setdefault("test", "default")
@app.route("/vary-cookie-header-set")
def vary_cookie_header_set():
response = flask.Response()
response.vary.add("Cookie")
flask.session["test"] = "test"
return response
@app.route("/vary-header-set")
def vary_header_set():
response = flask.Response()
response.vary.update(("Accept-Encoding", "Accept-Language"))
flask.session["test"] = "test"
return response
@app.route("/no-vary-header")
def no_vary_header():
return ""
def expect(path, header_value="Cookie"):
rv = client.get(path)
if header_value:
# The 'Vary' key should exist in the headers only once.
assert len(rv.headers.get_all("Vary")) == 1
assert rv.headers["Vary"] == header_value
else:
assert "Vary" not in rv.headers
expect("/set")
expect("/get")
expect("/getitem")
expect("/setdefault")
expect("/vary-cookie-header-set")
expect("/vary-header-set", "Accept-Encoding, Accept-Language, Cookie")
expect("/no-vary-header", None)
def test_flashes(app, req_ctx):
assert not flask.session.modified
flask.flash("Zap")
flask.session.modified = False
flask.flash("Zip")
assert flask.session.modified
assert list(flask.get_flashed_messages()) == ["Zap", "Zip"]
def test_extended_flashing(app):
# Be sure app.testing=True below, else tests can fail silently.
#
# Specifically, if app.testing is not set to True, the AssertionErrors
# in the view functions will cause a 500 response to the test client
# instead of propagating exceptions.
@app.route("/")
def index():
flask.flash("Hello World")
flask.flash("Hello World", "error")
flask.flash(flask.Markup("<em>Testing</em>"), "warning")
return ""
@app.route("/test/")
def test():
messages = flask.get_flashed_messages()
assert list(messages) == [
"Hello World",
"Hello World",
flask.Markup("<em>Testing</em>"),
]
return ""
@app.route("/test_with_categories/")
def test_with_categories():
messages = flask.get_flashed_messages(with_categories=True)
assert len(messages) == 3
assert list(messages) == [
("message", "Hello World"),
("error", "Hello World"),
("warning", flask.Markup("<em>Testing</em>")),
]
return ""
@app.route("/test_filter/")
def test_filter():
messages = flask.get_flashed_messages(
category_filter=["message"], with_categories=True
)
assert list(messages) == [("message", "Hello World")]
return ""
@app.route("/test_filters/")
def test_filters():
messages = flask.get_flashed_messages(
category_filter=["message", "warning"], with_categories=True
)
assert list(messages) == [
("message", "Hello World"),
("warning", flask.Markup("<em>Testing</em>")),
]
return ""
@app.route("/test_filters_without_returning_categories/")
def test_filters2():
messages = flask.get_flashed_messages(category_filter=["message", "warning"])
assert len(messages) == 2
assert messages[0] == "Hello World"
assert messages[1] == flask.Markup("<em>Testing</em>")
return ""
# Create new test client on each test to clean flashed messages.
client = app.test_client()
client.get("/")
client.get("/test_with_categories/")
client = app.test_client()
client.get("/")
client.get("/test_filter/")
client = app.test_client()
client.get("/")
client.get("/test_filters/")
client = app.test_client()
client.get("/")
client.get("/test_filters_without_returning_categories/")
def test_request_processing(app, client):
evts = []
@app.before_request
def before_request():
evts.append("before")
@app.after_request
def after_request(response):
response.data += b"|after"
evts.append("after")
return response
@app.route("/")
def index():
assert "before" in evts
assert "after" not in evts
return "request"
assert "after" not in evts
rv = client.get("/").data
assert "after" in evts
assert rv == b"request|after"
def test_request_preprocessing_early_return(app, client):
evts = []
@app.before_request
def before_request1():
evts.append(1)
@app.before_request
def before_request2():
evts.append(2)
return "hello"
@app.before_request
def before_request3():
evts.append(3)
return "bye"
@app.route("/")
def index():
evts.append("index")
return "damnit"
rv = client.get("/").data.strip()
assert rv == b"hello"
assert evts == [1, 2]
def test_after_request_processing(app, client):
@app.route("/")
def index():
@flask.after_this_request
def foo(response):
response.headers["X-Foo"] = "a header"
return response
return "Test"
resp = client.get("/")
assert resp.status_code == 200
assert resp.headers["X-Foo"] == "a header"
def test_teardown_request_handler(app, client):
called = []
@app.teardown_request
def teardown_request(exc):
called.append(True)
return "Ignored"
@app.route("/")
def root():
return "Response"
rv = client.get("/")
assert rv.status_code == 200
assert b"Response" in rv.data
assert len(called) == 1
def test_teardown_request_handler_debug_mode(app, client):
called = []
@app.teardown_request
def teardown_request(exc):
called.append(True)
return "Ignored"
@app.route("/")
def root():
return "Response"
rv = client.get("/")
assert rv.status_code == 200
assert b"Response" in rv.data
assert len(called) == 1
def test_teardown_request_handler_error(app, client):
called = []
app.testing = False
@app.teardown_request
def teardown_request1(exc):
assert type(exc) == ZeroDivisionError
called.append(True)
# This raises a new error and blows away sys.exc_info(), so we can
# test that all teardown_requests get passed the same original
# exception.
try:
raise TypeError()
except Exception:
pass
@app.teardown_request
def teardown_request2(exc):
assert type(exc) == ZeroDivisionError
called.append(True)
# This raises a new error and blows away sys.exc_info(), so we can
# test that all teardown_requests get passed the same original
# exception.
try:
raise TypeError()
except Exception:
pass
@app.route("/")
def fails():
1 // 0
rv = client.get("/")
assert rv.status_code == 500
assert b"Internal Server Error" in rv.data
assert len(called) == 2
def test_before_after_request_order(app, client):
called = []
@app.before_request
def before1():
called.append(1)
@app.before_request
def before2():
called.append(2)
@app.after_request
def after1(response):
called.append(4)
return response
@app.after_request
def after2(response):
called.append(3)
return response
@app.teardown_request
def finish1(exc):
called.append(6)
@app.teardown_request
def finish2(exc):
called.append(5)
@app.route("/")
def index():
return "42"
rv = client.get("/")
assert rv.data == b"42"
assert called == [1, 2, 3, 4, 5, 6]
def test_error_handling(app, client):
app.testing = False
@app.errorhandler(404)
def not_found(e):
return "not found", 404
@app.errorhandler(500)
def internal_server_error(e):
return "internal server error", 500
@app.errorhandler(Forbidden)
def forbidden(e):
return "forbidden", 403
@app.route("/")
def index():
flask.abort(404)
@app.route("/error")
def error():
1 // 0
@app.route("/forbidden")
def error2():
flask.abort(403)
rv = client.get("/")
assert rv.status_code == 404
assert rv.data == b"not found"
rv = client.get("/error")
assert rv.status_code == 500
assert b"internal server error" == rv.data
rv = client.get("/forbidden")
assert rv.status_code == 403
assert b"forbidden" == rv.data
def test_error_handler_unknown_code(app):
with pytest.raises(KeyError) as exc_info:
app.register_error_handler(999, lambda e: ("999", 999))
assert "Use a subclass" in exc_info.value.args[0]
def test_error_handling_processing(app, client):
app.testing = False
@app.errorhandler(500)
def internal_server_error(e):
return "internal server error", 500
@app.route("/")
def broken_func():
1 // 0
@app.after_request
def after_request(resp):
resp.mimetype = "text/x-special"
return resp
resp = client.get("/")
assert resp.mimetype == "text/x-special"
assert resp.data == b"internal server error"
def test_baseexception_error_handling(app, client):
app.testing = False
@app.route("/")
def broken_func():
raise KeyboardInterrupt()
with pytest.raises(KeyboardInterrupt):
client.get("/")
ctx = flask._request_ctx_stack.top
assert ctx.preserved
assert type(ctx._preserved_exc) is KeyboardInterrupt
def test_before_request_and_routing_errors(app, client):
@app.before_request
def attach_something():
flask.g.something = "value"
@app.errorhandler(404)
def return_something(error):
return flask.g.something, 404
rv = client.get("/")
assert rv.status_code == 404
assert rv.data == b"value"
def test_user_error_handling(app, client):
class MyException(Exception):
pass
@app.errorhandler(MyException)
def handle_my_exception(e):
assert isinstance(e, MyException)
return "42"
@app.route("/")
def index():
raise MyException()
assert client.get("/").data == b"42"
def test_http_error_subclass_handling(app, client):
class ForbiddenSubclass(Forbidden):
pass
@app.errorhandler(ForbiddenSubclass)
def handle_forbidden_subclass(e):
assert isinstance(e, ForbiddenSubclass)
return "banana"
@app.errorhandler(403)
def handle_403(e):
assert not isinstance(e, ForbiddenSubclass)
assert isinstance(e, Forbidden)
return "apple"
@app.route("/1")
def index1():
raise ForbiddenSubclass()
@app.route("/2")
def index2():
flask.abort(403)
@app.route("/3")
def index3():
raise Forbidden()
assert client.get("/1").data == b"banana"
assert client.get("/2").data == b"apple"
assert client.get("/3").data == b"apple"
def test_errorhandler_precedence(app, client):
class E1(Exception):
pass
class E2(Exception):
pass
class E3(E1, E2):
pass
@app.errorhandler(E2)
def handle_e2(e):
return "E2"
@app.errorhandler(Exception)
def handle_exception(e):
return "Exception"
@app.route("/E1")
def raise_e1():
raise E1
@app.route("/E3")
def raise_e3():
raise E3
rv = client.get("/E1")
assert rv.data == b"Exception"
rv = client.get("/E3")
assert rv.data == b"E2"
def test_trapping_of_bad_request_key_errors(app, client):
@app.route("/key")
def fail():
flask.request.form["missing_key"]
@app.route("/abort")
def allow_abort():
flask.abort(400)
rv = client.get("/key")
assert rv.status_code == 400
assert b"missing_key" not in rv.data
rv = client.get("/abort")
assert rv.status_code == 400
app.debug = True
with pytest.raises(KeyError) as e:
client.get("/key")
assert e.errisinstance(BadRequest)
assert "missing_key" in e.value.get_description()
rv = client.get("/abort")
assert rv.status_code == 400
app.debug = False
app.config["TRAP_BAD_REQUEST_ERRORS"] = True
with pytest.raises(KeyError):
client.get("/key")
with pytest.raises(BadRequest):
client.get("/abort")
def test_trapping_of_all_http_exceptions(app, client):
app.config["TRAP_HTTP_EXCEPTIONS"] = True
@app.route("/fail")
def fail():
flask.abort(404)
with pytest.raises(NotFound):
client.get("/fail")
def test_error_handler_after_processor_error(app, client):
app.testing = False
@app.before_request
def before_request():
if _trigger == "before":
1 // 0
@app.after_request
def after_request(response):
if _trigger == "after":
1 // 0
return response
@app.route("/")
def index():
return "Foo"
@app.errorhandler(500)
def internal_server_error(e):
return "Hello Server Error", 500
for _trigger in "before", "after":
rv = client.get("/")
assert rv.status_code == 500
assert rv.data == b"Hello Server Error"
def test_enctype_debug_helper(app, client):
from flask.debughelpers import DebugFilesKeyError
app.debug = True
@app.route("/fail", methods=["POST"])
def index():
return flask.request.files["foo"].filename
# with statement is important because we leave an exception on the
# stack otherwise and we want to ensure that this is not the case
# to not negatively affect other tests.
with client:
with pytest.raises(DebugFilesKeyError) as e:
client.post("/fail", data={"foo": "index.txt"})
assert "no file contents were transmitted" in str(e.value)
assert "This was submitted: 'index.txt'" in str(e.value)
def test_response_types(app, client):
@app.route("/text")
def from_text():
return "Hällo Wörld"
@app.route("/bytes")
def from_bytes():
return "Hällo Wörld".encode()
@app.route("/full_tuple")
def from_full_tuple():
return (
"Meh",
400,
{"X-Foo": "Testing", "Content-Type": "text/plain; charset=utf-8"},
)
@app.route("/text_headers")
def from_text_headers():
return "Hello", {"X-Foo": "Test", "Content-Type": "text/plain; charset=utf-8"}
@app.route("/text_status")
def from_text_status():
return "Hi, status!", 400
@app.route("/response_headers")
def from_response_headers():
return (
flask.Response(
"Hello world", 404, {"Content-Type": "text/html", "X-Foo": "Baz"}
),
{"Content-Type": "text/plain", "X-Foo": "Bar", "X-Bar": "Foo"},
)
@app.route("/response_status")
def from_response_status():
return app.response_class("Hello world", 400), 500
@app.route("/wsgi")
def from_wsgi():
return NotFound()
@app.route("/dict")
def from_dict():
return {"foo": "bar"}, 201
assert client.get("/text").data == "Hällo Wörld".encode()
assert client.get("/bytes").data == "Hällo Wörld".encode()
rv = client.get("/full_tuple")
assert rv.data == b"Meh"
assert rv.headers["X-Foo"] == "Testing"
assert rv.status_code == 400
assert rv.mimetype == "text/plain"
rv = client.get("/text_headers")
assert rv.data == b"Hello"
assert rv.headers["X-Foo"] == "Test"
assert rv.status_code == 200
assert rv.mimetype == "text/plain"
rv = client.get("/text_status")
assert rv.data == b"Hi, status!"
assert rv.status_code == 400
assert rv.mimetype == "text/html"
rv = client.get("/response_headers")
assert rv.data == b"Hello world"
assert rv.content_type == "text/plain"
assert rv.headers.getlist("X-Foo") == ["Bar"]
assert rv.headers["X-Bar"] == "Foo"
assert rv.status_code == 404
rv = client.get("/response_status")
assert rv.data == b"Hello world"
assert rv.status_code == 500
rv = client.get("/wsgi")
assert b"Not Found" in rv.data
assert rv.status_code == 404
rv = client.get("/dict")
assert rv.json == {"foo": "bar"}
assert rv.status_code == 201
def test_response_type_errors():
app = flask.Flask(__name__)
app.testing = True
@app.route("/none")
def from_none():
pass
@app.route("/small_tuple")
def from_small_tuple():
return ("Hello",)
@app.route("/large_tuple")
def from_large_tuple():
return "Hello", 234, {"X-Foo": "Bar"}, "???"
@app.route("/bad_type")
def from_bad_type():
return True
@app.route("/bad_wsgi")
def from_bad_wsgi():
return lambda: None
c = app.test_client()
with pytest.raises(TypeError) as e:
c.get("/none")
assert "returned None" in str(e.value)
assert "from_none" in str(e.value)
with pytest.raises(TypeError) as e:
c.get("/small_tuple")
assert "tuple must have the form" in str(e.value)
pytest.raises(TypeError, c.get, "/large_tuple")
with pytest.raises(TypeError) as e:
c.get("/bad_type")
assert "it was a bool" in str(e.value)
pytest.raises(TypeError, c.get, "/bad_wsgi")
def test_make_response(app, req_ctx):
rv = flask.make_response()
assert rv.status_code == 200
assert rv.data == b""
assert rv.mimetype == "text/html"
rv = flask.make_response("Awesome")
assert rv.status_code == 200
assert rv.data == b"Awesome"
assert rv.mimetype == "text/html"
rv = flask.make_response("W00t", 404)
assert rv.status_code == 404
assert rv.data == b"W00t"
assert rv.mimetype == "text/html"
def test_make_response_with_response_instance(app, req_ctx):
rv = flask.make_response(flask.jsonify({"msg": "W00t"}), 400)
assert rv.status_code == 400
assert rv.data == b'{"msg":"W00t"}\n'
assert rv.mimetype == "application/json"
rv = flask.make_response(flask.Response(""), 400)
assert rv.status_code == 400
assert rv.data == b""
assert rv.mimetype == "text/html"
rv = flask.make_response(
flask.Response("", headers={"Content-Type": "text/html"}),
400,
[("X-Foo", "bar")],
)
assert rv.status_code == 400
assert rv.headers["Content-Type"] == "text/html"
assert rv.headers["X-Foo"] == "bar"
def test_jsonify_no_prettyprint(app, req_ctx):
app.config.update({"JSONIFY_PRETTYPRINT_REGULAR": False})
compressed_msg = b'{"msg":{"submsg":"W00t"},"msg2":"foobar"}\n'
uncompressed_msg = {"msg": {"submsg": "W00t"}, "msg2": "foobar"}
rv = flask.make_response(flask.jsonify(uncompressed_msg), 200)
assert rv.data == compressed_msg
def test_jsonify_prettyprint(app, req_ctx):
app.config.update({"JSONIFY_PRETTYPRINT_REGULAR": True})
compressed_msg = {"msg": {"submsg": "W00t"}, "msg2": "foobar"}
pretty_response = (
b'{\n "msg": {\n "submsg": "W00t"\n }, \n "msg2": "foobar"\n}\n'
)
rv = flask.make_response(flask.jsonify(compressed_msg), 200)
assert rv.data == pretty_response
def test_jsonify_mimetype(app, req_ctx):
app.config.update({"JSONIFY_MIMETYPE": "application/vnd.api+json"})
msg = {"msg": {"submsg": "W00t"}}
rv = flask.make_response(flask.jsonify(msg), 200)
assert rv.mimetype == "application/vnd.api+json"
@pytest.mark.skipif(sys.version_info < (3, 7), reason="requires Python >= 3.7")
def test_json_dump_dataclass(app, req_ctx):
from dataclasses import make_dataclass
Data = make_dataclass("Data", [("name", str)])
value = flask.json.dumps(Data("Flask"), app=app)
value = flask.json.loads(value, app=app)
assert value == {"name": "Flask"}
def test_jsonify_args_and_kwargs_check(app, req_ctx):
with pytest.raises(TypeError) as e:
flask.jsonify("fake args", kwargs="fake")
assert "behavior undefined" in str(e.value)
def test_url_generation(app, req_ctx):
@app.route("/hello/<name>", methods=["POST"])
def hello():
pass
assert flask.url_for("hello", name="test x") == "/hello/test%20x"
assert (
flask.url_for("hello", name="test x", _external=True)
== "http://localhost/hello/test%20x"
)
def test_build_error_handler(app):
# Test base case, a URL which results in a BuildError.
with app.test_request_context():
pytest.raises(BuildError, flask.url_for, "spam")
# Verify the error is re-raised if not the current exception.
try:
with app.test_request_context():
flask.url_for("spam")
except BuildError as err:
error = err
try:
raise RuntimeError("Test case where BuildError is not current.")
except RuntimeError:
pytest.raises(BuildError, app.handle_url_build_error, error, "spam", {})
# Test a custom handler.
def handler(error, endpoint, values):
# Just a test.
return "/test_handler/"
app.url_build_error_handlers.append(handler)
with app.test_request_context():
assert flask.url_for("spam") == "/test_handler/"
def test_build_error_handler_reraise(app):
# Test a custom handler which reraises the BuildError
def handler_raises_build_error(error, endpoint, values):
raise error
app.url_build_error_handlers.append(handler_raises_build_error)
with app.test_request_context():
pytest.raises(BuildError, flask.url_for, "not.existing")
def test_url_for_passes_special_values_to_build_error_handler(app):
@app.url_build_error_handlers.append
def handler(error, endpoint, values):
assert values == {
"_external": False,
"_anchor": None,
"_method": None,
"_scheme": None,
}
return "handled"
with app.test_request_context():
flask.url_for("/")
def test_static_files(app, client):
rv = client.get("/static/index.html")
assert rv.status_code == 200
assert rv.data.strip() == b"<h1>Hello World!</h1>"
with app.test_request_context():
assert flask.url_for("static", filename="index.html") == "/static/index.html"
rv.close()
def test_static_url_path():
app = flask.Flask(__name__, static_url_path="/foo")
app.testing = True
rv = app.test_client().get("/foo/index.html")
assert rv.status_code == 200
rv.close()
with app.test_request_context():
assert flask.url_for("static", filename="index.html") == "/foo/index.html"
def test_static_url_path_with_ending_slash():
app = flask.Flask(__name__, static_url_path="/foo/")
app.testing = True
rv = app.test_client().get("/foo/index.html")
assert rv.status_code == 200
rv.close()
with app.test_request_context():
assert flask.url_for("static", filename="index.html") == "/foo/index.html"
def test_static_url_empty_path(app):
app = flask.Flask(__name__, static_folder="", static_url_path="")
rv = app.test_client().open("/static/index.html", method="GET")
assert rv.status_code == 200
rv.close()
def test_static_url_empty_path_default(app):
app = flask.Flask(__name__, static_folder="")
rv = app.test_client().open("/static/index.html", method="GET")
assert rv.status_code == 200
rv.close()
@pytest.mark.skipif(sys.version_info < (3, 6), reason="requires Python >= 3.6")
def test_static_folder_with_pathlib_path(app):
from pathlib import Path
app = flask.Flask(__name__, static_folder=Path("static"))
rv = app.test_client().open("/static/index.html", method="GET")
assert rv.status_code == 200
rv.close()
def test_static_folder_with_ending_slash():
app = flask.Flask(__name__, static_folder="static/")
@app.route("/<path:path>")
def catch_all(path):
return path
rv = app.test_client().get("/catch/all")
assert rv.data == b"catch/all"
def test_static_route_with_host_matching():
app = flask.Flask(__name__, host_matching=True, static_host="example.com")
c = app.test_client()
rv = c.get("http://example.com/static/index.html")
assert rv.status_code == 200
rv.close()
with app.test_request_context():
rv = flask.url_for("static", filename="index.html", _external=True)
assert rv == "http://example.com/static/index.html"
# Providing static_host without host_matching=True should error.
with pytest.raises(Exception):
flask.Flask(__name__, static_host="example.com")
# Providing host_matching=True with static_folder
# but without static_host should error.
with pytest.raises(Exception):
flask.Flask(__name__, host_matching=True)
# Providing host_matching=True without static_host
# but with static_folder=None should not error.
flask.Flask(__name__, host_matching=True, static_folder=None)
def test_request_locals():
assert repr(flask.g) == "<LocalProxy unbound>"
assert not flask.g
def test_server_name_subdomain():
app = flask.Flask(__name__, subdomain_matching=True)
client = app.test_client()
@app.route("/")
def index():
return "default"
@app.route("/", subdomain="foo")
def subdomain():
return "subdomain"
app.config["SERVER_NAME"] = "dev.local:5000"
rv = client.get("/")
assert rv.data == b"default"
rv = client.get("/", "http://dev.local:5000")
assert rv.data == b"default"
rv = client.get("/", "https://dev.local:5000")
assert rv.data == b"default"
app.config["SERVER_NAME"] = "dev.local:443"
rv = client.get("/", "https://dev.local")
# Werkzeug 1.0 fixes matching https scheme with 443 port
if rv.status_code != 404:
assert rv.data == b"default"
app.config["SERVER_NAME"] = "dev.local"
rv = client.get("/", "https://dev.local")
assert rv.data == b"default"
# suppress Werkzeug 1.0 warning about name mismatch
with pytest.warns(None):
rv = client.get("/", "http://foo.localhost")
assert rv.status_code == 404
rv = client.get("/", "http://foo.dev.local")
assert rv.data == b"subdomain"
def test_exception_propagation(app, client):
def apprunner(config_key):
@app.route("/")
def index():
1 // 0
if config_key is not None:
app.config[config_key] = True
with pytest.raises(Exception):
client.get("/")
else:
assert client.get("/").status_code == 500
# we have to run this test in an isolated thread because if the
# debug flag is set to true and an exception happens the context is
# not torn down. This causes other tests that run after this fail
# when they expect no exception on the stack.
for config_key in "TESTING", "PROPAGATE_EXCEPTIONS", "DEBUG", None:
t = Thread(target=apprunner, args=(config_key,))
t.start()
t.join()
@pytest.mark.parametrize("debug", [True, False])
@pytest.mark.parametrize("use_debugger", [True, False])
@pytest.mark.parametrize("use_reloader", [True, False])
@pytest.mark.parametrize("propagate_exceptions", [None, True, False])
def test_werkzeug_passthrough_errors(
monkeypatch, debug, use_debugger, use_reloader, propagate_exceptions, app
):
rv = {}
# Mocks werkzeug.serving.run_simple method
def run_simple_mock(*args, **kwargs):
rv["passthrough_errors"] = kwargs.get("passthrough_errors")
monkeypatch.setattr(werkzeug.serving, "run_simple", run_simple_mock)
app.config["PROPAGATE_EXCEPTIONS"] = propagate_exceptions
app.run(debug=debug, use_debugger=use_debugger, use_reloader=use_reloader)
def test_max_content_length(app, client):
app.config["MAX_CONTENT_LENGTH"] = 64
@app.before_request
def always_first():
flask.request.form["myfile"]
AssertionError()
@app.route("/accept", methods=["POST"])
def accept_file():
flask.request.form["myfile"]
AssertionError()
@app.errorhandler(413)
def catcher(error):
return "42"
rv = client.post("/accept", data={"myfile": "foo" * 100})
assert rv.data == b"42"
def test_url_processors(app, client):
@app.url_defaults
def add_language_code(endpoint, values):
if flask.g.lang_code is not None and app.url_map.is_endpoint_expecting(
endpoint, "lang_code"
):
values.setdefault("lang_code", flask.g.lang_code)
@app.url_value_preprocessor
def pull_lang_code(endpoint, values):
flask.g.lang_code = values.pop("lang_code", None)
@app.route("/<lang_code>/")
def index():
return flask.url_for("about")
@app.route("/<lang_code>/about")
def about():
return flask.url_for("something_else")
@app.route("/foo")
def something_else():
return flask.url_for("about", lang_code="en")
assert client.get("/de/").data == b"/de/about"
assert client.get("/de/about").data == b"/foo"
assert client.get("/foo").data == b"/en/about"
def test_inject_blueprint_url_defaults(app):
bp = flask.Blueprint("foo.bar.baz", __name__, template_folder="template")
@bp.url_defaults
def bp_defaults(endpoint, values):
values["page"] = "login"
@bp.route("/<page>")
def view(page):
pass
app.register_blueprint(bp)
values = dict()
app.inject_url_defaults("foo.bar.baz.view", values)
expected = dict(page="login")
assert values == expected
with app.test_request_context("/somepage"):
url = flask.url_for("foo.bar.baz.view")
expected = "/login"
assert url == expected
def test_nonascii_pathinfo(app, client):
@app.route("/киртест")
def index():
return "Hello World!"
rv = client.get("/киртест")
assert rv.data == b"Hello World!"
def test_debug_mode_complains_after_first_request(app, client):
app.debug = True
@app.route("/")
def index():
return "Awesome"
assert not app.got_first_request
assert client.get("/").data == b"Awesome"
with pytest.raises(AssertionError) as e:
@app.route("/foo")
def broken():
return "Meh"
assert "A setup function was called" in str(e.value)
app.debug = False
@app.route("/foo")
def working():
return "Meh"
assert client.get("/foo").data == b"Meh"
assert app.got_first_request
def test_before_first_request_functions(app, client):
got = []
@app.before_first_request
def foo():
got.append(42)
client.get("/")
assert got == [42]
client.get("/")
assert got == [42]
assert app.got_first_request
def test_before_first_request_functions_concurrent(app, client):
got = []
@app.before_first_request
def foo():
time.sleep(0.2)
got.append(42)
def get_and_assert():
client.get("/")
assert got == [42]
t = Thread(target=get_and_assert)
t.start()
get_and_assert()
t.join()
assert app.got_first_request
def test_routing_redirect_debugging(app, client):
app.debug = True
@app.route("/foo/", methods=["GET", "POST"])
def foo():
return "success"
with client:
with pytest.raises(AssertionError) as e:
client.post("/foo", data={})
assert "http://localhost/foo/" in str(e.value)
assert "Make sure to directly send your POST-request to this URL" in str(
e.value
)
rv = client.get("/foo", data={}, follow_redirects=True)
assert rv.data == b"success"
app.debug = False
with client:
rv = client.post("/foo", data={}, follow_redirects=True)
assert rv.data == b"success"
def test_route_decorator_custom_endpoint(app, client):
app.debug = True
@app.route("/foo/")
def foo():
return flask.request.endpoint
@app.route("/bar/", endpoint="bar")
def for_bar():
return flask.request.endpoint
@app.route("/bar/123", endpoint="123")
def for_bar_foo():
return flask.request.endpoint
with app.test_request_context():
assert flask.url_for("foo") == "/foo/"
assert flask.url_for("bar") == "/bar/"
assert flask.url_for("123") == "/bar/123"
assert client.get("/foo/").data == b"foo"
assert client.get("/bar/").data == b"bar"
assert client.get("/bar/123").data == b"123"
def test_preserve_only_once(app, client):
app.debug = True
@app.route("/fail")
def fail_func():
1 // 0
for _x in range(3):
with pytest.raises(ZeroDivisionError):
client.get("/fail")
assert flask._request_ctx_stack.top is not None
assert flask._app_ctx_stack.top is not None
# implicit appctx disappears too
flask._request_ctx_stack.top.pop()
assert flask._request_ctx_stack.top is None
assert flask._app_ctx_stack.top is None
def test_preserve_remembers_exception(app, client):
app.debug = True
errors = []
@app.route("/fail")
def fail_func():
1 // 0
@app.route("/success")
def success_func():
return "Okay"
@app.teardown_request
def teardown_handler(exc):
errors.append(exc)
# After this failure we did not yet call the teardown handler
with pytest.raises(ZeroDivisionError):
client.get("/fail")
assert errors == []
# But this request triggers it, and it's an error
client.get("/success")
assert len(errors) == 2
assert isinstance(errors[0], ZeroDivisionError)
# At this point another request does nothing.
client.get("/success")
assert len(errors) == 3
assert errors[1] is None
def test_get_method_on_g(app_ctx):
assert flask.g.get("x") is None
assert flask.g.get("x", 11) == 11
flask.g.x = 42
assert flask.g.get("x") == 42
assert flask.g.x == 42
def test_g_iteration_protocol(app_ctx):
flask.g.foo = 23
flask.g.bar = 42
assert "foo" in flask.g
assert "foos" not in flask.g
assert sorted(flask.g) == ["bar", "foo"]
def test_subdomain_basic_support():
app = flask.Flask(__name__, subdomain_matching=True)
app.config["SERVER_NAME"] = "localhost.localdomain"
client = app.test_client()
@app.route("/")
def normal_index():
return "normal index"
@app.route("/", subdomain="test")
def test_index():
return "test index"
rv = client.get("/", "http://localhost.localdomain/")
assert rv.data == b"normal index"
rv = client.get("/", "http://test.localhost.localdomain/")
assert rv.data == b"test index"
def test_subdomain_matching():
app = flask.Flask(__name__, subdomain_matching=True)
client = app.test_client()
app.config["SERVER_NAME"] = "localhost.localdomain"
@app.route("/", subdomain="<user>")
def index(user):
return f"index for {user}"
rv = client.get("/", "http://mitsuhiko.localhost.localdomain/")
assert rv.data == b"index for mitsuhiko"
def test_subdomain_matching_with_ports():
app = flask.Flask(__name__, subdomain_matching=True)
app.config["SERVER_NAME"] = "localhost.localdomain:3000"
client = app.test_client()
@app.route("/", subdomain="<user>")
def index(user):
return f"index for {user}"
rv = client.get("/", "http://mitsuhiko.localhost.localdomain:3000/")
assert rv.data == b"index for mitsuhiko"
@pytest.mark.parametrize("matching", (False, True))
def test_subdomain_matching_other_name(matching):
app = flask.Flask(__name__, subdomain_matching=matching)
app.config["SERVER_NAME"] = "localhost.localdomain:3000"
client = app.test_client()
@app.route("/")
def index():
return "", 204
# suppress Werkzeug 0.15 warning about name mismatch
with pytest.warns(None):
# ip address can't match name
rv = client.get("/", "http://127.0.0.1:3000/")
assert rv.status_code == (404 if matching else 204)
# allow all subdomains if matching is disabled
rv = client.get("/", "http://www.localhost.localdomain:3000/")
assert rv.status_code == (404 if matching else 204)
def test_multi_route_rules(app, client):
@app.route("/")
@app.route("/<test>/")
def index(test="a"):
return test
rv = client.open("/")
assert rv.data == b"a"
rv = client.open("/b/")
assert rv.data == b"b"
def test_multi_route_class_views(app, client):
class View:
def __init__(self, app):
app.add_url_rule("/", "index", self.index)
app.add_url_rule("/<test>/", "index", self.index)
def index(self, test="a"):
return test
_ = View(app)
rv = client.open("/")
assert rv.data == b"a"
rv = client.open("/b/")
assert rv.data == b"b"
def test_run_defaults(monkeypatch, app):
rv = {}
# Mocks werkzeug.serving.run_simple method
def run_simple_mock(*args, **kwargs):
rv["result"] = "running..."
monkeypatch.setattr(werkzeug.serving, "run_simple", run_simple_mock)
app.run()
assert rv["result"] == "running..."
def test_run_server_port(monkeypatch, app):
rv = {}
# Mocks werkzeug.serving.run_simple method
def run_simple_mock(hostname, port, application, *args, **kwargs):
rv["result"] = f"running on {hostname}:{port} ..."
monkeypatch.setattr(werkzeug.serving, "run_simple", run_simple_mock)
hostname, port = "localhost", 8000
app.run(hostname, port, debug=True)
assert rv["result"] == f"running on {hostname}:{port} ..."
@pytest.mark.parametrize(
"host,port,server_name,expect_host,expect_port",
(
(None, None, "pocoo.org:8080", "pocoo.org", 8080),
("localhost", None, "pocoo.org:8080", "localhost", 8080),
(None, 80, "pocoo.org:8080", "pocoo.org", 80),
("localhost", 80, "pocoo.org:8080", "localhost", 80),
("localhost", 0, "localhost:8080", "localhost", 0),
(None, None, "localhost:8080", "localhost", 8080),
(None, None, "localhost:0", "localhost", 0),
),
)
def test_run_from_config(
monkeypatch, host, port, server_name, expect_host, expect_port, app
):
def run_simple_mock(hostname, port, *args, **kwargs):
assert hostname == expect_host
assert port == expect_port
monkeypatch.setattr(werkzeug.serving, "run_simple", run_simple_mock)
app.config["SERVER_NAME"] = server_name
app.run(host, port)
def test_max_cookie_size(app, client, recwarn):
app.config["MAX_COOKIE_SIZE"] = 100
# outside app context, default to Werkzeug static value,
# which is also the default config
response = flask.Response()
default = flask.Flask.default_config["MAX_COOKIE_SIZE"]
assert response.max_cookie_size == default
# inside app context, use app config
with app.app_context():
assert flask.Response().max_cookie_size == 100
@app.route("/")
def index():
r = flask.Response("", status=204)
r.set_cookie("foo", "bar" * 100)
return r
client.get("/")
assert len(recwarn) == 1
w = recwarn.pop()
assert "cookie is too large" in str(w.message)
app.config["MAX_COOKIE_SIZE"] = 0
client.get("/")
assert len(recwarn) == 0
@require_cpython_gc
def test_app_freed_on_zero_refcount():
# A Flask instance should not create a reference cycle that prevents CPython
# from freeing it when all external references to it are released (see #3761).
gc.disable()
try:
app = flask.Flask(__name__)
assert app.view_functions["static"]
weak = weakref.ref(app)
assert weak() is not None
del app
assert weak() is None
finally:
gc.enable()
mainwindow.py
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
Spyder, the Scientific Python Development Environment
=====================================================
Developed and maintained by the Spyder Project
Contributors
Copyright © Spyder Project Contributors
Licensed under the terms of the MIT License
(see spyder/__init__.py for details)
"""
# =============================================================================
# Stdlib imports
# =============================================================================
from __future__ import print_function
from collections import OrderedDict
from enum import Enum
import errno
import gc
import logging
import os
import os.path as osp
import shutil
import signal
import socket
import glob
import sys
import threading
import traceback
#==============================================================================
# Check requirements before proceeding
#==============================================================================
from spyder import requirements
requirements.check_path()
requirements.check_qt()
requirements.check_spyder_kernels()
#==============================================================================
# Third-party imports
#==============================================================================
from qtpy.compat import from_qvariant
from qtpy.QtCore import (QCoreApplication, Qt, QTimer, Signal, Slot,
qInstallMessageHandler)
from qtpy.QtGui import QColor, QIcon, QKeySequence
from qtpy.QtWidgets import (QAction, QApplication, QMainWindow, QMenu,
QMessageBox, QShortcut, QStyleFactory, QCheckBox)
# Avoid a "Cannot mix incompatible Qt library" error on Windows platforms
from qtpy import QtSvg # analysis:ignore
# Avoid a bug in Qt: https://bugreports.qt.io/browse/QTBUG-46720
from qtpy import QtWebEngineWidgets # analysis:ignore
from qtawesome.iconic_font import FontError
#==============================================================================
# Local imports
# NOTE: Move (if possible) imports of widgets and plugins exactly where they
# are needed in MainWindow to speed up perceived startup time (i.e. the time
# from clicking the Spyder icon to showing the splash screen).
#==============================================================================
from spyder import __version__
from spyder import dependencies
from spyder.app import tour
from spyder.app.utils import (create_splash_screen, delete_lsp_log_files,
qt_message_handler, set_links_color,
setup_logging, set_opengl_implementation, Spy)
from spyder.config.base import (_, DEV, get_conf_path, get_debug_level,
get_home_dir, get_module_source_path,
get_safe_mode, is_pynsist, running_in_mac_app,
running_under_pytest, STDERR)
from spyder.utils.image_path_manager import get_image_path
from spyder.config.gui import is_dark_font_color
from spyder.config.main import OPEN_FILES_PORT
from spyder.config.manager import CONF
from spyder.config.utils import IMPORT_EXT, is_gtk_desktop
from spyder.otherplugins import get_spyderplugins_mods
from spyder.py3compat import configparser as cp, PY3, to_text_string
from spyder.utils import encoding, programs
from spyder.utils.icon_manager import ima
from spyder.utils.misc import (select_port, getcwd_or_home,
get_python_executable)
from spyder.utils.palette import QStylePalette
from spyder.utils.qthelpers import (create_action, add_actions, file_uri,
qapplication, start_file)
from spyder.utils.stylesheet import APP_STYLESHEET
from spyder.app.solver import (
find_external_plugins, find_internal_plugins, solve_plugin_dependencies)
# Spyder API Imports
from spyder.api.exceptions import SpyderAPIError
from spyder.api.plugins import Plugins, SpyderPluginV2, SpyderDockablePlugin
#==============================================================================
# Windows only local imports
#==============================================================================
set_attached_console_visible = None
is_attached_console_visible = None
set_windows_appusermodelid = None
if os.name == 'nt':
from spyder.utils.windows import (set_attached_console_visible,
set_windows_appusermodelid)
#==============================================================================
# Constants
#==============================================================================
# Module logger
logger = logging.getLogger(__name__)
# Keeping a reference to the original sys.exit before patching it
ORIGINAL_SYS_EXIT = sys.exit
# Get the cwd before initializing WorkingDirectory, which sets it to the one
# used in the last session
CWD = getcwd_or_home()
# Set the index for the default tour
DEFAULT_TOUR = 0
#==============================================================================
# Install Qt message handler
#==============================================================================
qInstallMessageHandler(qt_message_handler)
#==============================================================================
# Main Window
#==============================================================================
class MainWindow(QMainWindow):
"""Spyder main window"""
DOCKOPTIONS = (
QMainWindow.AllowTabbedDocks | QMainWindow.AllowNestedDocks |
QMainWindow.AnimatedDocks
)
SPYDER_PATH = get_conf_path('path')
SPYDER_NOT_ACTIVE_PATH = get_conf_path('not_active_path')
DEFAULT_LAYOUTS = 4
# Signals
restore_scrollbar_position = Signal()
sig_setup_finished = Signal()
all_actions_defined = Signal()
# type: (OrderedDict, OrderedDict)
sig_pythonpath_changed = Signal(object, object)
sig_main_interpreter_changed = Signal()
sig_open_external_file = Signal(str)
sig_resized = Signal("QResizeEvent") # Related to interactive tour
sig_moved = Signal("QMoveEvent") # Related to interactive tour
sig_layout_setup_ready = Signal(object) # Related to default layouts
# --- Plugin handling methods
# ------------------------------------------------------------------------
def get_plugin(self, plugin_name):
"""
Return a plugin instance by providing the plugin's name.
"""
for name, plugin in self._PLUGINS.items():
if plugin_name == name:
return plugin
else:
raise SpyderAPIError('Plugin "{}" not found!'.format(plugin_name))
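# Usage sketch (hypothetical call site, not part of this module):
#     console = main_window.get_plugin(Plugins.Console)
# A SpyderAPIError is raised when no plugin was registered under that name.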
def show_status_message(self, message, timeout):
"""
Show a status message in Spyder Main Window.
"""
status_bar = self.statusBar()
if status_bar.isVisible():
status_bar.showMessage(message, timeout)
def show_plugin_compatibility_message(self, message):
"""
Show a compatibility message.
"""
messageBox = QMessageBox(self)
messageBox.setWindowModality(Qt.NonModal)
messageBox.setAttribute(Qt.WA_DeleteOnClose)
messageBox.setWindowTitle(_('Compatibility Check'))
messageBox.setText(message)
messageBox.setStandardButtons(QMessageBox.Ok)
messageBox.show()
def add_plugin(self, plugin, external=False):
"""
Add plugin to plugins dictionary.
"""
self._PLUGINS[plugin.NAME] = plugin
if external:
self._EXTERNAL_PLUGINS[plugin.NAME] = plugin
else:
self._INTERNAL_PLUGINS[plugin.NAME] = plugin
def register_plugin(self, plugin, external=False):
"""
Register a plugin in Spyder Main Window.
"""
self.set_splash(_("Loading {}...").format(plugin.get_name()))
logger.info("Loading {}...".format(plugin.NAME))
# Check plugin compatibility
is_compatible, message = plugin.check_compatibility()
plugin.is_compatible = is_compatible
plugin.get_description()
if not is_compatible:
self.show_plugin_compatibility_message(message)
return
# Signals
plugin.sig_exception_occurred.connect(self.handle_exception)
plugin.sig_free_memory_requested.connect(self.free_memory)
plugin.sig_quit_requested.connect(self.close)
plugin.sig_restart_requested.connect(self.restart)
plugin.sig_redirect_stdio_requested.connect(
self.redirect_internalshell_stdio)
plugin.sig_status_message_requested.connect(self.show_status_message)
if isinstance(plugin, SpyderDockablePlugin):
plugin.sig_focus_changed.connect(self.plugin_focus_changed)
plugin.sig_switch_to_plugin_requested.connect(
self.switch_to_plugin)
plugin.sig_update_ancestor_requested.connect(
lambda: plugin.set_ancestor(self))
# Register plugin
plugin._register()
plugin.register()
if isinstance(plugin, SpyderDockablePlugin):
# Add dockwidget
self.add_dockwidget(plugin)
# Update margins
margin = 0
if CONF.get('main', 'use_custom_margin'):
margin = CONF.get('main', 'custom_margin')
plugin.update_margins(margin)
self.add_plugin(plugin, external=external)
logger.info("Registering shortcuts for {}...".format(plugin.NAME))
for action_name, action in plugin.get_actions().items():
context = (getattr(action, 'shortcut_context', plugin.NAME)
or plugin.NAME)
if getattr(action, 'register_shortcut', True):
if isinstance(action_name, Enum):
action_name = action_name.value
self.register_shortcut(action, context, action_name)
if isinstance(plugin, SpyderDockablePlugin):
try:
context = '_'
name = 'switch to {}'.format(plugin.CONF_SECTION)
shortcut = CONF.get_shortcut(context, name,
plugin_name=plugin.CONF_SECTION)
except (cp.NoSectionError, cp.NoOptionError):
shortcut = None
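# Note: the QShortcut below is created with an empty QKeySequence; the
# user-configured binding is expected to be applied by register_shortcut()
# further down (an inference from the surrounding code, not a documented
# contract).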
sc = QShortcut(QKeySequence(), self,
lambda: self.switch_to_plugin(plugin))
sc.setContext(Qt.ApplicationShortcut)
plugin._shortcut = sc
self.register_shortcut(sc, context, name)
self.register_shortcut(plugin.toggle_view_action, context, name)
def unregister_plugin(self, plugin):
"""
Unregister a plugin from the Spyder Main Window.
"""
logger.info("Unloading {}...".format(plugin.NAME))
# Disconnect all slots
signals = [
plugin.sig_quit_requested,
plugin.sig_redirect_stdio_requested,
plugin.sig_status_message_requested,
]
for sig in signals:
try:
sig.disconnect()
except TypeError:
pass
# Unregister shortcuts for actions
logger.info("Unregistering shortcuts for {}...".format(plugin.NAME))
for action_name, action in plugin.get_actions().items():
context = (getattr(action, 'shortcut_context', plugin.NAME)
or plugin.NAME)
self.unregister_shortcut(action, context, action_name)
# Unregister switch to shortcut
try:
context = '_'
name = 'switch to {}'.format(plugin.CONF_SECTION)
shortcut = CONF.get_shortcut(context, name,
plugin_name=plugin.CONF_SECTION)
except Exception:
shortcut = None
if shortcut is not None:
self.unregister_shortcut(
plugin._shortcut,
context,
"Switch to {}".format(plugin.CONF_SECTION),
)
# Remove dockwidget
logger.info("Removing {} dockwidget...".format(plugin.NAME))
self.remove_dockwidget(plugin)
plugin.unregister()
plugin._unregister()
def create_plugin_conf_widget(self, plugin):
"""
Create configuration dialog box page widget.
"""
config_dialog = self.prefs_dialog_instance
if plugin.CONF_WIDGET_CLASS is not None and config_dialog is not None:
conf_widget = plugin.CONF_WIDGET_CLASS(plugin, config_dialog)
conf_widget.initialize()
return conf_widget
@property
def last_plugin(self):
"""
Get last plugin with focus if it is a dockable widget.
If a non-dockable plugin has the focus this will return by default
the Editor plugin.
"""
# Needed to prevent errors with the old API at
# spyder/plugins/base::_switch_to_plugin
return self.layouts.get_last_plugin()
def maximize_dockwidget(self, restore=False):
"""
This is needed to prevent errors with the old API at
spyder/plugins/base::_switch_to_plugin.
See spyder-ide/spyder#15164
Parameters
----------
restore : bool, optional
If the current dockwidget needs to be restored to its unmaximized
state. The default is False.
"""
self.layouts.maximize_dockwidget(restore=restore)
def switch_to_plugin(self, plugin, force_focus=None):
"""
Switch to this plugin.
Notes
-----
This operation unmaximizes the current plugin (if any), raises
this plugin to view (if it's hidden) and gives it focus (if
possible).
"""
last_plugin = self.last_plugin
try:
# New API
if (last_plugin is not None
and last_plugin.get_widget().is_maximized
and last_plugin is not plugin):
self.layouts.maximize_dockwidget()
except AttributeError:
# Old API
if (last_plugin is not None and self.last_plugin._ismaximized
and last_plugin is not plugin):
self.layouts.maximize_dockwidget()
try:
# New API
if not plugin.toggle_view_action.isChecked():
plugin.toggle_view_action.setChecked(True)
plugin.get_widget().is_visible = False
except AttributeError:
# Old API
if not plugin._toggle_view_action.isChecked():
plugin._toggle_view_action.setChecked(True)
plugin._widget._is_visible = False
plugin.change_visibility(True, force_focus=force_focus)
def remove_dockwidget(self, plugin):
"""
Remove a plugin QDockWidget from the main window.
"""
self.removeDockWidget(plugin.dockwidget)
self.widgetlist.remove(plugin)
def tabify_plugins(self, first, second):
"""Tabify plugin dockwigdets."""
self.tabifyDockWidget(first.dockwidget, second.dockwidget)
def tabify_plugin(self, plugin, default=None):
"""
Tabify the plugin using the list of possible TABIFY options.
Only do this if the dockwidget does not have more dockwidgets
in the same position and if the plugin is using the New API.
"""
def tabify_helper(plugin, next_to_plugins):
for next_to_plugin in next_to_plugins:
try:
self.tabify_plugins(next_to_plugin, plugin)
break
except SpyderAPIError as err:
logger.error(err)
# If TABIFY not defined use the [default]
tabify = getattr(plugin, 'TABIFY', [default])
if not isinstance(tabify, list):
next_to_plugins = [tabify]
else:
next_to_plugins = tabify
# Skip if TABIFY is just [None] or an empty list, i.e. there is
# nothing to tabify next to
if tabify in [[None], []]:
return False
# Get the actual plugins from the names
next_to_plugins = [self.get_plugin(p) for p in next_to_plugins]
# First time plugin starts
if plugin.get_conf('first_time', True):
if (isinstance(plugin, SpyderDockablePlugin)
and plugin.NAME != Plugins.Console):
logger.info(
"Tabify {} dockwidget for the first time...".format(
plugin.NAME))
tabify_helper(plugin, next_to_plugins)
plugin.set_conf('enable', True)
plugin.set_conf('first_time', False)
else:
# This is needed to ensure new plugins are placed correctly
# without the need for a layout reset.
logger.info("Tabify {} dockwidget...".format(plugin.NAME))
# Check if plugin has no other dockwidgets in the same position
if not bool(self.tabifiedDockWidgets(plugin.dockwidget)):
tabify_helper(plugin, next_to_plugins)
return True
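# Example (a sketch): a plugin defining `TABIFY = [Plugins.Help]` would be
# tabbed next to the Help dockwidget the first time it starts; when TABIFY is
# not defined, the `default` argument passed to this method is used instead.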
def handle_exception(self, error_data):
"""
This method will call the handle exception method of the Console
plugin. It is provided as a signal on the Plugin API for convenience,
so that plugins do not need to explicitly call the Console plugin.
Parameters
----------
error_data: dict
The dictionary containing error data. The expected keys are:
>>> error_data = {
"text": str,
"is_traceback": bool,
"repo": str,
"title": str,
"label": str,
"steps": str,
}
Notes
-----
The `is_traceback` key indicates if `text` contains plain text or a
Python error traceback.
The `title` and `repo` keys indicate how the error data should
customize the report dialog and Github error submission.
The `label` and `steps` keys allow customizing the content of the
error dialog.
"""
if self.console:
self.console.handle_exception(error_data)
def __init__(self, splash=None, options=None):
QMainWindow.__init__(self)
qapp = QApplication.instance()
if running_under_pytest():
self._proxy_style = None
else:
from spyder.utils.qthelpers import SpyderProxyStyle
# None is needed, see: https://bugreports.qt.io/browse/PYSIDE-922
self._proxy_style = SpyderProxyStyle(None)
# Enabling scaling for high dpi
qapp.setAttribute(Qt.AA_UseHighDpiPixmaps)
self.default_style = str(qapp.style().objectName())
self.init_workdir = options.working_directory
self.profile = options.profile
self.multithreaded = options.multithreaded
self.new_instance = options.new_instance
if options.project is not None and not running_in_mac_app():
self.open_project = osp.normpath(osp.join(CWD, options.project))
else:
self.open_project = None
self.window_title = options.window_title
logger.info("Start of MainWindow constructor")
def signal_handler(signum, frame=None):
"""Handler for signals."""
sys.stdout.write('Handling signal: %s\n' % signum)
sys.stdout.flush()
QApplication.quit()
if os.name == "nt":
try:
import win32api
win32api.SetConsoleCtrlHandler(signal_handler, True)
except ImportError:
pass
else:
signal.signal(signal.SIGTERM, signal_handler)
if not DEV:
# Make Spyder quit when pressing Ctrl+C in the console
# In DEV Ctrl+C doesn't quit, because it helps to
# capture the traceback when spyder freezes
signal.signal(signal.SIGINT, signal_handler)
# Use a custom Qt stylesheet
if sys.platform == 'darwin':
spy_path = get_module_source_path('spyder')
img_path = osp.join(spy_path, 'images')
mac_style = open(osp.join(spy_path, 'app', 'mac_stylesheet.qss')).read()
mac_style = mac_style.replace('$IMAGE_PATH', img_path)
self.setStyleSheet(mac_style)
# Shortcut management data
self.shortcut_data = []
# Handle Spyder path
self.path = ()
self.not_active_path = ()
self.project_path = ()
# New API
self._APPLICATION_TOOLBARS = OrderedDict()
self._STATUS_WIDGETS = OrderedDict()
self._PLUGINS = OrderedDict()
self._EXTERNAL_PLUGINS = OrderedDict()
self._INTERNAL_PLUGINS = OrderedDict()
# Mapping of new plugin identifiers to the old attribute names given to
# plugins, also used to prevent collisions with other attributes,
# e.g. layout (Qt) vs layout (SpyderPluginV2)
self._INTERNAL_PLUGINS_MAPPING = {
'console': Plugins.Console,
'maininterpreter': Plugins.MainInterpreter,
'outlineexplorer': Plugins.OutlineExplorer,
'variableexplorer': Plugins.VariableExplorer,
'ipyconsole': Plugins.IPythonConsole,
'workingdirectory': Plugins.WorkingDirectory,
'projects': Plugins.Projects,
'findinfiles': Plugins.Find,
'layouts': Plugins.Layout,
}
self.thirdparty_plugins = []
# Tour
# TODO: Should be a plugin
self.tour = None
self.tours_available = None
self.tour_dialog = None
# File switcher
self.switcher = None
# Preferences
self.prefs_dialog_size = None
self.prefs_dialog_instance = None
# Actions
self.undo_action = None
self.redo_action = None
self.copy_action = None
self.cut_action = None
self.paste_action = None
self.selectall_action = None
# Menu bars
self.edit_menu = None
self.edit_menu_actions = []
self.search_menu = None
self.search_menu_actions = []
self.source_menu = None
self.source_menu_actions = []
self.run_menu = None
self.run_menu_actions = []
self.debug_menu = None
self.debug_menu_actions = []
self.consoles_menu = None
self.consoles_menu_actions = []
self.projects_menu = None
self.projects_menu_actions = []
# TODO: Move to corresponding Plugins
self.main_toolbar = None
self.main_toolbar_actions = []
self.file_toolbar = None
self.file_toolbar_actions = []
self.run_toolbar = None
self.run_toolbar_actions = []
self.debug_toolbar = None
self.debug_toolbar_actions = []
self.menus = []
if running_under_pytest():
# Show errors in internal console when testing.
CONF.set('main', 'show_internal_errors', False)
self.CURSORBLINK_OSDEFAULT = QApplication.cursorFlashTime()
if set_windows_appusermodelid is not None:
res = set_windows_appusermodelid()
logger.info("appusermodelid: %s", res)
# Setting QTimer if running in travis
test_app = os.environ.get('TEST_CI_APP')
if test_app is not None:
app = qapplication()
timer_shutdown_time = 30000
self.timer_shutdown = QTimer(self)
self.timer_shutdown.timeout.connect(app.quit)
self.timer_shutdown.start(timer_shutdown_time)
# Showing splash screen
self.splash = splash
if CONF.get('main', 'current_version', '') != __version__:
CONF.set('main', 'current_version', __version__)
# Execute here the actions to be performed only once after
# each update (there is nothing there for now, but it could
# be useful some day...)
# List of satellite widgets (registered in add_dockwidget):
self.widgetlist = []
# Flags used if closing() is called by the exit() shell command
self.already_closed = False
self.is_starting_up = True
self.is_setting_up = True
self.floating_dockwidgets = []
self.window_size = None
self.window_position = None
# To keep track of the last focused widget
self.last_focused_widget = None
self.previous_focused_widget = None
# Keep track of dpi message
self.show_dpi_message = True
# Server to open external files on a single instance
# This is needed in order to handle socket creation problems.
# See spyder-ide/spyder#4132.
if os.name == 'nt':
try:
self.open_files_server = socket.socket(socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_TCP)
except OSError:
self.open_files_server = None
QMessageBox.warning(None, "Spyder",
_("An error occurred while creating a socket needed "
"by Spyder. Please, try to run as an Administrator "
"from cmd.exe the following command and then "
"restart your computer: <br><br><span "
"style=\'color: {color}\'><b>netsh winsock reset "
"</b></span><br>").format(
color=QStylePalette.COLOR_BACKGROUND_4))
else:
self.open_files_server = socket.socket(socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_TCP)
# To show the message about starting the tour
self.sig_setup_finished.connect(self.show_tour_message)
# Apply main window settings
self.apply_settings()
# To set all dockwidgets tabs to be on top (in case we want to do it
# in the future)
# self.setTabPosition(Qt.AllDockWidgetAreas, QTabWidget.North)
logger.info("End of MainWindow constructor")
# --- Window setup
def _update_shortcuts_in_panes_menu(self, show=True):
"""
Display the shortcut for the "Switch to plugin..." on the toggle view
action of the plugins displayed in the Help/Panes menu.
Notes
-----
SpyderDockablePlugins provide two actions that function as a single
action. The `Switch to Plugin...` action has an assignable shortcut
via the shortcut preferences. The `Plugin toggle View` in the `View`
application menu uses a custom `Toggle view action` that displays the
shortcut assigned to the `Switch to Plugin...` action, but is not
triggered by that shortcut.
"""
for plugin_id, plugin in self._PLUGINS.items():
if isinstance(plugin, SpyderDockablePlugin):
try:
# New API
action = plugin.toggle_view_action
except AttributeError:
# Old API
action = plugin._toggle_view_action
if show:
section = plugin.CONF_SECTION
try:
context = '_'
name = 'switch to {}'.format(section)
shortcut = CONF.get_shortcut(
context, name, plugin_name=section)
except (cp.NoSectionError, cp.NoOptionError):
shortcut = QKeySequence()
else:
shortcut = QKeySequence()
action.setShortcut(shortcut)
def setup(self):
"""Setup main window."""
# TODO: Remove circular dependency between help and ipython console
# and remove this import. Help plugin should take care of it
from spyder.plugins.help.utils.sphinxify import CSS_PATH, DARK_CSS_PATH
logger.info("*** Start of MainWindow setup ***")
logger.info("Updating PYTHONPATH")
path_dict = self.get_spyder_pythonpath_dict()
self.update_python_path(path_dict)
logger.info("Applying theme configuration...")
ui_theme = CONF.get('appearance', 'ui_theme')
color_scheme = CONF.get('appearance', 'selected')
if ui_theme == 'dark':
if not running_under_pytest():
# Set style proxy to fix combobox popup on mac and qdark
qapp = QApplication.instance()
qapp.setStyle(self._proxy_style)
dark_qss = str(APP_STYLESHEET)
self.setStyleSheet(dark_qss)
self.statusBar().setStyleSheet(dark_qss)
css_path = DARK_CSS_PATH
elif ui_theme == 'light':
if not running_under_pytest():
# Set style proxy to fix combobox popup on mac and qdark
qapp = QApplication.instance()
qapp.setStyle(self._proxy_style)
light_qss = str(APP_STYLESHEET)
self.setStyleSheet(light_qss)
self.statusBar().setStyleSheet(light_qss)
css_path = CSS_PATH
elif ui_theme == 'automatic':
if not is_dark_font_color(color_scheme):
if not running_under_pytest():
# Set style proxy to fix combobox popup on mac and qdark
qapp = QApplication.instance()
qapp.setStyle(self._proxy_style)
dark_qss = str(APP_STYLESHEET)
self.setStyleSheet(dark_qss)
self.statusBar().setStyleSheet(dark_qss)
css_path = DARK_CSS_PATH
else:
light_qss = str(APP_STYLESHEET)
self.setStyleSheet(light_qss)
self.statusBar().setStyleSheet(light_qss)
css_path = CSS_PATH
# Status bar
status = self.statusBar()
status.setObjectName("StatusBar")
status.showMessage(_("Welcome to Spyder!"), 5000)
# Switcher instance
logger.info("Loading switcher...")
self.create_switcher()
message = _(
"Spyder Internal Console\n\n"
"This console is used to report application\n"
"internal errors and to inspect Spyder\n"
"internals with the following commands:\n"
" spy.app, spy.window, dir(spy)\n\n"
"Please don't use it to run your code\n\n"
)
CONF.set('internal_console', 'message', message)
CONF.set('internal_console', 'multithreaded', self.multithreaded)
CONF.set('internal_console', 'profile', self.profile)
CONF.set('internal_console', 'commands', [])
CONF.set('internal_console', 'namespace', {})
CONF.set('internal_console', 'show_internal_errors', True)
# Set css_path config (to change between light and dark css versions
# for the Help and IPython console plugins)
# TODO: There is a circular dependency between help and ipython
if CONF.get('help', 'enable'):
CONF.set('help', 'css_path', css_path)
# Working directory initialization
CONF.set('workingdir', 'init_workdir', self.init_workdir)
# Load and register internal and external plugins
external_plugins = find_external_plugins()
internal_plugins = find_internal_plugins()
all_plugins = external_plugins.copy()
all_plugins.update(internal_plugins.copy())
# Determine 'enable' config for the plugins that have it
enabled_plugins = {}
for plugin in all_plugins.values():
plugin_name = plugin.NAME
plugin_main_attribute_name = (
self._INTERNAL_PLUGINS_MAPPING[plugin_name]
if plugin_name in self._INTERNAL_PLUGINS_MAPPING
else plugin_name)
try:
if CONF.get(plugin_main_attribute_name, "enable"):
enabled_plugins[plugin_name] = plugin
except (cp.NoOptionError, cp.NoSectionError):
enabled_plugins[plugin_name] = plugin
# Get ordered list of plugins classes and instantiate them
plugin_deps = solve_plugin_dependencies(enabled_plugins.values())
for plugin_class in plugin_deps:
plugin_name = plugin_class.NAME
# Non-migrated plugins
if plugin_name in [
Plugins.Editor,
Plugins.IPythonConsole,
Plugins.Projects]:
if plugin_name == Plugins.IPythonConsole:
plugin_instance = plugin_class(self, css_path=css_path)
else:
plugin_instance = plugin_class(self)
plugin_instance.register_plugin()
self.add_plugin(plugin_instance)
if plugin_name == Plugins.Projects:
self.project_path = plugin_instance.get_pythonpath(
at_start=True)
else:
self.preferences.register_plugin_preferences(
plugin_instance)
# Migrated or new plugins
elif plugin_name in [
Plugins.MainMenu,
Plugins.OnlineHelp,
Plugins.Toolbar,
Plugins.Preferences,
Plugins.Appearance,
Plugins.Run,
Plugins.Shortcuts,
Plugins.StatusBar,
Plugins.Completions,
Plugins.OutlineExplorer,
Plugins.Console,
Plugins.MainInterpreter,
Plugins.Breakpoints,
Plugins.History,
Plugins.Profiler,
Plugins.Explorer,
Plugins.Help,
Plugins.Plots,
Plugins.VariableExplorer,
Plugins.Application,
Plugins.Find,
Plugins.Pylint,
Plugins.WorkingDirectory,
Plugins.Layout]:
plugin_instance = plugin_class(self, configuration=CONF)
self.register_plugin(plugin_instance)
# TODO: Check thirdparty attribute usage
# For now append plugins to the thirdparty attribute as was
# being done
if plugin_name in [
Plugins.Breakpoints,
Plugins.Profiler,
Plugins.Pylint]:
self.thirdparty_plugins.append(plugin_instance)
# Load external_plugins adding their dependencies
elif (issubclass(plugin_class, SpyderPluginV2) and
plugin_class.NAME in external_plugins):
try:
plugin_instance = plugin_class(
self,
configuration=CONF,
)
self.register_plugin(plugin_instance, external=True)
# These attributes come from spyder.app.solver
module = plugin_class._spyder_module_name
package_name = plugin_class._spyder_package_name
version = plugin_class._spyder_version
description = plugin_instance.get_description()
dependencies.add(module, package_name, description,
version, None, kind=dependencies.PLUGIN)
except Exception as error:
print("%s: %s" % (plugin_class, str(error)), file=STDERR)
traceback.print_exc(file=STDERR)
self.set_splash(_("Loading old third-party plugins..."))
for mod in get_spyderplugins_mods():
try:
plugin = mod.PLUGIN_CLASS(self)
if plugin.check_compatibility()[0]:
if hasattr(plugin, 'CONFIGWIDGET_CLASS'):
self.preferences.register_plugin_preferences(plugin)
if hasattr(plugin, 'COMPLETION_PROVIDER_NAME'):
self.completions.register_completion_plugin(plugin)
else:
self.thirdparty_plugins.append(plugin)
plugin.register_plugin()
# Add to dependencies dialog
module = mod.__name__
name = module.replace('_', '-')
if plugin.DESCRIPTION:
description = plugin.DESCRIPTION
else:
description = plugin.get_plugin_title()
dependencies.add(module, name, description,
'', None, kind=dependencies.PLUGIN)
except TypeError:
# Fixes spyder-ide/spyder#13977
pass
except Exception as error:
print("%s: %s" % (mod, str(error)), file=STDERR)
traceback.print_exc(file=STDERR)
# Set window title
self.set_window_title()
# Menus
# TODO: Remove when all menus are migrated to use the Main Menu Plugin
logger.info("Creating Menus...")
from spyder.api.widgets.menus import SpyderMenu
from spyder.plugins.mainmenu.api import (
ApplicationMenus, HelpMenuSections, ToolsMenuSections,
FileMenuSections)
mainmenu = self.mainmenu
self.edit_menu = mainmenu.get_application_menu("edit_menu")
self.search_menu = mainmenu.get_application_menu("search_menu")
self.source_menu = mainmenu.get_application_menu("source_menu")
self.source_menu.aboutToShow.connect(self.update_source_menu)
self.run_menu = mainmenu.get_application_menu("run_menu")
self.debug_menu = mainmenu.get_application_menu("debug_menu")
self.consoles_menu = mainmenu.get_application_menu("consoles_menu")
self.consoles_menu.aboutToShow.connect(
self.update_execution_state_kernel)
self.projects_menu = mainmenu.get_application_menu("projects_menu")
self.projects_menu.aboutToShow.connect(self.valid_project)
# Switcher shortcuts
self.file_switcher_action = create_action(
self,
_('File switcher...'),
icon=ima.icon('filelist'),
tip=_('Fast switch between files'),
triggered=self.open_switcher,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.file_switcher_action, context="_",
name="File switcher")
self.symbol_finder_action = create_action(
self, _('Symbol finder...'),
icon=ima.icon('symbol_find'),
tip=_('Fast symbol search in file'),
triggered=self.open_symbolfinder,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.symbol_finder_action, context="_",
name="symbol finder", add_shortcut_to_tip=True)
def create_edit_action(text, tr_text, icon):
textseq = text.split(' ')
method_name = textseq[0].lower()+"".join(textseq[1:])
action = create_action(self, tr_text,
icon=icon,
triggered=self.global_callback,
data=method_name,
context=Qt.WidgetShortcut)
self.register_shortcut(action, "Editor", text)
return action
self.undo_action = create_edit_action('Undo', _('Undo'),
ima.icon('undo'))
self.redo_action = create_edit_action('Redo', _('Redo'),
ima.icon('redo'))
self.copy_action = create_edit_action('Copy', _('Copy'),
ima.icon('editcopy'))
self.cut_action = create_edit_action('Cut', _('Cut'),
ima.icon('editcut'))
self.paste_action = create_edit_action('Paste', _('Paste'),
ima.icon('editpaste'))
self.selectall_action = create_edit_action("Select All",
_("Select All"),
ima.icon('selectall'))
self.edit_menu_actions = [self.undo_action, self.redo_action,
None, self.cut_action, self.copy_action,
self.paste_action, self.selectall_action]
switcher_actions = [
self.file_switcher_action,
self.symbol_finder_action
]
for switcher_action in switcher_actions:
mainmenu.add_item_to_application_menu(
switcher_action,
menu_id=ApplicationMenus.File,
section=FileMenuSections.Switcher,
before_section=FileMenuSections.Restart)
self.set_splash("")
# Toolbars
# TODO: Remove after finishing the migration
logger.info("Creating toolbars...")
toolbar = self.toolbar
self.file_toolbar = toolbar.get_application_toolbar("file_toolbar")
self.run_toolbar = toolbar.get_application_toolbar("run_toolbar")
self.debug_toolbar = toolbar.get_application_toolbar("debug_toolbar")
self.main_toolbar = toolbar.get_application_toolbar("main_toolbar")
# Tools + External Tools (some of this depends on the Application
# plugin)
logger.info("Creating Tools menu...")
spyder_path_action = create_action(
self,
_("PYTHONPATH manager"),
None, icon=ima.icon('pythonpath'),
triggered=self.show_path_manager,
tip=_("PYTHONPATH manager"),
menurole=QAction.ApplicationSpecificRole)
from spyder.plugins.application.plugin import (
ApplicationActions, WinUserEnvDialog)
winenv_action = None
if WinUserEnvDialog:
winenv_action = self.application.get_action(
ApplicationActions.SpyderWindowsEnvVariables)
mainmenu.add_item_to_application_menu(
spyder_path_action,
menu_id=ApplicationMenus.Tools,
section=ToolsMenuSections.Tools,
before=winenv_action
)
if get_debug_level() >= 3:
self.menu_lsp_logs = QMenu(_("LSP logs"))
self.menu_lsp_logs.aboutToShow.connect(self.update_lsp_logs)
mainmenu.add_item_to_application_menu(
self.menu_lsp_logs,
menu_id=ApplicationMenus.Tools)
# Main toolbar
from spyder.plugins.toolbar.api import (
ApplicationToolbars, MainToolbarSections)
self.toolbar.add_item_to_application_toolbar(
spyder_path_action,
toolbar_id=ApplicationToolbars.Main,
section=MainToolbarSections.ApplicationSection
)
self.set_splash(_("Setting up main window..."))
#----- Tours
# TODO: Move tours to a plugin structure
self.tour = tour.AnimatedTour(self)
# self.tours_menu = QMenu(_("Interactive tours"), self)
# self.tour_menu_actions = []
# # TODO: Only show intro tour for now. When we are close to finish
# # 3.0, we will finish and show the other tour
self.tours_available = tour.get_tours(DEFAULT_TOUR)
for i, tour_available in enumerate(self.tours_available):
self.tours_available[i]['last'] = 0
tour_name = tour_available['name']
# def trigger(i=i, self=self): # closure needed!
# return lambda: self.show_tour(i)
# temp_action = create_action(self, tour_name, tip="",
# triggered=trigger())
# self.tour_menu_actions += [temp_action]
# self.tours_menu.addActions(self.tour_menu_actions)
self.tour_action = create_action(
self,
self.tours_available[DEFAULT_TOUR]['name'],
tip=_("Interactive tour introducing Spyder's panes and features"),
triggered=lambda: self.show_tour(DEFAULT_TOUR))
mainmenu.add_item_to_application_menu(
self.tour_action,
menu_id=ApplicationMenus.Help,
section=HelpMenuSections.Documentation)
# TODO: Move to plugin
# IPython documentation
if self.help is not None:
self.ipython_menu = SpyderMenu(
parent=self,
title=_("IPython documentation"))
intro_action = create_action(
self,
_("Intro to IPython"),
triggered=self.ipyconsole.show_intro)
quickref_action = create_action(
self,
_("Quick reference"),
triggered=self.ipyconsole.show_quickref)
guiref_action = create_action(
self,
_("Console help"),
triggered=self.ipyconsole.show_guiref)
add_actions(
self.ipython_menu,
(intro_action, guiref_action, quickref_action))
mainmenu.add_item_to_application_menu(
self.ipython_menu,
menu_id=ApplicationMenus.Help,
section=HelpMenuSections.ExternalDocumentation,
before_section=HelpMenuSections.About)
# TODO: Migrate to use the MainMenu Plugin instead of list of actions
# Filling out menu/toolbar entries:
add_actions(self.edit_menu, self.edit_menu_actions)
add_actions(self.search_menu, self.search_menu_actions)
add_actions(self.source_menu, self.source_menu_actions)
add_actions(self.run_menu, self.run_menu_actions)
add_actions(self.debug_menu, self.debug_menu_actions)
add_actions(self.consoles_menu, self.consoles_menu_actions)
add_actions(self.projects_menu, self.projects_menu_actions)
# Emitting the signal notifying plugins that main window menu and
# toolbar actions are all defined:
self.all_actions_defined.emit()
def __getattr__(self, attr):
"""
Redefinition of __getattr__ to enable access to plugins.
Loaded plugins can be accessed as attributes of the mainwindow
as before, e.g self.console or self.main.console, preserving the
same accessor as before.
"""
# Mapping of new plugin identifiers to the old attribute
# names given to plugins
if attr in self._INTERNAL_PLUGINS_MAPPING.keys():
return self.get_plugin(self._INTERNAL_PLUGINS_MAPPING[attr])
try:
return self.get_plugin(attr)
except SpyderAPIError:
pass
return super().__getattr__(attr)
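# Examples of the attribute redirection above (a sketch, not exhaustive):
#     self.console    -> self.get_plugin(Plugins.Console)
#     self.ipyconsole -> self.get_plugin(Plugins.IPythonConsole)
# Attributes missing from the mapping are looked up directly as plugin names.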
def update_lsp_logs(self):
"""Create an action for each lsp log file."""
self.menu_lsp_logs.clear()
lsp_logs = []
files = glob.glob(osp.join(get_conf_path('lsp_logs'), '*.log'))
for f in files:
action = create_action(self, f, triggered=self.editor.load)
action.setData(f)
lsp_logs.append(action)
add_actions(self.menu_lsp_logs, lsp_logs)
def pre_visible_setup(self):
"""
Actions to be performed before the main window is visible.
The actions here are related with setting up the main window.
"""
logger.info("Setting up window...")
for plugin_id, plugin_instance in self._PLUGINS.items():
try:
plugin_instance.before_mainwindow_visible()
except AttributeError:
pass
if self.splash is not None:
self.splash.hide()
# Menu about to show
for child in self.menuBar().children():
if isinstance(child, QMenu):
try:
child.aboutToShow.connect(self.update_edit_menu)
child.aboutToShow.connect(self.update_search_menu)
except TypeError:
pass
logger.info("*** End of MainWindow setup ***")
self.is_starting_up = False
for plugin, plugin_instance in self._EXTERNAL_PLUGINS.items():
self.tabify_plugin(plugin_instance, Plugins.Console)
if isinstance(plugin_instance, SpyderDockablePlugin):
plugin_instance.get_widget().toggle_view(False)
def post_visible_setup(self):
"""Actions to be performed only after the main window's `show` method
was triggered"""
for __, plugin in self._PLUGINS.items():
try:
plugin.on_mainwindow_visible()
except AttributeError:
pass
self.restore_scrollbar_position.emit()
logger.info('Deleting previous Spyder instance LSP logs...')
delete_lsp_log_files()
# Workaround for spyder-ide/spyder#880.
# QDockWidget objects are not painted if restored as floating
# windows, so we must dock them before showing the mainwindow,
# then set them again as floating windows here.
for widget in self.floating_dockwidgets:
widget.setFloating(True)
# Server to maintain just one Spyder instance and open files in it if
# the user tries to start other instances with
# $ spyder foo.py
if (CONF.get('main', 'single_instance') and not self.new_instance
and self.open_files_server):
t = threading.Thread(target=self.start_open_files_server)
t.daemon = True
t.start()
# Connect the window to the signal emitted by the previous server
# when it gets a client connected to it
self.sig_open_external_file.connect(self.open_external_file)
# Hide Internal Console so that people don't use it instead of
# the External or IPython ones
if self.console.dockwidget.isVisible() and DEV is None:
self.console.toggle_view_action.setChecked(False)
self.console.dockwidget.hide()
# Show Help and Consoles by default
plugins_to_show = [self.ipyconsole]
if self.help is not None:
plugins_to_show.append(self.help)
for plugin in plugins_to_show:
if plugin.dockwidget.isVisible():
plugin.dockwidget.raise_()
# Show history file if no console is visible
if not self.ipyconsole._isvisible:
self.historylog.add_history(get_conf_path('history.py'))
# Update plugins toggle actions to show the "Switch to" plugin shortcut
self._update_shortcuts_in_panes_menu()
# Process pending events and hide splash before loading the
# previous session.
QApplication.processEvents()
if self.splash is not None:
self.splash.hide()
if self.open_project:
if not running_in_mac_app():
self.projects.open_project(
self.open_project, workdir=self.init_workdir
)
else:
# Load last project if a project was active when Spyder
# was closed
self.projects.reopen_last_project()
# If no project is active, load last session
if self.projects.get_active_project() is None:
self.editor.setup_open_files(close_previous_files=False)
# Connect Editor debug action with Console
self.ipyconsole.sig_pdb_state.connect(self.editor.update_pdb_state)
# Raise the menuBar to the top of the main window widget's stack
# Fixes spyder-ide/spyder#3887.
self.menuBar().raise_()
# Handle DPI scale and window changes to show a restart message.
# Don't activate this functionality on macOS because it's being
# triggered in the wrong situations.
# See spyder-ide/spyder#11846
if not sys.platform == 'darwin':
window = self.window().windowHandle()
window.screenChanged.connect(self.handle_new_screen)
screen = self.window().windowHandle().screen()
self.current_dpi = screen.logicalDotsPerInch()
screen.logicalDotsPerInchChanged.connect(
self.show_dpi_change_message)
# Notify that the setup of the mainwindow was finished
self.is_setting_up = False
self.sig_setup_finished.emit()
def handle_new_screen(self, new_screen):
"""Connect DPI signals for new screen."""
if new_screen is not None:
new_screen_dpi = new_screen.logicalDotsPerInch()
if self.current_dpi != new_screen_dpi:
self.show_dpi_change_message(new_screen_dpi)
else:
new_screen.logicalDotsPerInchChanged.connect(
self.show_dpi_change_message)
def handle_dpi_change_response(self, result, dpi):
"""Handle dpi change message dialog result."""
if self.dpi_change_dismiss_box.isChecked():
self.show_dpi_message = False
self.dpi_change_dismiss_box = None
if result == 0: # Restart button was clicked
# Activate HDPI auto-scaling option since it is needed for a
# proper display when using OS scaling
CONF.set('main', 'normal_screen_resolution', False)
CONF.set('main', 'high_dpi_scaling', True)
CONF.set('main', 'high_dpi_custom_scale_factor', False)
self.restart()
else:
# Update current dpi for future checks
self.current_dpi = dpi
def show_dpi_change_message(self, dpi):
"""Show message to restart Spyder since the DPI scale changed."""
if not self.show_dpi_message:
return
if self.current_dpi != dpi:
# Check the window state to not show the message if the window
# is in fullscreen mode.
window = self.window().windowHandle()
if (window.windowState() == Qt.WindowFullScreen and
sys.platform == 'darwin'):
return
self.dpi_change_dismiss_box = QCheckBox(
_("Hide this message during the current session"),
self
)
msgbox = QMessageBox(self)
msgbox.setIcon(QMessageBox.Warning)
msgbox.setText(
_("A monitor scale change was detected. <br><br>"
"We recommend restarting Spyder to ensure that it's properly "
"displayed. If you don't want to do that, please be sure to "
"activate the option<br><br><tt>Enable auto high DPI scaling"
"</tt><br><br>in <tt>Preferences > General > Interface</tt>, "
"in case Spyder is not displayed correctly.<br><br>"
"Do you want to restart Spyder?"))
msgbox.addButton(_('Restart now'), QMessageBox.NoRole)
dismiss_button = msgbox.addButton(
_('Dismiss'), QMessageBox.NoRole)
msgbox.setCheckBox(self.dpi_change_dismiss_box)
msgbox.setDefaultButton(dismiss_button)
msgbox.finished.connect(
lambda result: self.handle_dpi_change_response(result, dpi))
msgbox.open()
def set_window_title(self):
"""Set window title."""
if DEV is not None:
title = u"Spyder %s (Python %s.%s)" % (__version__,
sys.version_info[0],
sys.version_info[1])
elif running_in_mac_app() or is_pynsist():
title = "Spyder"
else:
title = u"Spyder (Python %s.%s)" % (sys.version_info[0],
sys.version_info[1])
if get_debug_level():
title += u" [DEBUG MODE %d]" % get_debug_level()
if self.window_title is not None:
title += u' -- ' + to_text_string(self.window_title)
if self.projects is not None:
path = self.projects.get_active_project_path()
if path:
path = path.replace(get_home_dir(), u'~')
title = u'{0} - {1}'.format(path, title)
self.base_title = title
self.setWindowTitle(self.base_title)
# TODO: To be removed after all actions are moved to their corresponding
# plugins
def register_shortcut(self, qaction_or_qshortcut, context, name,
add_shortcut_to_tip=True, plugin_name=None):
self.shortcuts.register_shortcut(
qaction_or_qshortcut,
context,
name,
add_shortcut_to_tip=add_shortcut_to_tip,
plugin_name=plugin_name,
)
# --- Other
def update_execution_state_kernel(self):
"""Handle execution state of the current console."""
try:
self.ipyconsole.update_execution_state_kernel()
except AttributeError:
return
def valid_project(self):
"""Handle an invalid active project."""
try:
path = self.projects.get_active_project_path()
except AttributeError:
return
if bool(path):
if not self.projects.is_valid_project(path):
if path:
QMessageBox.critical(
self,
_('Error'),
_("<b>{}</b> is no longer a valid Spyder project! "
"Since it is the current active project, it will "
"be closed automatically.").format(path))
self.projects.close_project()
def update_source_menu(self):
"""Update source menu options that vary dynamically."""
# This is necessary to avoid an error at startup.
# Fixes spyder-ide/spyder#14901
try:
self.editor.refresh_formatter_name()
except AttributeError:
pass
def free_memory(self):
"""Free memory after event."""
gc.collect()
def plugin_focus_changed(self):
"""Focus has changed from one plugin to another"""
self.update_edit_menu()
self.update_search_menu()
def show_shortcuts(self, menu):
"""Show action shortcuts in menu."""
menu_actions = menu.actions()
for action in menu_actions:
if getattr(action, '_shown_shortcut', False):
# This is a SpyderAction
if action._shown_shortcut is not None:
action.setShortcut(action._shown_shortcut)
elif action.menu() is not None:
# This is submenu, so we need to call this again
self.show_shortcuts(action.menu())
else:
# We don't need to do anything for other elements
continue
def hide_shortcuts(self, menu):
"""Hide action shortcuts in menu."""
menu_actions = menu.actions()
for action in menu_actions:
if getattr(action, '_shown_shortcut', False):
# This is a SpyderAction
if action._shown_shortcut is not None:
action.setShortcut(QKeySequence())
elif action.menu() is not None:
# This is submenu, so we need to call this again
self.hide_shortcuts(action.menu())
else:
# We don't need to do anything for other elements
continue
def hide_options_menus(self):
"""Hide options menu when menubar is pressed in macOS."""
for plugin in self.widgetlist + self.thirdparty_plugins:
if plugin.CONF_SECTION == 'editor':
editorstack = self.editor.get_current_editorstack()
editorstack.menu.hide()
else:
try:
# New API
plugin.options_menu.hide()
except AttributeError:
# Old API
plugin._options_menu.hide()
def get_focus_widget_properties(self):
"""Get properties of focus widget
Returns tuple (widget, properties) where properties is a tuple of
booleans: (is_console, not_readonly, readwrite_editor)"""
from spyder.plugins.editor.widgets.base import TextEditBaseWidget
from spyder.plugins.ipythonconsole.widgets import ControlWidget
widget = QApplication.focusWidget()
textedit_properties = None
if isinstance(widget, (TextEditBaseWidget, ControlWidget)):
console = isinstance(widget, ControlWidget)
not_readonly = not widget.isReadOnly()
readwrite_editor = not_readonly and not console
textedit_properties = (console, not_readonly, readwrite_editor)
return widget, textedit_properties
def update_edit_menu(self):
"""Update edit menu"""
widget, textedit_properties = self.get_focus_widget_properties()
if textedit_properties is None: # widget is not an editor/console
return
# !!! Below this line, widget is expected to be a QPlainTextEdit
# instance
console, not_readonly, readwrite_editor = textedit_properties
# Editor has focus and there is no file opened in it
if (not console and not_readonly and self.editor
and not self.editor.is_file_opened()):
return
# Disabling all actions to begin with
for child in self.edit_menu.actions():
child.setEnabled(False)
self.selectall_action.setEnabled(True)
# Undo, redo
self.undo_action.setEnabled(
readwrite_editor and widget.document().isUndoAvailable())
self.redo_action.setEnabled(
readwrite_editor and widget.document().isRedoAvailable())
# Copy, cut, paste, delete
has_selection = widget.has_selected_text()
self.copy_action.setEnabled(has_selection)
self.cut_action.setEnabled(has_selection and not_readonly)
self.paste_action.setEnabled(not_readonly)
# Comment, uncomment, indent, unindent...
if not console and not_readonly:
# This is the editor and current file is writable
if self.editor:
for action in self.editor.edit_menu_actions:
action.setEnabled(True)
def update_search_menu(self):
"""Update search menu"""
# Disabling all actions except the last one
# (which is Find in files) to begin with
for child in self.search_menu.actions()[:-1]:
child.setEnabled(False)
widget, textedit_properties = self.get_focus_widget_properties()
if textedit_properties is None: # widget is not an editor/console
return
# !!! Below this line, widget is expected to be a QPlainTextEdit
# instance
console, not_readonly, readwrite_editor = textedit_properties
# Find actions only trigger an effect in the Editor
if not console:
for action in self.search_menu.actions():
try:
action.setEnabled(True)
except RuntimeError:
pass
# Disable the replace action for read-only files
if len(self.search_menu_actions) > 3:
self.search_menu_actions[3].setEnabled(readwrite_editor)
def createPopupMenu(self):
return self.application.get_application_context_menu(parent=self)
def set_splash(self, message):
"""Set splash message"""
if self.splash is None:
return
if message:
logger.info(message)
self.splash.show()
self.splash.showMessage(message,
int(Qt.AlignBottom | Qt.AlignCenter |
Qt.AlignAbsolute),
QColor(Qt.white))
QApplication.processEvents()
def closeEvent(self, event):
"""closeEvent reimplementation"""
if self.closing(True):
event.accept()
else:
event.ignore()
def resizeEvent(self, event):
"""Reimplement Qt method"""
if not self.isMaximized() and not self.layouts.get_fullscreen_flag():
self.window_size = self.size()
QMainWindow.resizeEvent(self, event)
# To be used by the tour to be able to resize
self.sig_resized.emit(event)
def moveEvent(self, event):
"""Reimplement Qt method"""
if not self.isMaximized() and not self.layouts.get_fullscreen_flag():
self.window_position = self.pos()
QMainWindow.moveEvent(self, event)
# To be used by the tour to be able to move
self.sig_moved.emit(event)
def hideEvent(self, event):
"""Reimplement Qt method"""
try:
for plugin in (self.widgetlist + self.thirdparty_plugins):
# TODO: Remove old API
try:
# New API
if plugin.get_widget().isAncestorOf(
self.last_focused_widget):
plugin.change_visibility(True)
except AttributeError:
# Old API
if plugin.isAncestorOf(self.last_focused_widget):
plugin._visibility_changed(True)
QMainWindow.hideEvent(self, event)
except RuntimeError:
QMainWindow.hideEvent(self, event)
def change_last_focused_widget(self, old, now):
"""To keep track of to the last focused widget"""
if (now is None and QApplication.activeWindow() is not None):
QApplication.activeWindow().setFocus()
self.last_focused_widget = QApplication.focusWidget()
elif now is not None:
self.last_focused_widget = now
self.previous_focused_widget = old
def closing(self, cancelable=False):
"""Exit tasks"""
if self.already_closed or self.is_starting_up:
return True
if cancelable and CONF.get('main', 'prompt_on_exit'):
reply = QMessageBox.critical(self, 'Spyder',
'Do you really want to exit?',
QMessageBox.Yes, QMessageBox.No)
if reply == QMessageBox.No:
return False
if CONF.get('main', 'single_instance') and self.open_files_server:
self.open_files_server.close()
# Internal plugins
for plugin in (self.widgetlist + self.thirdparty_plugins):
# New API
try:
if isinstance(plugin, SpyderDockablePlugin):
plugin.close_window()
if not plugin.on_close(cancelable):
return False
except AttributeError:
pass
# Old API
try:
plugin._close_window()
if not plugin.closing_plugin(cancelable):
return False
except AttributeError:
pass
# New API: External plugins
for plugin_name, plugin in self._EXTERNAL_PLUGINS.items():
try:
if isinstance(plugin, SpyderDockablePlugin):
plugin.close_window()
if not plugin.on_close(cancelable):
return False
except AttributeError as e:
logger.error(str(e))
# Save window settings *after* closing all plugin windows, in order
# to show them in their previous locations in the next session.
# Fixes spyder-ide/spyder#12139
prefix = 'window' + '/'
self.layouts.save_current_window_settings(prefix)
self.already_closed = True
return True
def add_dockwidget(self, plugin):
"""
Add a plugin QDockWidget to the main window.
"""
try:
# New API
if plugin.is_compatible:
dockwidget, location = plugin.create_dockwidget(self)
self.addDockWidget(location, dockwidget)
self.widgetlist.append(plugin)
except AttributeError:
# Old API
if plugin._is_compatible:
dockwidget, location = plugin._create_dockwidget()
self.addDockWidget(location, dockwidget)
self.widgetlist.append(plugin)
@Slot()
def global_callback(self):
"""Global callback"""
widget = QApplication.focusWidget()
action = self.sender()
callback = from_qvariant(action.data(), to_text_string)
from spyder.plugins.editor.widgets.base import TextEditBaseWidget
from spyder.plugins.ipythonconsole.widgets import ControlWidget
if isinstance(widget, (TextEditBaseWidget, ControlWidget)):
getattr(widget, callback)()
else:
return
def redirect_internalshell_stdio(self, state):
if state:
self.console.redirect_stds()
else:
self.console.restore_stds()
def open_external_console(self, fname, wdir, args, interact, debug, python,
python_args, systerm, post_mortem=False):
"""Open external console"""
if systerm:
# Running script in an external system terminal
try:
if CONF.get('main_interpreter', 'default'):
executable = get_python_executable()
else:
executable = CONF.get('main_interpreter', 'executable')
programs.run_python_script_in_terminal(
fname, wdir, args, interact, debug, python_args,
executable)
except NotImplementedError:
QMessageBox.critical(self, _("Run"),
_("Running an external system terminal "
"is not supported on platform %s."
) % os.name)
def execute_in_external_console(self, lines, focus_to_editor):
"""
Execute lines in IPython console and eventually set focus
to the Editor.
"""
console = self.ipyconsole
console.switch_to_plugin()
console.execute_code(lines)
if focus_to_editor:
self.editor.switch_to_plugin()
def open_file(self, fname, external=False):
"""
Open filename with the appropriate application
Redirect to the right widget (txt -> editor, spydata -> workspace, ...)
or open file outside Spyder (if extension is not supported)
"""
fname = to_text_string(fname)
ext = osp.splitext(fname)[1]
if encoding.is_text_file(fname):
self.editor.load(fname)
elif self.variableexplorer is not None and ext in IMPORT_EXT:
self.variableexplorer.import_data(fname)
elif not external:
fname = file_uri(fname)
start_file(fname)
def open_external_file(self, fname):
"""
Open external files that can be handled either by the Editor or the
variable explorer inside Spyder.
"""
# Check that file exists
fname = encoding.to_unicode_from_fs(fname)
if osp.exists(osp.join(CWD, fname)):
fpath = osp.join(CWD, fname)
elif osp.exists(fname):
fpath = fname
else:
return
# Don't open script that starts Spyder at startup.
# Fixes issue spyder-ide/spyder#14483
if sys.platform == 'darwin' and 'bin/spyder' in fname:
return
if osp.isfile(fpath):
self.open_file(fpath, external=True)
elif osp.isdir(fpath):
QMessageBox.warning(
self, _("Error"),
_('To open <code>{fpath}</code> as a project with Spyder, '
'please use <code>spyder -p "{fname}"</code>.')
.format(fpath=osp.normpath(fpath), fname=fname)
)
# --- Path Manager
# ------------------------------------------------------------------------
def load_python_path(self):
"""Load path stored in Spyder configuration folder."""
if osp.isfile(self.SPYDER_PATH):
path, _x = encoding.readlines(self.SPYDER_PATH)
self.path = tuple(name for name in path if osp.isdir(name))
if osp.isfile(self.SPYDER_NOT_ACTIVE_PATH):
not_active_path, _x = encoding.readlines(
self.SPYDER_NOT_ACTIVE_PATH)
self.not_active_path = tuple(name for name in not_active_path
if osp.isdir(name))
def save_python_path(self, new_path_dict):
"""
Save path in Spyder configuration folder.
`new_path_dict` is an OrderedDict that has the new paths as keys and
the state as values. The state is `True` for active and `False` for
inactive.
"""
path = [p for p in new_path_dict]
not_active_path = [p for p in new_path_dict if not new_path_dict[p]]
try:
encoding.writelines(path, self.SPYDER_PATH)
encoding.writelines(not_active_path, self.SPYDER_NOT_ACTIVE_PATH)
except EnvironmentError as e:
logger.error(str(e))
CONF.set('main', 'spyder_pythonpath', self.get_spyder_pythonpath())
def get_spyder_pythonpath_dict(self):
"""
Return Spyder PYTHONPATH.
The returned ordered dictionary has the paths as keys and the state
as values. The state is `True` for active and `False` for inactive.
Example:
OrderedDict([('/some/path', True), ('/some/other/path', False)])
"""
self.load_python_path()
path_dict = OrderedDict()
for path in self.path:
path_dict[path] = path not in self.not_active_path
for path in self.project_path:
path_dict[path] = True
return path_dict
def get_spyder_pythonpath(self):
"""
Return Spyder PYTHONPATH.
"""
path_dict = self.get_spyder_pythonpath_dict()
path = [k for k, v in path_dict.items() if v]
return path
def update_python_path(self, new_path_dict):
"""Update python path on Spyder interpreter and kernels."""
# Load previous path
path_dict = self.get_spyder_pythonpath_dict()
# Save path
if path_dict != new_path_dict:
# It doesn't include the project_path
self.save_python_path(new_path_dict)
# Load new path
new_path_dict_p = self.get_spyder_pythonpath_dict() # Includes project
# Update Spyder interpreter
for path in path_dict:
while path in sys.path:
sys.path.remove(path)
for path, active in reversed(new_path_dict_p.items()):
if active:
sys.path.insert(1, path)
# Any plugin that needs to do some work based on this signal should
# connect to it on plugin registration
self.sig_pythonpath_changed.emit(path_dict, new_path_dict_p)
@Slot()
def show_path_manager(self):
"""Show path manager dialog."""
from spyder.widgets.pathmanager import PathManager
read_only_path = tuple(self.projects.get_pythonpath())
dialog = PathManager(self, self.path, read_only_path,
self.not_active_path, sync=True)
self._path_manager = dialog
dialog.sig_path_changed.connect(self.update_python_path)
dialog.redirect_stdio.connect(self.redirect_internalshell_stdio)
dialog.show()
def pythonpath_changed(self):
"""Project's PYTHONPATH contribution has changed."""
self.project_path = tuple(self.projects.get_pythonpath())
path_dict = self.get_spyder_pythonpath_dict()
self.update_python_path(path_dict)
#---- Preferences
def apply_settings(self):
"""Apply main window settings."""
qapp = QApplication.instance()
# Set 'gtk+' as the default theme in Gtk-based desktops
# Fixes spyder-ide/spyder#2036.
if is_gtk_desktop() and ('GTK+' in QStyleFactory.keys()):
try:
qapp.setStyle('gtk+')
except:
pass
default = self.DOCKOPTIONS
if CONF.get('main', 'vertical_tabs'):
default = default|QMainWindow.VerticalTabs
self.setDockOptions(default)
self.apply_panes_settings()
if CONF.get('main', 'use_custom_cursor_blinking'):
qapp.setCursorFlashTime(
CONF.get('main', 'custom_cursor_blinking'))
else:
qapp.setCursorFlashTime(self.CURSORBLINK_OSDEFAULT)
def apply_panes_settings(self):
"""Update dockwidgets features settings."""
for plugin in (self.widgetlist + self.thirdparty_plugins):
features = plugin.dockwidget.FEATURES
plugin.dockwidget.setFeatures(features)
try:
# New API
margin = 0
if CONF.get('main', 'use_custom_margin'):
margin = CONF.get('main', 'custom_margin')
plugin.update_margins(margin)
except AttributeError:
# Old API
plugin._update_margins()
@Slot()
def show_preferences(self):
"""Edit Spyder preferences."""
self.preferences.open_dialog(self.prefs_dialog_size)
def set_prefs_size(self, size):
"""Save preferences dialog size."""
self.prefs_dialog_size = size
# -- Open files server
def start_open_files_server(self):
self.open_files_server.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
port = select_port(default_port=OPEN_FILES_PORT)
CONF.set('main', 'open_files_port', port)
self.open_files_server.bind(('127.0.0.1', port))
self.open_files_server.listen(20)
while 1: # 1 is faster than True
try:
req, dummy = self.open_files_server.accept()
except socket.error as e:
# See spyder-ide/spyder#1275 for details on why errno EINTR is
# silently ignored here.
eintr = errno.WSAEINTR if os.name == 'nt' else errno.EINTR
# To avoid a traceback after closing on Windows
if e.args[0] == eintr:
continue
# handle a connection abort on close error
enotsock = (errno.WSAENOTSOCK if os.name == 'nt'
else errno.ENOTSOCK)
if e.args[0] in [errno.ECONNABORTED, enotsock]:
return
raise
fname = req.recv(1024)
fname = fname.decode('utf-8')
self.sig_open_external_file.emit(fname)
req.sendall(b' ')
# ---- Quit and restart, and reset spyder defaults
@Slot()
def reset_spyder(self):
"""
Quit and reset Spyder and then Restart application.
"""
answer = QMessageBox.warning(self, _("Warning"),
_("Spyder will restart and reset to default settings: <br><br>"
"Do you want to continue?"),
QMessageBox.Yes | QMessageBox.No)
if answer == QMessageBox.Yes:
self.restart(reset=True)
@Slot()
def restart(self, reset=False):
"""Wrapper to handle plugins request to restart Spyder."""
self.application.restart(reset=reset)
# ---- Interactive Tours
def show_tour(self, index):
"""Show interactive tour."""
self.layouts.maximize_dockwidget(restore=True)
frames = self.tours_available[index]
self.tour.set_tour(index, frames, self)
self.tour.start_tour()
# ---- Global Switcher
def open_switcher(self, symbol=False):
"""Open switcher dialog box."""
if self.switcher is not None and self.switcher.isVisible():
self.switcher.clear()
self.switcher.hide()
return
if symbol:
self.switcher.set_search_text('@')
else:
self.switcher.set_search_text('')
self.switcher.setup()
self.switcher.show()
# Note: The +6 pixel on the top makes it look better
# FIXME: Why is this using the toolbars menu? A: To not be on top of
# the toolbars.
# Probably toolbars should be taken into account for this 'delta' only
# when they are visible
delta_top = (self.toolbar.toolbars_menu.geometry().height() +
self.menuBar().geometry().height() + 6)
self.switcher.set_position(delta_top)
def open_symbolfinder(self):
"""Open symbol list management dialog box."""
self.open_switcher(symbol=True)
def create_switcher(self):
"""Create switcher dialog instance."""
if self.switcher is None:
from spyder.widgets.switcher import Switcher
self.switcher = Switcher(self)
return self.switcher
@Slot()
def show_tour_message(self, force=False):
"""
Show message about starting the tour the first time Spyder starts.
"""
should_show_tour = CONF.get('main', 'show_tour_message')
if force or (should_show_tour and not running_under_pytest()
and not get_safe_mode()):
CONF.set('main', 'show_tour_message', False)
self.tour_dialog = tour.OpenTourDialog(
self, lambda: self.show_tour(DEFAULT_TOUR))
self.tour_dialog.show()
# --- For OpenGL
def _test_setting_opengl(self, option):
"""Get the current OpenGL implementation in use"""
if option == 'software':
return QCoreApplication.testAttribute(Qt.AA_UseSoftwareOpenGL)
elif option == 'desktop':
return QCoreApplication.testAttribute(Qt.AA_UseDesktopOpenGL)
elif option == 'gles':
return QCoreApplication.testAttribute(Qt.AA_UseOpenGLES)
#==============================================================================
# Utilities for the 'main' function below
#==============================================================================
def create_application():
"""Create application and patch sys.exit."""
# Our QApplication
app = qapplication()
# --- Set application icon
app_icon = QIcon(get_image_path("spyder"))
app.setWindowIcon(app_icon)
# Required for correct icon on GNOME/Wayland:
if hasattr(app, 'setDesktopFileName'):
app.setDesktopFileName('spyder')
#----Monkey patching QApplication
class FakeQApplication(QApplication):
"""Spyder's fake QApplication"""
def __init__(self, args):
self = app # analysis:ignore
@staticmethod
def exec_():
"""Do nothing because the Qt mainloop is already running"""
pass
from qtpy import QtWidgets
QtWidgets.QApplication = FakeQApplication
# ----Monkey patching sys.exit
def fake_sys_exit(arg=[]):
pass
sys.exit = fake_sys_exit
# ----Monkey patching sys.excepthook to avoid crashes in PyQt 5.5+
def spy_excepthook(type_, value, tback):
sys.__excepthook__(type_, value, tback)
sys.excepthook = spy_excepthook
# Removing arguments from sys.argv as in standard Python interpreter
sys.argv = ['']
return app
def create_window(app, splash, options, args):
"""
Create and show Spyder's main window and start QApplication event loop.
"""
# Main window
main = MainWindow(splash, options)
try:
main.setup()
except BaseException:
if main.console is not None:
try:
main.console.exit_interpreter()
except BaseException:
pass
raise
main.pre_visible_setup()
main.show()
main.post_visible_setup()
if main.console:
namespace = CONF.get('internal_console', 'namespace', {})
main.console.start_interpreter(namespace)
main.console.set_namespace_item('spy', Spy(app=app, window=main))
# Propagate current configurations to all configuration observers
CONF.notify_all_observers()
# Don't show icons in menus for Mac
if sys.platform == 'darwin':
QCoreApplication.setAttribute(Qt.AA_DontShowIconsInMenus, True)
# Open external files with our Mac app
if running_in_mac_app():
app.sig_open_external_file.connect(main.open_external_file)
app._has_started = True
if hasattr(app, '_pending_file_open'):
if args:
args = app._pending_file_open + args
else:
args = app._pending_file_open
# Open external files passed as args
if args:
for a in args:
main.open_external_file(a)
# To give focus again to the last focused widget after restoring
# the window
app.focusChanged.connect(main.change_last_focused_widget)
if not running_under_pytest():
app.exec_()
return main
#==============================================================================
# Main
#==============================================================================
def main(options, args):
"""Main function"""
# **** For Pytest ****
if running_under_pytest():
if CONF.get('main', 'opengl') != 'automatic':
option = CONF.get('main', 'opengl')
set_opengl_implementation(option)
app = create_application()
window = create_window(app, None, options, None)
return window
# **** Handle hide_console option ****
if options.show_console:
print("(Deprecated) --show console does nothing, now the default "
" behavior is to show the console, use --hide-console if you "
"want to hide it")
if set_attached_console_visible is not None:
set_attached_console_visible(not options.hide_console
or options.reset_config_files
or options.reset_to_defaults
or options.optimize
or bool(get_debug_level()))
# **** Set OpenGL implementation to use ****
# This attribute must be set before creating the application.
# See spyder-ide/spyder#11227
if options.opengl_implementation:
option = options.opengl_implementation
set_opengl_implementation(option)
else:
if CONF.get('main', 'opengl') != 'automatic':
option = CONF.get('main', 'opengl')
set_opengl_implementation(option)
# **** Set high DPI scaling ****
# This attribute must be set before creating the application.
if hasattr(Qt, 'AA_EnableHighDpiScaling'):
QCoreApplication.setAttribute(Qt.AA_EnableHighDpiScaling,
CONF.get('main', 'high_dpi_scaling'))
# **** Set debugging info ****
setup_logging(options)
# **** Create the application ****
app = create_application()
# **** Create splash screen ****
splash = create_splash_screen()
if splash is not None:
splash.show()
splash.showMessage(
_("Initializing..."),
int(Qt.AlignBottom | Qt.AlignCenter | Qt.AlignAbsolute),
QColor(Qt.white)
)
QApplication.processEvents()
if options.reset_to_defaults:
# Reset Spyder settings to defaults
CONF.reset_to_defaults()
return
elif options.optimize:
# Optimize the whole Spyder's source code directory
import spyder
programs.run_python_script(module="compileall",
args=[spyder.__path__[0]], p_args=['-O'])
return
# **** Read faulthandler log file ****
faulthandler_file = get_conf_path('faulthandler.log')
previous_crash = ''
if osp.exists(faulthandler_file):
with open(faulthandler_file, 'r') as f:
previous_crash = f.read()
# Remove file to not pick it up for next time.
try:
dst = get_conf_path('faulthandler.log.old')
shutil.move(faulthandler_file, dst)
except Exception:
pass
CONF.set('main', 'previous_crash', previous_crash)
# **** Set color for links ****
set_links_color(app)
# **** Create main window ****
mainwindow = None
try:
if PY3 and options.report_segfault:
import faulthandler
with open(faulthandler_file, 'w') as f:
faulthandler.enable(file=f)
mainwindow = create_window(app, splash, options, args)
else:
mainwindow = create_window(app, splash, options, args)
except FontError:
QMessageBox.information(None, "Spyder",
"Spyder was unable to load the <i>Spyder 3</i> "
"icon theme. That's why it's going to fallback to the "
"theme used in Spyder 2.<br><br>"
"For that, please close this window and start Spyder again.")
CONF.set('appearance', 'icon_theme', 'spyder 2')
if mainwindow is None:
# An exception occurred
if splash is not None:
splash.hide()
return
ORIGINAL_SYS_EXIT()
if __name__ == "__main__":
main()
|
crawler.py
|
import random
import subprocess
from functools import partial
import sys
import threading
from datetime import datetime, timedelta
from itertools import chain
from os import path
from time import sleep
from typing import Callable, Iterable, Union
from datetimerange import DateTimeRange
from loguru import logger
from selenium.common import exceptions as selenium_exceptions
from requests.exceptions import ProxyError
import settings
from bot import Bot
from models import exceptions
from models.account import Account, Dependent
from models.driver import Driver
from models.page import HomePage, AppointmentPage, ApplicantsPage
from utils import cycle, cleared, waited, FrozenDict, safe_iter, Default
from utils.url import Url
logger.remove(0)
logger.add(
sys.stderr, format=(
'[{time:YYYY-MM-DD HH:mm:ss}] [{level: ^7}] {extra[email]}: {message}'
), level=settings.LOG_LEVEL
)
logger.add(
path.join(settings.LOGS_PATH, '{time:YYYY-MM-DD_HH-mm-ss}.log'),
format=(
'[{time:YYYY-MM-DD HH:mm:ss}] [{level: ^7}] {extra[email]}: {message}'
), level=settings.LOG_LEVEL, rotation="00:00"
)
proxies = cycle([''] if not settings.PROXIES else random.sample(
settings.PROXIES, len(settings.PROXIES)
))
logger.configure(extra={'email': '\b'})
bot = Bot()
class Crawler:
def __init__(self, account_data: FrozenDict, data: dict):
self.account = self._create_account(account_data, data)
self.driver = Driver(self.account)
self.driver.set_page_load_timeout(settings.PAGE_LOAD_TIMEOUT)
self.account.updates.add_observer(bot)
for dependent in self.account.dependents:
dependent.updates.add_observer(bot)
self.logger = logger.bind(email=self.account.email)
self.appropriate_status = threading.Event()
self.access = threading.Event()
self.access.set()
self.init_driver()
self.driver.switch_to_tab(0)
def init_driver(self):
self.update_proxy()
page = HomePage(self.driver)
self.raw_get(page.URL)
page.language = 'en'
if self.account.auth_token:
self.driver.add_cookie({
'name': settings.AUTH_TOKEN_COOKIE_NAME,
'value': self.account.auth_token,
})
self.logger.info("an auth token cookie is used")
if self.account.session_id:
self.driver.add_cookie({
'name': settings.SESSION_ID_COOKIE_NAME,
'value': self.account.session_id,
})
self.logger.info("a session id cookie is used")
try:
self.get(page.URL)
except exceptions.AuthorizationException as e:
self.logger.error(str(e))
bot.send_error(self.account.email, str(e))
raise
self.driver.open_new_tab() # reserve a tab for status checking
self.driver.switch_to_tab(0)
for dependent in self.account.dependents:
self.driver.open_new_tab()
p = HomePage(self.driver)
self.get(p.URL)
p.language = 'en'
try:
p.click_applicants()
except selenium_exceptions.TimeoutException:
# there is no button so there are no dependents
msg = 'no dependents detected'
self.logger.error(msg)
bot.send_error(self.account.email, msg)
return
p = ApplicantsPage(self.driver)
try:
p.set_applicant(dependent.name)
except selenium_exceptions.TimeoutException:
msg = f'no dependent with name {dependent.name!r}'
self.logger.error(msg)
bot.send_error(self.account.email, msg)
continue
dependent.updates.update(
status=p.applicant_status, additional={'to_notify': False}
)
self.logger.debug(
f"{dependent.name!r} status is {p.applicant_status}"
)
def update_proxy(self):
with threading.Lock():
self.driver.set_proxy(next(proxies))
self.logger.debug(f'Set proxy to {self.driver.proxy}')
def __proxy_safe(self, func: Callable, *, args=None, kwargs=None) -> True:
"""
Execute func with args and kwargs, retrying over the configured proxies.
If no proxy produces a successful result, ProxyException is raised.
Every request is verified by accessing an element that is present
only on pages derived from `models.page.BasePage`.
Args:
func (Callable): function to be called
args (None, optional): args to be passed to function
kwargs (None, optional): kwargs to be passed to function
Returns:
The result of `func` once a proxy succeeds.
Raises:
exceptions.ProxyException: if no proxies succeeded
"""
args = args or tuple()
kwargs = kwargs or {}
try:
result = func(*args, **kwargs)
if self.test_response():
return result
except ProxyError:
pass
for _ in range(len(settings.PROXIES)):
self.update_proxy()
try:
result = func(*args, **kwargs)
if self.test_response():
return result
except ProxyError:
pass
raise exceptions.ProxyException('unable to get page via all proxies')
def test_response(self) -> bool:
page = HomePage(self.driver)
try:
return bool(page.language)
except selenium_exceptions.TimeoutException:
return False
def get(self, url: Union[str, Url]) -> True:
return self.__proxy_safe(self.driver.get, args=(url, ))
def raw_get(self, url: Union[str, Url]) -> True:
return self.__proxy_safe(self.driver.raw_get, args=(url, ))
@staticmethod
def _create_account(account_data: FrozenDict, data: dict):
if not Account.exists(email=account_data['email']):
Account.create(account_data['email'], account_data['password'])
account = Account(account_data['email'])
account.update(password=account_data['password'])
account.update(day_offset=data['day_offset'])
account.update(unavailability_datetime=data['unavailability_datetime'])
for name in data['dependents']:
if not Dependent.exists(name):
account.add_dependent(name)
return account
@logger.catch
def update_status(self):
page = HomePage(self.driver)
has_changed = False
with cleared(self.access):
self.driver.switch_to_tab(-1)
self.update_proxy()
self.logger.info('checking status')
self.get(page.URL)
status = page.status
if status == settings.DISABLE_APPOINTMENT_CHECKS_STATUS:
self.appropriate_status.clear() # stop scheduling
self.logger.debug(
'Disabling appointment checks: inappropriate status'
)
else:
self.appropriate_status.set()
if status != self.account.updates.status:
has_changed = True
self.logger.info("status is {}", status)
self.account.updates.update(
status=status,
additional={
'image': page.status_screenshot,
'email': self.account.email
}
)
self.driver.save_snapshot(settings.SNAPSHOTS_PATH)
self.driver.save_screenshot(settings.SCREENSHOTS_PATH)
else:
self.logger.info("status has not changed")
return has_changed
@logger.catch
def schedule_appointments(self):
page = HomePage(self.driver)
with waited(self.appropriate_status), cleared(self.access):
self.driver.switch_to_tab(0)
self.get(page.URL)
try:
page.click_calendar()
except selenium_exceptions.TimeoutException:
# inappropriate status for checking appointments
self.logger.error('no calendar button')
raise exceptions.NoAppointmentsException from None
iterator = self._check_new_appointments()
if not iterator:
return
settings.RequestTimeout.APPOINTMENT.value = (
settings.RequestTimeout.BURST_APPOINTMENT
)
self.driver.save_snapshot(settings.SNAPSHOTS_PATH)
self.driver.save_screenshot(settings.SCREENSHOTS_PATH)
is_ok = self._schedule_main(iterator)
if not is_ok:
return True
return self._schedule_dependents(iterator)
def get_valid_meeting(self, meetings_iterator: 'safe_iter'):
while meeting := next(meetings_iterator):
if self.is_valid_meeting(meeting):
return meeting
return False
def _check_new_appointments(self) -> Union[chain, bool]:
page = AppointmentPage(self.driver)
self.driver.switch_to_tab(0)
self.update_proxy()
self.logger.info('checking appointments')
page.refresh()
page.language = 'en'
page.matter_option = 'ARI'
offices = list(filter(
lambda x: x not in settings.AppointmentData.BLOCKED_OFFICES,
page.branch_options
)) # filter out inappropriate offices
offices.sort(key=lambda x: (
x in settings.AppointmentData.PRIORITY_OFFICES
), reverse=True)
meetings_iterator = safe_iter(
page.all_meetings(offices=offices)
)
meeting = self.get_valid_meeting(meetings_iterator)
if not meeting:
self.logger.info('no appointments have appeared')
return False
else:
# push meeting back to the iterator
return chain([meeting], meetings_iterator)
def is_valid_meeting(self, meeting: dict) -> bool:
min_datetime = (datetime.now() + timedelta(
days=self.account.day_offset
)).date()
if meeting['datetime'].date() < min_datetime:
self.logger.debug(f'Meeting {meeting} is invalid by day offset')
return False
elif any(
meeting['datetime'] in drange
for drange in self.account.unavailability_datetime
):
self.logger.debug(
f'Meeting {meeting} is in unavailability periods'
)
return False
else:
applicants = [d.updates for d in self.account.dependents] + [
self.account.updates
]
scheduled_meetings = [
{'datetime': x.datetime_signed, 'office': x.office_signed}
for x in applicants
if x.datetime_signed is not None
]
is_valid = True
for smeeting in scheduled_meetings:
if meeting['office'] == smeeting['office']:
continue
scheduled_bottom_edge = smeeting['datetime'] - timedelta(
hours=settings.AppointmentData.HOUR_OFFICE_OFFSET
)
scheduled_top_edge = smeeting['datetime'] + timedelta(
hours=settings.AppointmentData.HOUR_OFFICE_OFFSET
)
if meeting['datetime'] in DateTimeRange(
scheduled_bottom_edge, scheduled_top_edge
):
is_valid = False
break
if is_valid:
self.logger.debug(f'Meeting {meeting} is valid')
else:
self.logger.debug(
f"Meeting {meeting} is too close to scheduled meetings"
)
return is_valid
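# Worked example of the validity rules above (illustrative numbers, not taken
# from the project settings): with day_offset = 3 a meeting dated tomorrow
# fails the minimum-date check, and with HOUR_OFFICE_OFFSET = 2 a meeting at
# a *different* office starting within two hours of an already scheduled one
# is rejected as too close, while meetings at the same office skip that
# proximity check.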
def _schedule_main(self, meetings_iterator: 'safe_iter'):
page = AppointmentPage(self.driver)
if not self.account.is_signed:
while meeting := self.get_valid_meeting(meetings_iterator):
page.refresh()
try:
is_success = page.schedule(meeting)
if not is_success:
raise selenium_exceptions.NoSuchElementException
except selenium_exceptions.NoSuchElementException:
self.logger.warning(f'Meeting {meeting} is unavailable')
except Exception as e:
self.logger.error("appointment {}", e.__class__)
else:
self.logger.success(
'main was scheduled on {:%Y-%m-%d %H:%M} at '
'{!r} office',
meeting['datetime'], meeting['office']
)
self.account.updates.update(
office_signed=meeting['office'],
datetime_signed=meeting['datetime'],
additional={'email': self.account.email}
)
return is_success
self.logger.warning('unable to make an appointment')
return False
return True
def _schedule_dependents(self, meetings_iterator: 'safe_iter'):
p = ApplicantsPage(self.driver)
for tab_index, dependent in enumerate(
sorted(self.account.dependents, key=lambda x: x.id),
start=1
):
if dependent.is_signed or dependent.updates.status == (
settings.DISABLE_APPOINTMENT_CHECKS_STATUS
):
continue
self.driver.switch_to_tab(tab_index)
sleep(0.25)
if self.driver.url == p.URL:
p.get_applicant_appointment()
page = AppointmentPage(self.driver)
page.language = 'en'
self.driver.save_snapshot(settings.SNAPSHOTS_PATH)
while meeting := self.get_valid_meeting(meetings_iterator):
try:
page.refresh()
is_success = page.schedule(meeting)
if not is_success:
raise selenium_exceptions.NoSuchElementException
except selenium_exceptions.NoSuchElementException:
self.logger.warning(f'Meeting {meeting} is unavailable')
except Exception as e:
self.logger.error(
f'{dependent.name!r} appointment {e.__class__}'
)
else:
self.logger.success(
'{!r} was scheduled on {:%Y-%m-%d %H:%M} '
'at {!r} office',
dependent.name,
meeting['datetime'],
meeting['office']
)
dependent.updates.update(
office_signed=meeting['office'],
datetime_signed=meeting['datetime'],
additional={
'email': self.account.email,
'dependent_name': dependent.name
}
)
break
else:
# ran out of valid meetings for this dependent, so stop scheduling
return False
return True
def _create_thread(
self, func: Callable[[], bool], sleep_time_range: Default
):
def infinite_loop():
while True:
result = func()
if result is None:
# None is returned by @logger.catch when an error occurred
bot.send_error(self.account.email, 'error occurred')
sleep(random.choice(settings.RequestTimeout.ERROR.value))
else:
sleep(random.choice(sleep_time_range.value))
threading.Thread(target=infinite_loop, daemon=True).start()
def start(self, *, checks: Iterable[settings.Check]):
checks_methods = {
settings.Check.APPOINTMENT: {
'method': self.schedule_appointments,
'sleep_time_range': settings.RequestTimeout.APPOINTMENT
},
settings.Check.STATUS: {
'method': self.update_status,
'sleep_time_range': settings.RequestTimeout.STATUS
}
}
self.update_status()
for check in set(checks):
data = checks_methods[check]
self._create_thread(data['method'], data['sleep_time_range'])
def main():
logger.info("Parser started")
crawlers = []
for account, data in settings.ACCOUNTS.items():
try:
crawler = Crawler(account, data)
except Exception as e:
logger.error(
f'Crawler {account["email"]} raised '
f'{e.__class__.__name__}: {str(e)}'
)
else:
crawlers.append(crawler)
crawler.start = partial(crawler.start, checks=data['checks'])
if not crawlers:
logger.error('All crawlers are dead')
return
for crawler in crawlers:
crawler.start()
bot.infinity_polling()
logger.info("Shutting down the parser")
# Kill all instances of driver
if crawlers:
subprocess.call(
settings.ChromeData.TASK_KILL_COMMAND.split(),
stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT
)
|
test_scale_square.py
|
# scale.py
import os
import threading
import unittest
from sql30 import db
DB_NAME = './square.db'
class Config(db.Model):
TABLE = 'square'
PKEY = 'num'
DB_SCHEMA = {
'db_name': DB_NAME,
'tables': [
{
'name': TABLE,
'col_order': ['num', 'square'],
'fields': {
'num': 'int',
'square': 'int',
},
'primary_key': PKEY
}]
}
VALIDATE_BEFORE_WRITE = True
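# A minimal usage sketch of the Config model above (it mirrors what the tests
# below do with sql30's create()/read()/close()):
#
#     db = Config()
#     db.table = Config.TABLE
#     db.create(num=2, square=4)
#     recs = db.read(num=2)
#     db.close()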
class ScaleTest(unittest.TestCase):
SQUARE_NUM_UPTO = 20 # make it multiple of 5
TABLE = 'square'
def setUp(self):
if os.path.exists(DB_NAME):
os.remove(DB_NAME)
db = Config()
db.table = self.TABLE
db.create(num=-1, square=1)
def test_scale(self):
def func(start, end):
db = Config()
db.table = self.TABLE
for x in range(start, end):
db.create(num=x, square=x*x)
db.close()
# Below, SQUARE_NUM_UPTO / 5 threads are created in parallel; each
# thread writes the squares of 5 consecutive numbers to the database
# at the same time.
workers = []
for i in range(int(self.SQUARE_NUM_UPTO / 5)):
start, end = i*5, i*5 + 5
t = threading.Thread(target=func, args=(start, end))
workers.append(t)
t.start()
_ = [t.join() for t in workers]
db = Config()
db.table = self.TABLE
# read all the records and check that entries were made for all
# of them.
recs = db.read()
# print (sorted(recs))
keys = [x for x, _ in recs]
# print(sorted(keys))
assert all([x in keys for x in range(self.SQUARE_NUM_UPTO)])
def test_context(self):
"""
Tests for context manager operations.
"""
# TEST CASE 1:
# Create a new database instance and use it under context
# manager. This is the suggested usecase. Context Manager
# when exiting, commits the work and also closes the
# connection so user doesn't have to explicitly do so.
with Config() as db:
db.table = db.TABLE
db.create(num=-2, square=4)
# TEST CASE 2:
# Read data back in new context to ensure data was saved in
# the previous context.
db = Config()
with db.getContext() as db:
recs = db.read(tbl=db.TABLE, num=-2)
self.assertEqual(len(recs), 1)
def tearDown(self):
os.remove(DB_NAME)
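# Standard unittest entry point, added so the file can also be run directly
# (the original relies on an external test runner):
if __name__ == '__main__':
    unittest.main()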
|
datacollector.py
|
from multiprocessing import Process, Queue
def get_dates(queue, start_date='2010/07/17', end_date='2023/01/07'):
from bs4 import BeautifulSoup  # web scraping; install with: pip install beautifulsoup4
import requests  # HTTP requests; install with: pip install requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
import re  # regular expressions for pattern-based data extraction (standard library)
import pandas as pd  # dataframes; install with: pip install pandas
from csv import reader  # CSV parsing into lists (standard library)
from tqdm import tqdm
import os
import shutil
import uuid
import time
# dir = str(uuid.uuid4())
# if os.path.exists(dir):
# shutil.rmtree(dir)
# os.makedirs(dir)
dir = r'/Users/dennisgrobe/Nextcloud/Documents/Coding/Portfolio Management/Bitcoin-Fork'
features=pd.DataFrame({'features':['transactions','size','sentbyaddress','difficulty','hashrate','mining_profitability','sentinusd','transactionfees','median_transaction_fee','confirmationtime','transactionvalue','mediantransactionvalue','activeaddresses','top100cap','fee_to_reward','price']})
indicators=pd.DataFrame({'indicators':['sma','ema','wma','trx','mom','std','var','rsi','roc']})
periods=pd.DataFrame({'periods':['3','7','14','30','90']})
crypto=pd.DataFrame({'crypto':['btc']})
df=pd.concat([crypto, features,indicators,periods], axis=1)
#for raw values
#all kinds of fees and transaction values are in USD. divide by price USD to obtain BTC
url_list=[] #stores generated urls
feature_list=[] #store feature names
i=0
while (i<=15): #this loop generates urls for raw values
url='https://bitinfocharts.com/comparison/'+df['features'][i]+'-'+'btc'+'.html'
feature = df['features'][i]
if "fee" in feature:
feature=df['features'][i]+'USD'
if 'value' in feature:
feature=df['features'][i]+'USD'
if 'usd' in feature:
feature=df['features'][i]+'USD'
url_list.append(url)
feature_list.append(feature)
#print(feature,' ',url)
i=i+1
#for indicators
#all kinds of fees and transaction values are in USD. drop them or recalculate them after converting the raw values to the BTC
i=0
while (i<=15): #this nested while loop generates url structure for all the indicators. for other currencies change btc to CURRENCY_NAME
j=0
while (j<=8):
k=0
while (k<=4):
url='https://bitinfocharts.com/comparison/'+df['features'][i]+'-'+'btc'+'-'+df['indicators'][j]+df['periods'][k]+'.html'
feature=df['features'][i]+df['periods'][k]+df['indicators'][j]
if "fee" in feature:
feature=df['features'][i]+df['periods'][k]+df['indicators'][j]+'USD'
if 'value' in feature:
feature=df['features'][i]+df['periods'][k]+df['indicators'][j]+'USD'
if 'price' in feature:
feature=df['features'][i]+df['periods'][k]+df['indicators'][j]+'USD'
if 'usd' in feature:
feature=df['features'][i]+df['periods'][k]+df['indicators'][j]+'USD'
if 'fee_in_reward' in feature:
feature=df['features'][i]+df['periods'][k]+df['indicators'][j]
url_list.append(url)
feature_list.append(feature)
#print(feature,' ',url)
k=k+1
j=j+1
i=i+1
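# Example of what the two loops above generate (using the first feature,
# indicator and period from the tables): the raw-value loop yields
#   https://bitinfocharts.com/comparison/transactions-btc.html  -> feature 'transactions'
# and the indicator loop yields
#   https://bitinfocharts.com/comparison/transactions-btc-sma3.html  -> feature 'transactions3sma'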
df_feature=pd.DataFrame(feature_list,columns=['Features']) # convert feature list to dataframe
df_url=pd.DataFrame(url_list,columns=['URL']) # convert url list to dataframe
df2=df_feature.join(df_url) # join the feature and url dataframes
features=pd.DataFrame(columns=df2.Features) #change the feature list to columns
columns=len(features.columns) #to be used in while loop for getting data
date=[] #create a date column for each feature. this is necessary for aligning by dates later
print('Building URLs ...')
for i in tqdm(range(len(features.columns))):
date=features.columns[i] + 'Date'
features[date]=date
i=0
print('Requesting data ... ')
for i in tqdm(range(columns)):  # the most important step: fetching the data from the website. Don't abuse it; heavy requesting can get your IP banned.
columnNames=[features.columns[i+columns],features.columns[i]]
url = df2.URL[i]
session = requests.Session()
retry = Retry(connect=10, backoff_factor=3)
adapter = HTTPAdapter(max_retries=retry)
session.mount('http://', adapter)
session.mount('https://', adapter)
page=session.get(url)
#page = requests.get(url, time.sleep(3),timeout=10)
#print(page)
soup = BeautifulSoup(page.content, 'html.parser')
# values=soup.find_all('script')[4].get_text() # old version
values = soup.find_all('script')[4].string
newval=values.replace('[new Date("','')
newval2=newval.replace('"),',";")
newval3=newval2.replace('],',',')
newval4=newval3.replace('],',']]')
newval5=newval4.replace('null','0')
x = re.findall('\\[(.+?)\\]\\]', newval5)
df3=pd.DataFrame( list(reader(x)))
df_transposed=df3.transpose()
df_transposed.columns=['Value']
df_new=df_transposed['Value'].str.split(';', n=1, expand=True)
df_new.columns= columnNames
mask = (df_new[features.columns[i+columns]] >= start_date) & (df_new[features.columns[i+columns]] <= end_date)
df_new= df_new.loc[mask]
features[features.columns[i]] = df_new[features.columns[i]]
features[features.columns[i+columns]]= df_new[features.columns[i+columns]]
df_new.columns = df_new.columns.str.replace('.*Date*', 'Date', regex=True)  # pass regex=True explicitly to keep the current behavior (pandas FutureWarning: the default will change to False)
path=dir+'/data/'+features.columns[i]+'.csv'
df_new.set_index('Date', inplace=True)
df_new.to_csv(path,sep=',', columns=[features.columns[i]])
#print(df_new)
#i = i+1
i=0
pricepath=dir+'/data/'+'price.csv'
df_merge=pd.read_csv(pricepath,sep=',')
print('Processing files ... ')
for i in tqdm(range(columns)):
path=dir+'/data/'+features.columns[i]+'.csv'
df=pd.read_csv(path,sep=',')
df_merge = pd.merge(df_merge, df, left_on='Date', right_on='Date')
#i = i+1
df_merge.drop(columns=['price_y'], inplace=True)
df_merge.columns = df_merge.columns.str.replace('price_x', 'priceUSD')
path2=dir+'/data_main/'+'Merged_Unconverted_BTC_Data.csv'
df_merge.to_csv(path2,sep=',')
df_all = pd.read_csv(path2,sep = ',')
#mediantransactionvalue_cols = [col for col in df_all.columns if 'mediantransactionvalue' in col]
#mediantransactionvalue_BTC_cols = [w.replace('USD', 'BTC') for w in mediantransactionvalue_cols]
#df_all[mediantransactionvalue_BTC_cols] = df_all[mediantransactionvalue_cols].div(df_all['priceUSD'].values,axis=0)
#sentinusd_cols = [col for col in df_all.columns if 'sentinusd' in col]
#sentinusd_BTC_cols = [w.replace('USD', 'BTC') for w in sentinusd_cols]
#df_all[sentinusd_BTC_cols] = df_all[sentinusd_cols].div(df_all['priceUSD'].values,axis=0)
#transactionfees_cols = [col for col in df_all.columns if 'transactionfees' in col]
#transactionfees_BTC_cols = [w.replace('USD', 'BTC') for w in transactionfees_cols]
#df_all[transactionfees_BTC_cols] = df_all[transactionfees_cols].div(df_all['priceUSD'].values,axis=0)
#median_transaction_fee_cols = [col for col in df_all.columns if 'median_transaction_fee' in col]
#median_transaction_fee_BTC_cols = [w.replace('USD', 'BTC') for w in median_transaction_fee_cols]
#df_all[median_transaction_fee_BTC_cols] = df_all[median_transaction_fee_cols].div(df_all['priceUSD'].values,axis=0)
#transactionvalue_cols = [col for col in df_all.columns if 'transactionvalue' in col]
#transactionvalue_BTC_cols = [w.replace('USD', 'BTC') for w in transactionvalue_cols]
#df_all[transactionvalue_BTC_cols] = df_all[transactionvalue_cols].div(df_all['priceUSD'].values,axis=0)
#USDvalue_cols = sentinusd_cols+transactionfees_cols+median_transaction_fee_cols+mediantransactionvalue_cols+transactionvalue_cols
#df_all.drop(USDvalue_cols, axis=1, inplace=True)
#df_all.drop(list(df_all.filter(regex = 'price')), axis = 1, inplace = True)
df_all.drop(columns=['Unnamed: 0'], axis=1, inplace=True)
df_all['priceUSD']=df_merge['priceUSD']
filename=dir+'/data_main/'+'BTC_Data.csv'
df_all.to_csv(filename,sep=',')
queue.put(df_all)
def get_data(start_date='2010/07/17', end_date='2023/01/07'):
'''
Collect BTC data between start_date and end_date and return it as a
pandas DataFrame. Dates use the YYYY/MM/DD format.
Example:
from datacollector import get_data
df=get_data('2020/01/01','2020/01/07')
'''
q = Queue()
p = Process(target=get_dates, args=(q, start_date, end_date))
p.start()
df=q.get()
p.join()
return df
if __name__ == '__main__':
get_data()
|
launcher.py
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the implementation of multiple AEA configs launcher."""
import logging
import multiprocessing
from asyncio.events import AbstractEventLoop
from concurrent.futures.process import BrokenProcessPool
from multiprocessing.synchronize import Event
from os import PathLike
from threading import Thread
from typing import Any, Callable, Dict, Optional, Sequence, Tuple, Type, Union
from aea.aea import AEA
from aea.aea_builder import AEABuilder
from aea.exceptions import AEAException
from aea.helpers.base import cd
from aea.helpers.multiple_executor import (
AbstractExecutorTask,
AbstractMultipleExecutor,
AbstractMultipleRunner,
AbstractMultiprocessExecutorTask,
AsyncExecutor,
ExecutorExceptionPolicies,
ProcessExecutor,
TaskAwaitable,
ThreadExecutor,
)
from aea.runtime import AsyncRuntime
logger = logging.getLogger(__name__)
def load_agent(agent_dir: Union[PathLike, str]) -> AEA:
"""
Load AEA from directory.
:param agent_dir: agent configuration directory
:return: AEA instance
"""
with cd(agent_dir):
return AEABuilder.from_aea_project(".").build()
def _set_logger(
log_level: Optional[str],
): # pragma: nocover # used in spawned process and pytest does not see this code
from aea.cli.utils.loggers import ( # pylint: disable=import-outside-toplevel
default_logging_config, # pylint: disable=import-outside-toplevel
)
logger = logging.getLogger("aea")
logger = default_logging_config(logger)
if log_level is not None:
level = logging.getLevelName(log_level)
logger.setLevel(level)
def _run_agent(
agent_dir: Union[PathLike, str], stop_event: Event, log_level: Optional[str] = None
) -> None:
"""
Load and run agent in a dedicated process.
:param agent_dir: agent configuration directory
:param stop_event: multithreading Event to stop agent run.
:param log_level: debug level applied for AEA in subprocess
:return: None
"""
_set_logger(log_level=log_level)
agent = load_agent(agent_dir)
def stop_event_thread():
try:
stop_event.wait()
except (KeyboardInterrupt, EOFError, BrokenPipeError) as e: # pragma: nocover
logger.error(
f"Exception raised in stop_event_thread {e} {type(e)}. Skip it, looks process is closed."
)
finally:
agent.stop()
Thread(target=stop_event_thread, daemon=True).start()
try:
agent.start()
except KeyboardInterrupt: # pragma: nocover
logger.debug("_run_agent: keyboard interrupt")
except BaseException as e: # pragma: nocover
logger.exception("exception in _run_agent")
exc = AEAException(f"Raised {type(e)}({e})")
exc.__traceback__ = e.__traceback__
raise exc
finally:
agent.stop()
class AEADirTask(AbstractExecutorTask):
"""Task to run agent from agent configuration directory."""
def __init__(self, agent_dir: Union[PathLike, str]) -> None:
"""
Init aea config dir task.
:param agent_dir: directory with the aea config.
"""
self._agent_dir = agent_dir
self._agent: AEA = load_agent(self._agent_dir)
super().__init__()
def start(self) -> None:
"""Start task."""
self._agent.start()
def stop(self):
"""Stop task."""
if not self._agent: # pragma: nocover
raise Exception("Task was not started!")
self._agent.stop()
def create_async_task(self, loop: AbstractEventLoop) -> TaskAwaitable:
"""Return asyncio Task for task run in asyncio loop."""
self._agent.runtime.set_loop(loop)
if not isinstance(self._agent.runtime, AsyncRuntime): # pragma: nocover
raise ValueError(
"Agent runtime is not async compatible. Please use runtime_mode=async"
)
return loop.create_task(self._agent.runtime.run_runtime())
@property
def id(self) -> Union[PathLike, str]:
"""Return agent_dir."""
return self._agent_dir
class AEADirMultiprocessTask(AbstractMultiprocessExecutorTask):
"""
Task to run agent from agent configuration directory.
Version for multiprocess executor mode.
"""
def __init__(
self, agent_dir: Union[PathLike, str], log_level: Optional[str] = None
):
"""
Init aea config dir task.
:param agent_dir: directory with the aea config.
:param log_level: debug level applied for AEA in subprocess
"""
self._agent_dir = agent_dir
self._manager = multiprocessing.Manager()
self._stop_event = self._manager.Event()
self._log_level = log_level
super().__init__()
def start(self) -> Tuple[Callable, Sequence[Any]]:
"""Return function and arguments to call within subprocess."""
return (_run_agent, (self._agent_dir, self._stop_event, self._log_level))
def stop(self):
"""Stop task."""
if self._future.done():
logger.debug("Stop called, but task is already done.")
return
try:
self._stop_event.set()
except (FileNotFoundError, BrokenPipeError, EOFError) as e: # pragma: nocover
logger.error(
f"Exception raised in task.stop {e} {type(e)}. Skip it, looks process is closed."
)
@property
def id(self) -> Union[PathLike, str]:
"""Return agent_dir."""
return self._agent_dir
@property
def failed(self) -> bool:
"""
Return whether the task failed with an exception.
A task that is still running is not considered failed.
:return: bool
"""
if not self._future:
return False
if (
self._future.done()
and self._future.exception()
and isinstance(self._future.exception(), BrokenProcessPool)
): # pragma: nocover
return False
return super().failed
class AEALauncher(AbstractMultipleRunner):
"""Run multiple AEA instances."""
SUPPORTED_MODES: Dict[str, Type[AbstractMultipleExecutor]] = {
"threaded": ThreadExecutor,
"async": AsyncExecutor,
"multiprocess": ProcessExecutor,
}
def __init__(
self,
agent_dirs: Sequence[Union[PathLike, str]],
mode: str,
fail_policy: ExecutorExceptionPolicies = ExecutorExceptionPolicies.propagate,
log_level: Optional[str] = None,
) -> None:
"""
Init AEALauncher.
:param agent_dirs: sequence of AEA config directories.
:param mode: executor name to use.
:param fail_policy: one of ExecutorExceptionPolicies to be used with Executor
:param log_level: debug level applied for AEA in subprocesses
"""
self._agent_dirs = agent_dirs
self._log_level = log_level
super().__init__(mode=mode, fail_policy=fail_policy)
def _make_tasks(self) -> Sequence[AbstractExecutorTask]:
"""Make tasks to run with executor."""
if self._mode == "multiprocess":
return [
AEADirMultiprocessTask(agent_dir, log_level=self._log_level)
for agent_dir in self._agent_dirs
]
else:
return [AEADirTask(agent_dir) for agent_dir in self._agent_dirs]
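# A minimal usage sketch (not part of the original module). The directory
# names are placeholders, and the start()/stop() calls assume the runner
# interface inherited from AbstractMultipleRunner:
#
#     launcher = AEALauncher(["agent_1", "agent_2"], mode="multiprocess")
#     launcher.start()   # agents run until launcher.stop() is called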
|
Spec.py
|
import glob
import json
import os
import sys
from collections import defaultdict
from functools import partial as curry
from . import (
biblio,
boilerplate,
caniuse,
conditional,
config,
constants,
datablocks,
dfns,
extensions,
fingerprinting,
h,
headings,
highlight,
idl,
includes,
inlineTags,
lint,
markdown,
mdnspeclinks,
metadata,
shorthands,
wpt,
)
from .func import Functor
from .h import *
from .InputSource import FileInputSource, InputSource
from .messages import *
from .refs import ReferenceManager
from .unsortedJunk import *
class Spec:
def __init__(
self,
inputFilename,
debug=False,
token=None,
lineNumbers=False,
fileRequester=None,
testing=False,
):
self.valid = False
self.lineNumbers = lineNumbers
if lineNumbers:
# line-numbers are too hacky, so force this to be a dry run
constants.dryRun = True
if inputFilename is None:
inputFilename = findImplicitInputFile()
if inputFilename is None: # still
die(
"No input file specified, and no *.bs or *.src.html files found in current directory.\nPlease specify an input file, or use - to pipe from STDIN."
)
return
self.inputSource = InputSource(inputFilename, chroot=constants.chroot)
self.transitiveDependencies = set()
self.debug = debug
self.token = token
self.testing = testing
if fileRequester is None:
self.dataFile = config.defaultRequester
else:
self.dataFile = fileRequester
self.md = None
self.mdBaseline = None
self.mdDocument = None
self.mdCommandLine = None
self.mdDefaults = None
self.mdOverridingDefaults = None
self.lines = []
self.document = None
self.html = None
self.head = None
self.body = None
self.fillContainers = None
self.valid = self.initializeState()
def initializeState(self):
self.normativeRefs = {}
self.informativeRefs = {}
self.refs = ReferenceManager(fileRequester=self.dataFile, testing=self.testing)
self.externalRefsUsed = defaultdict(lambda: defaultdict(dict))
self.md = None
self.mdBaseline = metadata.MetadataManager()
self.mdDocument = None
self.mdCommandLine = metadata.MetadataManager()
self.mdDefaults = None
self.mdOverridingDefaults = None
self.biblios = {}
self.typeExpansions = {}
self.macros = defaultdict(lambda: "???")
self.canIUse = {}
self.mdnSpecLinks = {}
self.widl = idl.getParser()
self.testSuites = json.loads(self.dataFile.fetch("test-suites.json", str=True))
self.languages = json.loads(self.dataFile.fetch("languages.json", str=True))
self.extraStyles = defaultdict(str)
self.extraStyles["style-colors"] = styleColors
self.extraStyles["style-darkmode"] = styleDarkMode
self.extraStyles["style-md-lists"] = styleMdLists
self.extraStyles["style-autolinks"] = styleAutolinks
self.extraStyles["style-selflinks"] = styleSelflinks
self.extraStyles["style-counters"] = styleCounters
self.extraStyles["style-issues"] = styleIssues
self.extraScripts = defaultdict(str)
try:
inputContent = self.inputSource.read()
self.lines = inputContent.lines
if inputContent.date is not None:
self.mdBaseline.addParsedData("Date", inputContent.date)
except FileNotFoundError:
die(
"Couldn't find the input file at the specified location '{0}'.",
self.inputSource,
)
return False
except OSError:
die("Couldn't open the input file '{0}'.", self.inputSource)
return False
return True
def recordDependencies(self, *inputSources):
self.transitiveDependencies.update(inputSources)
def preprocess(self):
self.transitiveDependencies.clear()
self.assembleDocument()
self.processDocument()
def assembleDocument(self):
# Textual hacks
stripBOM(self)
if self.lineNumbers:
self.lines = hackyLineNumbers(self.lines)
self.lines = markdown.stripComments(self.lines)
self.recordDependencies(self.inputSource)
# Extract and process metadata
self.lines, self.mdDocument = metadata.parse(lines=self.lines)
# First load the metadata sources from 'local' data
self.md = metadata.join(self.mdBaseline, self.mdDocument, self.mdCommandLine)
# Using that to determine the Group and Status, load the correct defaults.include boilerplate
self.mdDefaults = metadata.fromJson(
data=config.retrieveBoilerplateFile(self, "defaults", error=True),
source="defaults",
)
self.md = metadata.join(self.mdBaseline, self.mdDefaults, self.mdDocument, self.mdCommandLine)
# Using all of that, load up the text macros so I can sub them into the computed-metadata file.
self.md.fillTextMacros(self.macros, doc=self)
jsonEscapedMacros = {k: json.dumps(v)[1:-1] for k, v in self.macros.items()}
computedMdText = replaceMacros(
config.retrieveBoilerplateFile(self, "computed-metadata", error=True),
macros=jsonEscapedMacros,
)
self.mdOverridingDefaults = metadata.fromJson(data=computedMdText, source="computed-metadata")
self.md = metadata.join(
self.mdBaseline,
self.mdDefaults,
self.mdOverridingDefaults,
self.mdDocument,
self.mdCommandLine,
)
# Finally, compute the "implicit" things.
self.md.computeImplicitMetadata(doc=self)
# And compute macros again, in case the preceding steps changed them.
self.md.fillTextMacros(self.macros, doc=self)
self.md.validate()
extensions.load(self)
# Initialize things
self.refs.initializeRefs(self)
self.refs.initializeBiblio()
# Deal with further <pre> blocks, and markdown
self.lines = datablocks.transformDataBlocks(self, self.lines)
self.lines = markdown.parse(
self.lines,
self.md.indent,
opaqueElements=self.md.opaqueElements,
blockElements=self.md.blockElements,
)
# Note that, currently, markdown.parse returns an array of strings, not of Line objects.
self.refs.setSpecData(self.md)
# Convert to a single string of html now, for convenience.
self.html = "".join(line.text for line in self.lines)
boilerplate.addHeaderFooter(self)
self.html = self.fixText(self.html)
# Build the document
self.document = parseDocument(self.html)
self.head = find("head", self)
self.body = find("body", self)
correctH1(self)
includes.processInclusions(self)
metadata.parseDoc(self)
def processDocument(self):
# Fill in and clean up a bunch of data
conditional.processConditionals(self)
self.fillContainers = locateFillContainers(self)
lint.exampleIDs(self)
boilerplate.addBikeshedVersion(self)
boilerplate.addCanonicalURL(self)
boilerplate.addFavicon(self)
boilerplate.addSpecVersion(self)
boilerplate.addStatusSection(self)
boilerplate.addLogo(self)
boilerplate.addCopyright(self)
boilerplate.addSpecMetadataSection(self)
boilerplate.addAbstract(self)
boilerplate.addExpiryNotice(self)
boilerplate.addObsoletionNotice(self)
boilerplate.addAtRisk(self)
addNoteHeaders(self)
boilerplate.removeUnwantedBoilerplate(self)
wpt.processWptElements(self)
shorthands.run(self)
inlineTags.processTags(self)
canonicalizeShortcuts(self)
addImplicitAlgorithms(self)
fixManualDefTables(self)
headings.processHeadings(self)
checkVarHygiene(self)
processIssuesAndExamples(self)
idl.markupIDL(self)
inlineRemoteIssues(self)
addImageSize(self)
# Handle all the links
processBiblioLinks(self)
processDfns(self)
idl.processIDL(self)
dfns.annotateDfns(self)
formatArgumentdefTables(self)
formatElementdefTables(self)
processAutolinks(self)
biblio.dedupBiblioReferences(self)
verifyUsageOfAllLocalBiblios(self)
caniuse.addCanIUsePanels(self)
boilerplate.addIndexSection(self)
boilerplate.addExplicitIndexes(self)
boilerplate.addStyles(self)
boilerplate.addReferencesSection(self)
boilerplate.addPropertyIndex(self)
boilerplate.addIDLSection(self)
boilerplate.addIssuesSection(self)
boilerplate.addCustomBoilerplate(self)
headings.processHeadings(self, "all") # again
boilerplate.removeUnwantedBoilerplate(self)
boilerplate.addTOCSection(self)
addSelfLinks(self)
processAutolinks(self)
boilerplate.addAnnotations(self)
boilerplate.removeUnwantedBoilerplate(self)
# Add MDN panels after all IDs/anchors have been added
mdnspeclinks.addMdnPanels(self)
highlight.addSyntaxHighlighting(self)
boilerplate.addBikeshedBoilerplate(self)
fingerprinting.addTrackingVector(self)
fixIntraDocumentReferences(self)
fixInterDocumentReferences(self)
removeMultipleLinks(self)
forceCrossorigin(self)
lint.brokenLinks(self)
lint.accidental2119(self)
lint.missingExposed(self)
lint.requiredIDs(self)
lint.unusedInternalDfns(self)
# Any final HTML cleanups
cleanupHTML(self)
if self.md.prepTR:
# Don't try and override the W3C's icon.
for el in findAll("[rel ~= 'icon']", self):
removeNode(el)
# Make sure the W3C stylesheet is after all other styles.
for el in findAll("link", self):
if el.get("href").startswith("https://www.w3.org/StyleSheets/TR"):
appendChild(find("head", self), el)
# Ensure that all W3C links are https.
for el in findAll("a", self):
href = el.get("href", "")
if href.startswith("http://www.w3.org") or href.startswith("http://lists.w3.org"):
el.set("href", "https" + href[4:])
text = el.text or ""
if text.startswith("http://www.w3.org") or text.startswith("http://lists.w3.org"):
el.text = "https" + text[4:]
# Loaded from .include files
extensions.BSPrepTR(self) # pylint: disable=no-member
return self
def serialize(self):
try:
rendered = h.Serializer(self.md.opaqueElements, self.md.blockElements).serialize(self.document)
except Exception as e:
die("{0}", e)
return
rendered = finalHackyCleanup(rendered)
return rendered
def fixMissingOutputFilename(self, outputFilename):
if outputFilename is None:
# More sensible defaults!
if not isinstance(self.inputSource, FileInputSource):
outputFilename = "-"
elif self.inputSource.sourceName.endswith(".bs"):
outputFilename = self.inputSource.sourceName[0:-3] + ".html"
elif self.inputSource.sourceName.endswith(".src.html"):
outputFilename = self.inputSource.sourceName[0:-9] + ".html"
else:
outputFilename = "-"
return outputFilename
def finish(self, outputFilename=None, newline=None):
self.printResultMessage()
outputFilename = self.fixMissingOutputFilename(outputFilename)
rendered = self.serialize()
if not constants.dryRun:
try:
if outputFilename == "-":
sys.stdout.write(rendered)
else:
with open(outputFilename, "w", encoding="utf-8", newline=newline) as f:
f.write(rendered)
except Exception as e:
die(
"Something prevented me from saving the output document to {0}:\n{1}",
outputFilename,
e,
)
def printResultMessage(self):
# If I reach this point, I've succeeded, but maybe with reservations.
fatals = messageCounts["fatal"]
links = messageCounts["linkerror"]
warnings = messageCounts["warning"]
if self.lineNumbers:
warn("Because --line-numbers was used, no output was saved.")
if fatals:
success("Successfully generated, but fatal errors were suppressed")
return
if links:
success("Successfully generated, with {0} linking errors", links)
return
if warnings:
success("Successfully generated, with warnings")
return
def watch(self, outputFilename, port=None, localhost=False):
import time
outputFilename = self.fixMissingOutputFilename(outputFilename)
if self.inputSource.mtime() is None:
die(f"Watch mode doesn't support {self.inputSource}")
if outputFilename == "-":
die("Watch mode doesn't support streaming to STDOUT.")
return
if port:
# Serve the folder on an HTTP server
import http.server
import socketserver
import threading
class SilentServer(http.server.SimpleHTTPRequestHandler):
def log_message(self, format, *args):
pass
socketserver.TCPServer.allow_reuse_address = True
server = socketserver.TCPServer(("localhost" if localhost else "", port), SilentServer)
print(f"Serving at port {port}")
thread = threading.Thread(target=server.serve_forever)
thread.daemon = True
thread.start()
else:
server = None
mdCommandLine = self.mdCommandLine
try:
self.preprocess()
self.finish(outputFilename)
lastInputModified = {dep: dep.mtime() for dep in self.transitiveDependencies}
p("==============DONE==============")
try:
while True:
# Comparing mtimes with "!=" handles when a file starts or
# stops existing, and it's fine to rebuild if an mtime
# somehow gets older.
if any(input.mtime() != lastModified for input, lastModified in lastInputModified.items()):
resetSeenMessages()
p("Source file modified. Rebuilding...")
self.initializeState()
self.mdCommandLine = mdCommandLine
self.preprocess()
self.finish(outputFilename)
lastInputModified = {dep: dep.mtime() for dep in self.transitiveDependencies}
p("==============DONE==============")
time.sleep(1)
except KeyboardInterrupt:
p("Exiting~")
if server:
server.shutdown()
thread.join()
sys.exit(0)
except Exception as e:
die("Something went wrong while watching the file:\n{0}", e)
def fixText(self, text, moreMacros={}):
# Do several textual replacements that need to happen *before* the document is parsed as h.
# If markdown shorthands are on, remove all `foo`s while processing,
# so their contents don't accidentally trigger other stuff.
# Also handle markdown escapes.
if "markdown" in self.md.markupShorthands:
textFunctor = MarkdownCodeSpans(text)
else:
textFunctor = Functor(text)
macros = dict(self.macros, **moreMacros)
textFunctor = textFunctor.map(curry(replaceMacros, macros=macros))
textFunctor = textFunctor.map(fixTypography)
if "css" in self.md.markupShorthands:
textFunctor = textFunctor.map(replaceAwkwardCSSShorthands)
return textFunctor.extract()
def printTargets(self):
p("Exported terms:")
for el in findAll("[data-export]", self):
for term in config.linkTextsFromElement(el):
p(" " + term)
p("Unexported terms:")
for el in findAll("[data-noexport]", self):
for term in config.linkTextsFromElement(el):
p(" " + term)
def isOpaqueElement(self, el):
if el.tag in self.md.opaqueElements:
return True
if el.get("data-opaque") is not None:
return True
return False
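
# Editor-added, illustrative only: the watch() method above rebuilds whenever any
# dependency's mtime differs from the last snapshot; comparing with "!=" also covers
# files that start or stop existing. This standalone helper restates that check in
# isolation. It assumes "deps" is an iterable of objects exposing an mtime() method
# (as the input sources above do) and is not called anywhere in Bikeshed itself.
def _dependenciesChanged(deps, lastSnapshot):
    """Return (changed, newSnapshot) for a set of watched dependencies."""
    newSnapshot = {dep: dep.mtime() for dep in deps}
    # Dict inequality covers changed mtimes as well as added or removed dependencies.
    changed = newSnapshot != lastSnapshot
    return changed, newSnapshot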
def findImplicitInputFile():
"""
Find what input file the user *probably* wants to use,
by scanning the current folder.
In preference order:
1. index.bs
2. Overview.bs
3. the first file with a .bs extension
4. the first file with a .src.html extension
"""
if os.path.isfile("index.bs"):
return "index.bs"
if os.path.isfile("Overview.bs"):
return "Overview.bs"
allBs = glob.glob("*.bs")
if allBs:
return allBs[0]
allHtml = glob.glob("*.src.html")
if allHtml:
return allHtml[0]
return None
constants.specClass = Spec
styleColors = """
/* Any --*-text not paired with a --*-bg is assumed to have a transparent bg */
:root {
color-scheme: light dark;
--text: black;
--bg: white;
--unofficial-watermark: url(https://www.w3.org/StyleSheets/TR/2016/logos/UD-watermark);
--logo-bg: #1a5e9a;
--logo-active-bg: #c00;
--logo-text: white;
--tocnav-normal-text: #707070;
--tocnav-normal-bg: var(--bg);
--tocnav-hover-text: var(--tocnav-normal-text);
--tocnav-hover-bg: #f8f8f8;
--tocnav-active-text: #c00;
--tocnav-active-bg: var(--tocnav-normal-bg);
--tocsidebar-text: var(--text);
--tocsidebar-bg: #f7f8f9;
--tocsidebar-shadow: rgba(0,0,0,.1);
--tocsidebar-heading-text: hsla(203,20%,40%,.7);
--toclink-text: var(--text);
--toclink-underline: #3980b5;
--toclink-visited-text: var(--toclink-text);
--toclink-visited-underline: #054572;
--heading-text: #005a9c;
--hr-text: var(--text);
--algo-border: #def;
--del-text: red;
--del-bg: transparent;
--ins-text: #080;
--ins-bg: transparent;
--a-normal-text: #034575;
--a-normal-underline: #bbb;
--a-visited-text: var(--a-normal-text);
--a-visited-underline: #707070;
--a-hover-bg: rgba(75%, 75%, 75%, .25);
--a-active-text: #c00;
--a-active-underline: #c00;
--blockquote-border: silver;
--blockquote-bg: transparent;
--blockquote-text: currentcolor;
--issue-border: #e05252;
--issue-bg: #fbe9e9;
--issue-text: var(--text);
--issueheading-text: #831616;
--example-border: #e0cb52;
--example-bg: #fcfaee;
--example-text: var(--text);
--exampleheading-text: #574b0f;
--note-border: #52e052;
--note-bg: #e9fbe9;
--note-text: var(--text);
--noteheading-text: hsl(120, 70%, 30%);
--notesummary-underline: silver;
--assertion-border: #aaa;
--assertion-bg: #eee;
--assertion-text: black;
--advisement-border: orange;
--advisement-bg: #fec;
--advisement-text: var(--text);
--advisementheading-text: #b35f00;
--warning-border: red;
--warning-bg: hsla(40,100%,50%,0.95);
--warning-text: var(--text);
--amendment-border: #330099;
--amendment-bg: #F5F0FF;
--amendment-text: var(--text);
--amendmentheading-text: #220066;
--def-border: #8ccbf2;
--def-bg: #def;
--def-text: var(--text);
--defrow-border: #bbd7e9;
--datacell-border: silver;
--indexinfo-text: #707070;
--indextable-hover-text: black;
--indextable-hover-bg: #f7f8f9;
--outdatedspec-bg: rgba(0, 0, 0, .5);
--outdatedspec-text: black;
--outdated-bg: maroon;
--outdated-text: white;
--outdated-shadow: red;
--editedrec-bg: darkorange;
}"""
styleDarkMode = """
@media (prefers-color-scheme: dark) {
:root {
--text: #ddd;
--bg: black;
--unofficial-watermark: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='400' height='400'%3E%3Cg fill='%23100808' transform='translate(200 200) rotate(-45) translate(-200 -200)' stroke='%23100808' stroke-width='3'%3E%3Ctext x='50%25' y='220' style='font: bold 70px sans-serif; text-anchor: middle; letter-spacing: 6px;'%3EUNOFFICIAL%3C/text%3E%3Ctext x='50%25' y='305' style='font: bold 70px sans-serif; text-anchor: middle; letter-spacing: 6px;'%3EDRAFT%3C/text%3E%3C/g%3E%3C/svg%3E");
--logo-bg: #1a5e9a;
--logo-active-bg: #c00;
--logo-text: white;
--tocnav-normal-text: #999;
--tocnav-normal-bg: var(--bg);
--tocnav-hover-text: var(--tocnav-normal-text);
--tocnav-hover-bg: #080808;
--tocnav-active-text: #f44;
--tocnav-active-bg: var(--tocnav-normal-bg);
--tocsidebar-text: var(--text);
--tocsidebar-bg: #080808;
--tocsidebar-shadow: rgba(255,255,255,.1);
--tocsidebar-heading-text: hsla(203,20%,40%,.7);
--toclink-text: var(--text);
--toclink-underline: #6af;
--toclink-visited-text: var(--toclink-text);
--toclink-visited-underline: #054572;
--heading-text: #8af;
--hr-text: var(--text);
--algo-border: #456;
--del-text: #f44;
--del-bg: transparent;
--ins-text: #4a4;
--ins-bg: transparent;
--a-normal-text: #6af;
--a-normal-underline: #555;
--a-visited-text: var(--a-normal-text);
--a-visited-underline: var(--a-normal-underline);
--a-hover-bg: rgba(25%, 25%, 25%, .2);
--a-active-text: #f44;
--a-active-underline: var(--a-active-text);
--borderedblock-bg: rgba(255, 255, 255, .05);
--blockquote-border: silver;
--blockquote-bg: var(--borderedblock-bg);
--blockquote-text: currentcolor;
--issue-border: #e05252;
--issue-bg: var(--borderedblock-bg);
--issue-text: var(--text);
--issueheading-text: hsl(0deg, 70%, 70%);
--example-border: hsl(50deg, 90%, 60%);
--example-bg: var(--borderedblock-bg);
--example-text: var(--text);
--exampleheading-text: hsl(50deg, 70%, 70%);
--note-border: hsl(120deg, 100%, 35%);
--note-bg: var(--borderedblock-bg);
--note-text: var(--text);
--noteheading-text: hsl(120, 70%, 70%);
--notesummary-underline: silver;
--assertion-border: #444;
--assertion-bg: var(--borderedblock-bg);
--assertion-text: var(--text);
--advisement-border: orange;
--advisement-bg: #222218;
--advisement-text: var(--text);
--advisementheading-text: #f84;
--warning-border: red;
--warning-bg: hsla(40,100%,20%,0.95);
--warning-text: var(--text);
--amendment-border: #330099;
--amendment-bg: #080010;
--amendment-text: var(--text);
--amendmentheading-text: #cc00ff;
--def-border: #8ccbf2;
--def-bg: #080818;
--def-text: var(--text);
--defrow-border: #136;
--datacell-border: silver;
--indexinfo-text: #aaa;
--indextable-hover-text: var(--text);
--indextable-hover-bg: #181818;
--outdatedspec-bg: rgba(255, 255, 255, .5);
--outdatedspec-text: black;
--outdated-bg: maroon;
--outdated-text: white;
--outdated-shadow: red;
--editedrec-bg: darkorange;
}
/* In case a transparent-bg image doesn't expect to be on a dark bg,
which is quite common in practice... */
img { background: white; }
}"""
styleMdLists = """
/* This is a weird hack for me not yet following the commonmark spec
   regarding paragraphs and lists. */
[data-md] > :first-child {
margin-top: 0;
}
[data-md] > :last-child {
margin-bottom: 0;
}"""
styleAutolinks = """
.css.css, .property.property, .descriptor.descriptor {
color: var(--a-normal-text);
font-size: inherit;
font-family: inherit;
}
.css::before, .property::before, .descriptor::before {
content: "‘";
}
.css::after, .property::after, .descriptor::after {
content: "’";
}
.property, .descriptor {
/* Don't wrap property and descriptor names */
white-space: nowrap;
}
.type { /* CSS value <type> */
font-style: italic;
}
pre .property::before, pre .property::after {
content: "";
}
[data-link-type="property"]::before,
[data-link-type="propdesc"]::before,
[data-link-type="descriptor"]::before,
[data-link-type="value"]::before,
[data-link-type="function"]::before,
[data-link-type="at-rule"]::before,
[data-link-type="selector"]::before,
[data-link-type="maybe"]::before {
content: "‘";
}
[data-link-type="property"]::after,
[data-link-type="propdesc"]::after,
[data-link-type="descriptor"]::after,
[data-link-type="value"]::after,
[data-link-type="function"]::after,
[data-link-type="at-rule"]::after,
[data-link-type="selector"]::after,
[data-link-type="maybe"]::after {
content: "’";
}
[data-link-type].production::before,
[data-link-type].production::after,
.prod [data-link-type]::before,
.prod [data-link-type]::after {
content: "";
}
[data-link-type=element],
[data-link-type=element-attr] {
font-family: Menlo, Consolas, "DejaVu Sans Mono", monospace;
font-size: .9em;
}
[data-link-type=element]::before { content: "<" }
[data-link-type=element]::after { content: ">" }
[data-link-type=biblio] {
white-space: pre;
}"""
styleSelflinks = """
:root {
--selflink-text: white;
--selflink-bg: gray;
--selflink-hover-text: black;
}
.heading, .issue, .note, .example, li, dt {
position: relative;
}
a.self-link {
position: absolute;
top: 0;
left: calc(-1 * (3.5rem - 26px));
width: calc(3.5rem - 26px);
height: 2em;
text-align: center;
border: none;
transition: opacity .2s;
opacity: .5;
}
a.self-link:hover {
opacity: 1;
}
.heading > a.self-link {
font-size: 83%;
}
li > a.self-link {
left: calc(-1 * (3.5rem - 26px) - 2em);
}
dfn > a.self-link {
top: auto;
left: auto;
opacity: 0;
width: 1.5em;
height: 1.5em;
background: var(--selflink-bg);
color: var(--selflink-text);
font-style: normal;
transition: opacity .2s, background-color .2s, color .2s;
}
dfn:hover > a.self-link {
opacity: 1;
}
dfn > a.self-link:hover {
color: var(--selflink-hover-text);
}
a.self-link::before { content: "¶"; }
.heading > a.self-link::before { content: "§"; }
dfn > a.self-link::before { content: "#"; }
"""
styleDarkMode += """
@media (prefers-color-scheme: dark) {
:root {
--selflink-text: black;
--selflink-bg: silver;
--selflink-hover-text: white;
}
}
"""
styleCounters = """
body {
counter-reset: example figure issue;
}
.issue {
counter-increment: issue;
}
.issue:not(.no-marker)::before {
content: "Issue " counter(issue);
}
.example {
counter-increment: example;
}
.example:not(.no-marker)::before {
content: "Example " counter(example);
}
.invalid.example:not(.no-marker)::before,
.illegal.example:not(.no-marker)::before {
content: "Invalid Example" counter(example);
}
figcaption {
counter-increment: figure;
}
figcaption:not(.no-marker)::before {
content: "Figure " counter(figure) " ";
}"""
styleIssues = """
a[href].issue-return {
float: right;
float: inline-end;
color: var(--issueheading-text);
font-weight: bold;
text-decoration: none;
}
"""
|
FactorUpdateManager.py
|
from Core.DAO.TickDataDao import TickDataDao
from Core.DAO.FactorDao.FactorDao import FactorDao
from Core.Conf.TickDataConf import TickDataConf
from Core.WorkerNode.WorkerNodeImpl.WorkerTaskManager import TaskGroup, TaskConst, Task
from Core.Error.Error import Error
from Core.Conf.PathConf import Path
from Core.WorkerNode.WorkerNodeImpl.Message import FinishACKMessage, KillMessage, MessageLogger
from Core.WorkerNode.WorkerNodeImpl.FileSaver import FileSaver
from Core.WorkerNode.WorkerNodeImpl.MessageSender import MessageSender
import sys, os, traceback, datetime, threading
import pandas as pd
class FactorUpdateManager(object):
"""
    FactorUpdateManager calculates which dates still need updating and splits a factor
    update task into unit tasks, each of which updates a single day of factor data.
"""
def __init__(self, task_manager, db_engine, logger):
self._logger = logger
self._task_manager = task_manager
self._db_engine = db_engine
self._tick_dao = TickDataDao(db_engine, logger)
self._factor_dao = FactorDao(db_engine, logger)
threading.Thread(target=lambda: self._task_manager.run()).start()
def update_linkage(self, factor, version, stock_code, task_id):
"""
        update factor data for the given factor, version and stock code
        :return: (err_code, number of per-day unit tasks enqueued)
"""
generator_path = "{0}/{1}/{2}".format(Path.FACTOR_GENERATOR_BASE, factor, version)
# download file if not exists
if not os.path.exists(generator_path):
# download script
err, code_file = self._factor_dao.get_factor_version_code(factor, version)
if err:
                return err, None
err = FileSaver.save_code_to_fs(factor, version, bytes(code_file), self._logger)
if err:
return err, None
# fetch updated dates
err, updated_days = self._factor_dao.list_updated_dates(factor, version, stock_code)
if err:
return err, None
# fetch tick data dates
        err, tick_days = self._tick_dao.list_updated_dates(stock_code)
        if err:
            return err, None
        # get the dates that still need updating
to_update_days = sorted(list(set(tick_days) - set(updated_days)))
# convert tick data to factors
var_list = [(factor, version, stock_code, day)
for day in to_update_days]
update_item_num = len(var_list)
if update_item_num == 0:
return Error.ERROR_TASK_HAS_NOTHING_TO_BE_DONE, 0
task_group = TaskGroup(TaskConst.TaskType.UPDATE_FACTOR_TASK, task_id)
for var in var_list:
task = Task(TaskConst.TaskType.UPDATE_FACTOR_TASK, self.__make_task_sub_id(*var))
task.set_target(update_day_factor_in_async, args=var)
task_group.add_task(task)
self._task_manager.apply_task_group(task_group)
return Error.SUCCESS, update_item_num
@staticmethod
def __make_task_sub_id(*args):
return "_".join([str(arg) for arg in args])
def query_update_status(self, task_id):
return self._task_manager.query_group_progress(task_id)
def stop_task_group(self, group_id):
return self._task_manager.stop_task_group(group_id)
def stop_all_factor_update_task_groups(self):
return self._task_manager.stop_task_groups(task_type=TaskConst.TaskType.UPDATE_FACTOR_TASK)
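
# Editor-added, illustrative only: update_linkage() above derives its work list by
# subtracting the days that already have factor values from the days that have tick
# data, then builds one (factor, version, stock_code, day) argument tuple per missing
# day. This standalone helper restates just that arithmetic; it is a hedged sketch and
# is not used by the manager itself.
def _build_unit_task_args(factor, version, stock_code, tick_days, updated_days):
    """Return one argument tuple per day that still needs factor computation."""
    to_update_days = sorted(set(tick_days) - set(updated_days))
    return [(factor, version, stock_code, day) for day in to_update_days]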
def update_day_factor_in_async(factor, version, stock_code, day, *args, **kwargs):
"""
update factor of a single day
:param factor:
:param version:
:param stock_code:
:param day:
:param args:
:param kwargs:
:return:
"""
from Core.Conf.DatabaseConf import DBConfig
# get task info
task_id = kwargs.get(TaskConst.TaskParam.TASK_ID)
task_queue = kwargs.get(TaskConst.TaskParam.TASK_MANAGER_QUEUE)
task_group_id = kwargs.get(TaskConst.TaskParam.TASK_GROUP_ID)
log_stack = kwargs.get(TaskConst.TaskParam.LOG_STACK)
# get global variable
db_engine = DBConfig.default_config().create_default_sa_engine_without_pool()
# add sys path if not exists
if Path.FACTOR_GENERATOR_BASE not in sys.path:
sys.path.append(Path.FACTOR_GENERATOR_BASE)
# set task status
_task_aborted = False
# set logger
logger = MessageLogger(task_id, task_group_id, log_stack, task_queue)
# log start info
logger.log_info("factor update task starting...")
try:
# create database connection
tick_dao = TickDataDao(db_engine, logger)
factor_dao = FactorDao(db_engine, logger)
# fetch data of a day
daytime = datetime.datetime(year=day.year, month=day.month, day=day.day)
err, day_df = tick_dao.load_updated_tick_data(stock_code, daytime)
if err:
logger.log_error("({}) failed to fetch tick data".format(err))
_task_aborted = True
return err
if day_df.shape[0] < 1000:
logger.log_info("too few tick data({} ticks)".format(day_df.shape[0]))
return Error.SUCCESS
err, link_id = factor_dao.get_linkage_id(factor, version, stock_code)
if err:
_task_aborted = True
return err
# add factor generator script path to python sys path
generator_path = "{0}/{1}/{2}".format(Path.FACTOR_GENERATOR_BASE, factor, version)
# run python script
generator_module_name = [f for f in os.listdir(generator_path) if f != "__init__.py"][0]
generator_module_path = "{0}/{1}".format(generator_path, generator_module_name)
if not os.path.isdir(generator_module_path):
if generator_module_name.endswith(".py"):
generator_module_name = generator_module_name[:-3]
else:
logger.log_error("Unrecognized file type: {}".format(generator_module_name))
_task_aborted = True
return Error.ERROR_UNRECOGNIZED_FILE_TYPE
try:
import importlib
generator_module = importlib.import_module("{0}.{1}.{2}".format(factor, version,
generator_module_name))
except:
logger.log_error(traceback.format_exc())
_task_aborted = True
return Error.ERROR_FAILED_TO_LOAD_FACTOR_GENERATOR_MODULE
factor_generator = generator_module.factor_generator
# execute factor generator
try:
factor_value = factor_generator(day_df, stock_code, day)
if isinstance(factor_value, pd.DataFrame):
signature = [factor]
if factor_dao.is_group_factor(factor):
err, signature = factor_dao.get_sub_factors(factor, version)
if err:
_task_aborted = True
return err
if not set(factor_value.columns).issuperset(set(signature)):
_task_aborted = True
return Error.ERROR_GROUP_FACTOR_SIGNATURE_NOT_MATCHED
factor_value = factor_value[signature]
factor_value['date'] = day_df['date'].tolist()
factor_value['datetime'] = day_df['datetime'].tolist()
elif not isinstance(factor_value, list) or len(factor_value) != TickDataConf.TICK_LENGTH:
_task_aborted = True
logger.log_error("invalid factor result format:\n" + str(factor_value))
return Error.ERROR_INVALID_FACTOR_RESULT
else:
factor_value = pd.DataFrame({"datetime": day_df['datetime'], factor: factor_value, "date": day_df['date']})
err, msg = MessageSender.send_factor_result_to_master(factor, version, stock_code, day, factor_value,
task_group_id, logger)
if err:
logger.log_error("Error occurred during tick update callback: {0} {1}".format(err, msg))
if err == Error.ERROR_TASK_NOT_EXISTS:
task_queue.put(KillMessage(task_id))
return
_task_aborted = True
return err
except:
logger.log_error(traceback.format_exc())
_task_aborted = True
return Error.ERROR_FACTOR_GENERATE_FAILED
except:
_task_aborted = True
logger.log_error(traceback.format_exc())
return Error.ERROR_FACTOR_GENERATE_FAILED
finally:
logger.log_info("task finished")
task_queue.put(FinishACKMessage(task_id, aborted=_task_aborted))
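
# Editor-added, illustrative only: update_day_factor_in_async() above accepts a
# generator result either as a DataFrame whose columns cover the expected signature
# (the sub-factor names for a group factor, or just the factor name) or as a plain
# list whose length matches TickDataConf.TICK_LENGTH. The helper below restates that
# shape check in isolation; "expected_length" stands in for TickDataConf.TICK_LENGTH
# and the helper relies on the module-level "import pandas as pd" above.
def _is_valid_factor_result(result, signature, expected_length):
    """Return True if a factor generator's output has an acceptable shape."""
    if isinstance(result, pd.DataFrame):
        return set(result.columns).issuperset(set(signature))
    return isinstance(result, list) and len(result) == expected_length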
|
2dVisualizer.py
|
"""
Created on 2/11/20
Marquette Robotics Club
Danny Hudetz
Purpose: read from the hdf5 format and visualize the coordinates mapped nearest
to user input in 2 dimensions
"""
import numpy as math  # numpy stands in for the math module here (provides pi and cos used below)
import pygame
import threading
import vis
# Define some colors
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
GREEN = (0, 255, 0)
RED = (255, 0, 0)
BLUE = (0, 0, 255)
GRAY =(100, 100, 100)
LIGHTGRAY=(50,50,50)
pygame.init()
# Set the width and height of the screen [width, height]
WIDTH = 600
HEIGHT = 600
center = pygame.math.Vector2()
center.x = WIDTH/2
center.y = HEIGHT/2
screen = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption("MegarmModel")
clock = pygame.time.Clock()
font = pygame.font.Font('freesansbold.ttf', 15)
# Loop until the user clicks the close button.
done = False
#graphics
scaleFactor = 3
lineWidth = 7
doGrid = True
gridTileSize = 10 #cm
fps = 60
operationHeight=0
operationHeightStore=operationHeight
ar=az=br=bz=cr=cz=frameCount=deg=deg2=endAngle=0
resetAngles=(90,-90,90)
previousAngles=resetAngles
currentAngles=resetAngles
POI=[0,0]
circles=[]
file=None
img = pygame.image.load("marquette_robotics.png")
imgScaled = pygame.transform.scale(img, (200, 66))
back=None
def userInputLoop():
global done, file, circles, currentAngles, previousAngles, back,ar,az,br,bz,cr,cz
print("\nMegarm Visualizer")
f=None
while not done:
userInput = input("Import coordinate r z? Type \'help\' for more options: ")
words=userInput.split()
if len(words)==2:
if(words[0]=='f'):
(file,a,b,c) = vis.getFile(words[1])
if file!=None:
back=vis.backEnd(a,b,c)
currentAngles=resetAngles
previousAngles=resetAngles
circles=[]
(ar,az,br,bz,cr,cz)=back.calculateComponents(resetAngles[0],resetAngles[1],resetAngles[2])
elif file!=None:
currentAngles=vis.getServoAngles(file, float(words[0]),float(words[1]))
else:
print("File not imported.")
elif len(words)==0:
print("Improper syntax")
elif words[0]=="help":
print("To enter a coordinate just type the r and z.")
print("Example: 15.0 10.0")
print("To change hdf5 file reference, type f and the file name.")
print("Example: f 31.5-31.5-7.0.hdf5")
print("To quit, type q.")
elif words[0]=="q":
done=True
pygame.quit()
else:
print("Improper syntax")
def overlay(t, x, y, color):
text = font.render(t, True, color, BLACK)
textRect = text.get_rect()
textRect.center = (x, y)
screen.blit(text, textRect)
def drawGrid():
for i in range(0,int(WIDTH/(scaleFactor*gridTileSize*2))+1):
gridRight = int(i*(scaleFactor*gridTileSize))+center.x
gridLeft = center.x-int(i*(scaleFactor*gridTileSize))
pygame.draw.line(screen, LIGHTGRAY, (gridRight, 0), (gridRight, HEIGHT), 1)
pygame.draw.line(screen, LIGHTGRAY, (gridLeft, 0), (gridLeft, HEIGHT), 1)
for j in range(0,int(HEIGHT/(scaleFactor*gridTileSize*2))+1):
gridDown = int(j*(scaleFactor*gridTileSize))+center.y
gridUp = center.y-int(j*(scaleFactor*gridTileSize))
pygame.draw.line(screen, LIGHTGRAY, (0, gridUp), (WIDTH, gridUp), 1)
pygame.draw.line(screen, LIGHTGRAY, (0, gridDown), (WIDTH, gridDown), 1)
try:
userThread = threading.Thread(target=userInputLoop, args=())
userThread.start()
except:
print("Error: unable to start thread")
moving=False
sineCount=0
posCount=0
while not done:
# --- Main event loop
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
frameCount+=1
if moving:
if sineCount<=math.pi:
(ar,az,br,bz,cr,cz)=back.calculateComponents((previousAngles[0]-currentAngles[0])*(math.cos(sineCount)+1)/2+currentAngles[0],
(previousAngles[1]-currentAngles[1])*(math.cos(sineCount)+1)/2+currentAngles[1],
(previousAngles[2]-currentAngles[2])*(math.cos(sineCount)+1)/2+currentAngles[2])
sineCount+=math.pi/100
else:
moving=False
previousAngles=currentAngles
if posCount==3:
posCount=0
else:
posCount+=1
if previousAngles!=currentAngles and not moving:
moving=True
sineCount=0
screen.fill(BLACK)
avector = pygame.math.Vector2()
avector.x = ar*scaleFactor
avector.y = az*scaleFactor
bvector = pygame.math.Vector2()
bvector.x = br*scaleFactor
bvector.y = bz*scaleFactor
cvector = pygame.math.Vector2()
cvector.x = cr*scaleFactor
cvector.y = cz*scaleFactor
POI = center+avector+bvector+cvector
if moving:
circles.append(POI)
for cir in circles:
pygame.draw.circle(screen, GRAY, [int(cir.x),int(cir.y)], 1)
if doGrid:
drawGrid()
pygame.draw.line(screen, RED, center, center+avector, lineWidth)
pygame.draw.line(screen, GREEN, center+avector, center+avector+bvector, lineWidth)
pygame.draw.line(screen, BLUE, center+avector+bvector, POI, lineWidth)
pygame.draw.circle(screen, WHITE, [int(POI.x),int(POI.y)], 3)
pygame.draw.circle(screen, WHITE, [int(center.x),int(center.y)], 3)
pygame.draw.circle(screen, WHITE, [int((center+avector).x),int((center+avector).y)], 3)
pygame.draw.circle(screen, WHITE, [int((center+avector+bvector).x),int((center+avector+bvector).y)], 3)
finalRadius = (POI.x-center.x)/scaleFactor
finalHeight = -(POI.y-center.y)/scaleFactor
overlay("Grid tile is "+str(gridTileSize)+"cm by "+str(gridTileSize)+"cm", 100, 30, WHITE)
overlay("Radius: " + str(round(finalRadius,3)) + "cm", 100, 50, WHITE)
overlay("Height: " + str(round(finalHeight,3)) + "cm", 100, 70, WHITE)
screen.blit(imgScaled, (WIDTH-200, 0))
pygame.display.update()
clock.tick(fps)
pygame.quit()
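
# Editor-added, illustrative only: the animation loop above eases between the previous
# and current servo angles with a raised-cosine blend while a phase variable steps from
# 0 to pi. This standalone function shows the same formula for a single angle; it is a
# hedged sketch and is not called by the visualizer itself.
def easedAngle(previous, current, phase):
    """Blend from `previous` (phase=0) to `current` (phase=pi) along a cosine curve."""
    from math import cos  # local import; the module-level name `math` is bound to numpy above
    weight = (cos(phase) + 1) / 2  # 1.0 at phase=0, 0.0 at phase=pi
    return (previous - current) * weight + current
# Example: easedAngle(90, -90, 0) == 90, and easedAngle(90, -90, math.pi) ~= -90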
|
setup.py
|
#!/usr/bin/env python
import os
import re
import sys
from setuptools import setup
from setuptools import find_packages
from setuptools.command.test import test as TestCommand
v = open(os.path.join(os.path.dirname(__file__), 'spyne', '__init__.py'), 'r')
VERSION = re.match(r".*__version__ = '(.*?)'", v.read(), re.S).group(1)
LONG_DESC = """Spyne aims to save the protocol implementers the hassle of
implementing their own remote procedure call api and the application programmers
the hassle of jumping through hoops just to expose their services using multiple
protocols and transports.
"""
try:
os.stat('CHANGELOG.rst')
LONG_DESC += "\n\n" + open('CHANGELOG.rst', 'r').read()
except OSError:
pass
SHORT_DESC="""A transport and architecture agnostic rpc library that focuses on
exposing public services with a well-defined API."""
def call_test(f, a, tests):
import spyne.test
from glob import glob
from itertools import chain
from multiprocessing import Process, Queue
tests_dir = os.path.dirname(spyne.test.__file__)
a.extend(chain(*[glob("%s/%s" % (tests_dir, test)) for test in tests]))
queue = Queue()
p = Process(target=_wrapper(f), args=[a, queue])
p.start()
p.join()
ret = queue.get()
if ret == 0:
print tests, "OK"
else:
print tests, "FAIL"
return ret
def _wrapper(f):
def _(args, queue):
try:
retval = f(args)
except TypeError: # it's a pain to call trial.
sys.argv = ['trial']
sys.argv.extend(args)
retval = f()
queue.put(retval)
return _
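
# Editor-added, illustrative only: call_test()/_wrapper() above run each test entry
# point in a child process and pass its return value back through a multiprocessing
# Queue, so a crash in one suite cannot take down the whole run. The minimal pair
# below shows the same pattern with an arbitrary module-level callable; it is a
# hedged sketch and is not used by this setup script.
def _subprocess_target(queue, func, args):
    queue.put(func(*args))

def _run_in_subprocess(func, *args):
    from multiprocessing import Process, Queue
    queue = Queue()
    p = Process(target=_subprocess_target, args=(queue, func, args))
    p.start()
    p.join()
    return queue.get()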
def call_pytest(*tests):
import pytest
return call_test(pytest.main, ['-v', '--tb=short'], tests)
def call_trial(*tests):
from twisted.scripts.trial import usage
from twisted.scripts.trial import Options
from twisted.scripts.trial import _makeRunner
from twisted.scripts.trial import _getSuite
def run():
config = Options()
config.parseOptions()
trialRunner = _makeRunner(config)
suite = _getSuite(config)
test_result = trialRunner.run(suite)
return int(not test_result.wasSuccessful())
return call_test(run, [], tests)
class RunTests(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
print "running tests"
ret = 0
ret = call_pytest('interface', 'model', 'protocol', 'test_*') or ret
ret = call_pytest('interop/test_httprpc.py') or ret
ret = call_pytest('interop/test_soap_client_http.py') or ret
ret = call_pytest('interop/test_soap_client_zeromq.py') or ret
ret = call_pytest('interop/test_suds.py') or ret
ret = call_trial('interop/test_soap_client_http_twisted.py') or ret
if ret == 0:
print "All that glisters is not gold."
else:
print "Something is rotten in the state of Denmark."
raise SystemExit(ret)
test_reqs = ['pytest', 'werkzeug', 'sqlalchemy', 'suds', 'msgpack-python', 'pyparsing']
if sys.version_info < (2,6):
test_reqs.extend([
'zope.interface<4',
'twisted<12',
'pyzmq<2.2',
'multiprocessing',
'simplejson',
])
else:
test_reqs.extend([
'twisted',
'pyzmq',
])
setup(
name='spyne',
packages=find_packages(),
version=VERSION,
description=SHORT_DESC,
long_description=LONG_DESC,
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Operating System :: OS Independent',
'Natural Language :: English',
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
keywords=('soap', 'wsdl', 'wsgi', 'zeromq', 'rest', 'rpc', 'json', 'http',
'msgpack', 'xml'),
author='Burak Arslan',
author_email='burak+spyne@arskom.com.tr',
maintainer='Burak Arslan',
maintainer_email='burak+spyne@arskom.com.tr',
url='http://spyne.io',
license='LGPL-2.1',
zip_safe=False,
install_requires=[
'pytz',
'lxml>=2.3',
'defusedxml>=0.3',
],
entry_points = {
'console_scripts': [
'sort_wsdl=spyne.test.sort_wsdl:main',
]
},
tests_require = test_reqs,
cmdclass = {'test': RunTests},
)
|
flask_server.py
|
from utils import log
import config
import cv2
import numpy as np
from flask import Flask, request
from flask import render_template
from flask_cors import CORS
import os
import json
import time
import threading
import sys
print('path: ',os.path.dirname(os.path.abspath(__file__)))
# Get the resource path (handles the PyInstaller _MEIPASS bundle directory)
def resource_path(relative_path):
if hasattr(sys, '_MEIPASS'):
return os.path.join(sys._MEIPASS, relative_path)
return os.path.join(os.path.abspath("."), relative_path)
app = Flask(__name__, template_folder=resource_path('templates'),static_folder=resource_path('statics'))
CORS(app, resources=r'/*')
# For testing only
b_test=False
# Receive the first clicked longitude/latitude to locate the lake
@app.route('/')
def index():
return render_template('map3.html')
logger = log.LogHandler('main')
# Pixel positions of the lake contour
@app.route('/pool_cnts', methods=['GET', 'POST'])
def pool_cnts():
print(request)
print("request.url", request.url)
print("request.data", request.data)
if b_test:
return json.dumps({'data':'391,599 745,539 872,379 896,254 745,150 999,63 499,0 217,51 66,181 0,470'})
else:
        # On failure, return a hint message
if ship_obj.pix_cnts is None:
return '初始经纬度像素点未生成'
#{'data':'391, 599 745, 539 872, 379 896, 254 745, 150 999, 63 499, 0 217, 51 66, 181 0, 470'}
else:
str_pix_points = ''
for index, value in enumerate(ship_obj.pix_cnts):
if index == len(ship_obj.pix_cnts) - 1:
str_pix_points += str(value[0]) + ',' + str(value[1])
else:
str_pix_points += str(value[0]) + ',' + str(value[1]) + ' '
return_json = json.dumps({'data': str_pix_points})
print('pool_cnts',return_json)
return return_json
# Get the list of online ships
@app.route('/online_ship', methods=['GET', 'POST'])
def online_ship():
print(request)
print('request.data', request.data)
if b_test:
return_data = {
            # Ship IDs
"ids": [1, 2, 8,10,18],
            # Array of ship pixel positions
"pix_postion": [[783, 1999], [132, 606], [52, 906], [0, 1569]],
            # Whether each ship has a configured path: 1 = configured, 0 = not yet configured
"config_path": [1, 1, 0, 1],
            # Remaining battery as an integer 0-100
"dump_energy": [90, 37, 80, 60],
            # Ship speed in m/s (float)
"speed": [3.5, 2.0, 1.0, 5.0]
}
return json.dumps(return_data)
else:
return_data = {
            # Ship IDs
"ids": ship_obj.online_ship_list,
            # Array of ship pixel positions
"pix_postion": [ship_obj.ship_pix_position_dict.get(i) for i in ship_obj.online_ship_list],
            # Whether each ship has a configured path: 1 = configured, 0 = not yet configured
"config_path": [1 if i in ship_obj.config_ship_lng_lats_dict else 0 for i in ship_obj.online_ship_list],
            # Remaining battery as an integer 0-100
"dump_energy": [ship_obj.ship_dump_energy_dict.get(i) for i in ship_obj.online_ship_list],
            # Ship speed in m/s (float)
"speed": [ship_obj.ship_speed_dict.get(i) for i in ship_obj.online_ship_list],
"direction":[ship_obj.ship_direction_dict.get(i) for i in ship_obj.online_ship_list]
}
print('online_ship data',return_data)
return json.dumps(return_data)
# Send a configured path for a single ship
@app.route('/ship_path', methods=['GET', 'POST'])
def ship_path():
print(request)
print('request.data', request.data)
data = json.loads(request.data)
if ship_obj.pix_cnts is None:
return '还没有湖,别点'
ids_list = []
for i in data['id'].split(' '):
try:
id = int(i)
ids_list.append(id)
except Exception as e :
logger.error({'error: ':e})
    # No valid ids
if len(ids_list)==0 or len(data['data'])<=0:
return
for id in ids_list:
if data['data'][0][0].endswith('px'):
click_pix_points = [[int(i[0][:-2]),int(i[1][:-2])] for i in data['data']]
else:
click_pix_points = [[int(i[0]),int(i[1])] for i in data['data']]
click_lng_lats = []
for point in click_pix_points:
in_cnt = cv2.pointPolygonTest(np.array(ship_obj.pix_cnts), (point[0], point[1]), False)
if in_cnt >= 0:
click_lng_lat = ship_obj.pix_to_lng_lat(point)
click_lng_lats.append(click_lng_lat)
ship_obj.config_ship_lng_lats_dict.update({id:click_lng_lats})
# logger.debug({'config_ship_lng_lats_dict':ship_obj.config_ship_lng_lats_dict})
return 'ship_path'
# Send all configured paths to the ships and start them
@app.route('/send_path', methods=['GET', 'POST'])
def send_path():
print(request)
ship_obj.b_send_path = True
# ship_obj.b_send_control = True
for i in ship_obj.online_ship_list:
ship_obj.ship_control_dict.update({int(i):1})
return 'send_path'
# Command ships to start
@app.route('/ship_start', methods=['GET', 'POST'])
def ship_start():
print(request)
print('request.data', request.data)
ship_obj.b_send_control=True
data = json.loads(request.data)
for i in data['id']:
ship_obj.ship_control_dict.update({int(i):1})
return 'ship_start'
# Command ships to stop
@app.route('/ship_stop', methods=['GET', 'POST'])
def ship_stop():
print(request)
print('request.data', request.data)
ship_obj.b_send_control = True
data = json.loads(request.data)
for i in data['id']:
ship_obj.ship_control_dict.update({int(i):0})
return 'ship_stop'
class Ship:
def __init__(self):
        self.logger = log.LogHandler('main')
self.com_logger = log.LogHandler('com_logger')
        # Lake contour points in pixel coordinates
self.pix_cnts = None
        # Ship IDs received so far
self.online_ship_list = []
        # Manual control state
self.ship_control_dict={}
        # Pixel positions and longitude/latitude positions
self.ship_pix_position_dict = {}
self.ship_lng_lat_position_dict = {}
        # Pixel points clicked by the user
self.click_pix_points_dict = {}
        # Configured waypoints per ship
self.config_ship_lng_lats_dict = {}
        # Remaining battery per ship
self.ship_dump_energy_dict={}
        # Ship speed
self.ship_speed_dict = {}
        # Ship heading
self.ship_direction_dict={}
        # Whether to send all paths to the ships
self.b_send_path = False
self.b_send_control=False
        # Longitude/latitude of collected points
self.lng_lats_list = []
        # Track the currently open serial port
self.serial_obj = None
    # Must run in the main thread
@staticmethod
def run_flask(debug=True):
# app.run(host='192.168.199.171', port=5500, debug=True)
app.run(host='0.0.0.0', port=8899, debug=debug)
    # Convert longitude/latitude to pixel coordinates
def lng_lat_to_pix(self,lng_lat):
"""
        :param lng_lat: [longitude, latitude]
        :return: pixel position as [w, h]
"""
int_lng_lat = [int(lng_lat[0] * 1000000), int(lng_lat[1] * 1000000)]
int_lng_lats_offset = [int_lng_lat[0] - self.left_up_x, int_lng_lat[1] - self.left_up_y]
int_lng_lats_pix = [int(int_lng_lats_offset[0] / self.scale_w), int(int_lng_lats_offset[1] / self.scale_h)]
return int_lng_lats_pix
    # Convert pixel coordinates to longitude/latitude
def pix_to_lng_lat(self, pix):
"""
        :param pix: pixel position, width first then height
        :return: [longitude, latitude]
"""
lng = round((self.left_up_x + pix[0] * self.scale_w) / 1000000.0, 6)
lat = round((self.left_up_y + pix[1] * self.scale_h) / 1000000.0, 6)
return [lng,lat]
def init_cnts_lng_lat_to_pix(self, b_show=False):
lng_lats_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'lng_lats.txt')
while not os.path.exists(lng_lats_path):
time.sleep(1)
try:
with open(lng_lats_path, 'r') as f:
temp_list = f.readlines()
for i in temp_list:
i = i.strip()
self.lng_lats_list.append(
[float(i.split(',')[0]), float(i.split(',')[1])])
except Exception as e:
self.logger.error({'lng_lats.txt 格式错误':e})
int_lng_lats_list = [[int(i[0] * 1000000), int(i[1] * 1000000)]
for i in self.lng_lats_list]
(left_up_x, left_up_y, w, h) = cv2.boundingRect(np.array(int_lng_lats_list))
self.left_up_x = left_up_x
self.left_up_y = left_up_y
self.logger.info({'(x, y, w, h) ': (left_up_x, left_up_y, w, h)})
        ## Pixel-to-unit scaling
        # Proportional (aspect-preserving) stretch
if w>=h:
self.scale_w = float(w) / config.pix_w
self.scale_h = float(w) / config.pix_w
else:
self.scale_w = float(h) / config.pix_w
self.scale_h = float(h) / config.pix_w
        # Force stretch to the same width and height
# self.scale_w = float(w) / config.pix_w
# self.scale_h = float(h) / config.pix_h
        # Convert longitude/latitude to pixels
self.pix_cnts = [self.lng_lat_to_pix(i) for i in self.lng_lats_list]
self.logger.info({'self.pix_cnts': self.pix_cnts})
if b_show:
img = np.zeros((config.pix_h, config.pix_w, 3), dtype=np.uint8)
cv2.circle(img, (int(config.pix_w / 2),
int(config.pix_h / 2)), 5, (255, 0, 255), -1)
cv2.drawContours(
img,
np.array(
[self.pix_cnts]),
contourIdx=-1,
color=(255, 0, 0))
print(img.shape)
            # Mouse callback function
            # x, y are positions relative to the image inside the window
def draw_circle(event, x, y, flags, param):
                # Check whether the event is a left button double click (or press)
if event == cv2.EVENT_LBUTTONDBLCLK or event == cv2.EVENT_LBUTTONDOWN:
in_cnt = cv2.pointPolygonTest(
np.array([self.pix_cnts]), (x, y), False)
                    # A non-negative result means the point lies within the contour
if in_cnt >= 0:
print('像素', x, y)
lng = round((left_up_x + x * self.scale_w) / 1000000.0, 6)
lat = round((left_up_y + y * self.scale_h) / 1000000.0, 6)
print('经纬度', lng, lat)
cv2.circle(img, (x, y), 5, (255, 0, 0), -1)
if event == cv2.EVENT_RBUTTONDOWN:
in_cnt = cv2.pointPolygonTest(
np.array([self.pix_cnts ]), (x, y), False)
                    # A non-negative result means the point lies within the contour
if in_cnt >= 0:
print('像素', x, y)
lng = round((left_up_x + x * self.scale_w) / 1000000.0, 6)
lat = round((left_up_y + y * self.scale_h) / 1000000.0, 6)
print('经纬度', lng, lat)
cv2.circle(img, (x, y), 5, (255, 0, 0), -1)
cv2.namedWindow('img')
            # Register the mouse event callback
cv2.setMouseCallback('img', draw_circle)
while (True):
cv2.imshow('img', img)
if cv2.waitKey(1) == ord('q'):
break
# cv2.waitKey(0)
cv2.destroyAllWindows()
    # Send data over the serial port
def send_com_data(self):
while True:
if self.serial_obj is None:
time.sleep(1)
continue
            # Send configured waypoints
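
# Editor-added, illustrative only: Ship.lng_lat_to_pix()/pix_to_lng_lat() above map
# between geographic coordinates and canvas pixels by scaling degrees by 1e6 to
# integers, offsetting by the bounding box's top-left corner, and dividing (or
# multiplying) by a per-axis scale. The standalone pair below restates that math with
# the corner and scales passed in explicitly; it is a hedged sketch and is not used by
# the server itself.
def lng_lat_to_pix_example(lng_lat, left_up_x, left_up_y, scale_w, scale_h):
    x = int(lng_lat[0] * 1000000) - left_up_x
    y = int(lng_lat[1] * 1000000) - left_up_y
    return [int(x / scale_w), int(y / scale_h)]

def pix_to_lng_lat_example(pix, left_up_x, left_up_y, scale_w, scale_h):
    lng = round((left_up_x + pix[0] * scale_w) / 1000000.0, 6)
    lat = round((left_up_y + pix[1] * scale_h) / 1000000.0, 6)
    return [lng, lat]
# The round trip is approximate because int() truncates fractional pixels: converting a
# point to pixels and back recovers the longitude/latitude only to within one scale unit
# (scale_w / 1e6 degrees here).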
if __name__ == '__main__':
ship_obj = Ship()
init_cnts_lng_lat_to_pix = threading.Thread(target=ship_obj.init_cnts_lng_lat_to_pix,args=(False,))
get_com_thread = threading.Thread(target=ship_obj.get_com_data)
send_com_thread = threading.Thread(target=ship_obj.send_com_data)
#
# init_cnts_lng_lat_to_pix.setDaemon(True)
# get_com_thread.setDaemon(True)
# send_com_thread.setDaemon(True)
#
init_cnts_lng_lat_to_pix.start()
get_com_thread.start()
send_com_thread.start()
# init_cnts_lng_lat_to_pix.join()
# get_com_thread.join()
# send_com_thread.join()
# run_flask()
ship_obj.run_flask(debug=False)
|
test_win_runas.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import inspect
import io
import logging
import os
import socket
import subprocess
# Service manager imports
import sys
import textwrap
import threading
import time
import traceback
import salt.ext.six
import salt.utils.files
import salt.utils.win_runas
import yaml
from tests.support.case import ModuleCase
from tests.support.helpers import with_system_user
from tests.support.mock import Mock
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import skipIf
try:
import win32service
import win32serviceutil
import win32event
import servicemanager
import win32api
CODE_DIR = win32api.GetLongPathName(RUNTIME_VARS.CODE_DIR)
HAS_WIN32 = True
except ImportError:
# Mock win32serviceutil object to avoid
# a stacktrace in the _ServiceManager class
win32serviceutil = Mock()
HAS_WIN32 = False
logger = logging.getLogger(__name__)
PASSWORD = "P@ssW0rd"
NOPRIV_STDERR = "ERROR: Logged-on user does not have administrative privilege.\n"
PRIV_STDOUT = (
"\nINFO: The system global flag 'maintain objects list' needs\n "
"to be enabled to see local opened files.\n See Openfiles "
"/? for more information.\n\n\nFiles opened remotely via local share "
"points:\n---------------------------------------------\n\n"
"INFO: No shared open files found.\n"
)
if HAS_WIN32:
RUNAS_PATH = os.path.abspath(os.path.join(CODE_DIR, "runas.py"))
RUNAS_OUT = os.path.abspath(os.path.join(CODE_DIR, "runas.out"))
def default_target(service, *args, **kwargs):
while service.active:
time.sleep(service.timeout)
class _ServiceManager(win32serviceutil.ServiceFramework):
"""
A windows service manager
"""
_svc_name_ = "Service Manager"
_svc_display_name_ = "Service Manager"
_svc_description_ = "A Service Manager"
run_in_foreground = False
target = default_target
def __init__(self, args, target=None, timeout=60, active=True):
win32serviceutil.ServiceFramework.__init__(self, args)
self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)
self.timeout = timeout
self.active = active
if target is not None:
self.target = target
@classmethod
def log_error(cls, msg):
if cls.run_in_foreground:
logger.error(msg)
servicemanager.LogErrorMsg(msg)
@classmethod
def log_info(cls, msg):
if cls.run_in_foreground:
logger.info(msg)
servicemanager.LogInfoMsg(msg)
@classmethod
def log_exception(cls, msg):
if cls.run_in_foreground:
logger.exception(msg)
exc_info = sys.exc_info()
tb = traceback.format_tb(exc_info[2])
servicemanager.LogErrorMsg("{} {} {}".format(msg, exc_info[1], tb))
@property
def timeout_ms(self):
return self.timeout * 1000
def SvcStop(self):
"""
        Stop the service by terminating any subprocess call, notifying
        Windows internals of the stop event, and setting the instance's
        active attribute to False so the run loops stop.
"""
self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
win32event.SetEvent(self.hWaitStop)
self.active = False
def SvcDoRun(self):
"""
        Run the monitor in a separate thread so the main thread is
        free to react to events sent to the Windows service.
"""
servicemanager.LogMsg(
servicemanager.EVENTLOG_INFORMATION_TYPE,
servicemanager.PYS_SERVICE_STARTED,
(self._svc_name_, ""),
)
self.log_info("Starting Service {}".format(self._svc_name_))
monitor_thread = threading.Thread(target=self.target_thread)
monitor_thread.start()
while self.active:
rc = win32event.WaitForSingleObject(self.hWaitStop, self.timeout_ms)
if rc == win32event.WAIT_OBJECT_0:
# Stop signal encountered
self.log_info("Stopping Service")
break
if not monitor_thread.is_alive():
self.log_info("Update Thread Died, Stopping Service")
break
def target_thread(self, *args, **kwargs):
"""
Target Thread, handles any exception in the target method and
logs them.
"""
self.log_info("Monitor")
try:
self.target(self, *args, **kwargs)
except Exception as exc: # pylint: disable=broad-except
# TODO: Add traceback info to windows event log objects
self.log_exception("Exception In Target")
@classmethod
def install(cls, username=None, password=None, start_type=None):
if hasattr(cls, "_svc_reg_class_"):
svc_class = cls._svc_reg_class_
else:
svc_class = win32serviceutil.GetServiceClassString(cls)
win32serviceutil.InstallService(
svc_class,
cls._svc_name_,
cls._svc_display_name_,
description=cls._svc_description_,
userName=username,
password=password,
startType=start_type,
)
@classmethod
def remove(cls):
win32serviceutil.RemoveService(cls._svc_name_)
@classmethod
def start(cls):
win32serviceutil.StartService(cls._svc_name_)
@classmethod
def restart(cls):
win32serviceutil.RestartService(cls._svc_name_)
@classmethod
def stop(cls):
win32serviceutil.StopService(cls._svc_name_)
def service_class_factory(
cls_name,
name,
target=default_target,
display_name="",
description="",
run_in_foreground=False,
):
frm = inspect.stack()[1]
mod = inspect.getmodule(frm[0])
if salt.ext.six.PY2:
cls_name = cls_name.encode()
return type(
cls_name,
(_ServiceManager, object),
{
"__module__": mod.__name__,
"_svc_name_": name,
"_svc_display_name_": display_name or name,
"_svc_description_": description,
"run_in_foreground": run_in_foreground,
"target": target,
},
)
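
# Editor-added, illustrative only: service_class_factory() above builds each Windows
# service class at runtime with the three-argument form of type(name, bases, namespace),
# which lets the tests register several differently named services from the single
# _ServiceManager template. The tiny, platform-independent example below shows the same
# mechanism with a plain base class; the names are hypothetical and nothing here is
# used by the tests themselves.
def _make_named_class(cls_name, greeting):
    class _Base(object):
        def hello(self):
            return self.greeting

    return type(str(cls_name), (_Base,), {"greeting": greeting})
# Usage: _make_named_class("Greeter", "hi")().hello() returns "hi".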
if HAS_WIN32:
test_service = service_class_factory("test_service", "test service")
SERVICE_SOURCE = """
from __future__ import absolute_import, unicode_literals
import logging
logger = logging.getLogger()
logging.basicConfig(level=logging.DEBUG, format="%(message)s")
from tests.integration.utils.test_win_runas import service_class_factory
import salt.utils.win_runas
import sys
import yaml
OUTPUT = {}
USERNAME = '{}'
PASSWORD = '{}'
def target(service, *args, **kwargs):
service.log_info("target start")
if PASSWORD:
ret = salt.utils.win_runas.runas(
'cmd.exe /C OPENFILES',
username=USERNAME,
password=PASSWORD,
)
else:
ret = salt.utils.win_runas.runas(
'cmd.exe /C OPENFILES',
username=USERNAME,
)
service.log_info("win_runas returned %s" % ret)
with open(OUTPUT, 'w') as fp:
yaml.dump(ret, fp)
service.log_info("target stop")
# This class will get imported and run as the service
test_service = service_class_factory('test_service', 'test service', target=target)
if __name__ == '__main__':
try:
test_service.stop()
except Exception as exc: # pylint: disable=broad-except
logger.debug("stop service failed, this is ok.")
try:
test_service.remove()
except Exception as exc: # pylint: disable=broad-except
logger.debug("remove service failed, this os ok.")
test_service.install()
sys.exit(0)
"""
def wait_for_service(name, timeout=200):
start = time.time()
while True:
status = win32serviceutil.QueryServiceStatus(name)
if status[1] == win32service.SERVICE_STOPPED:
break
if time.time() - start > timeout:
raise TimeoutError(
"Timeout waiting for service"
) # pylint: disable=undefined-variable
time.sleep(0.3)
@skipIf(not HAS_WIN32, "This test runs only on windows.")
class RunAsTest(ModuleCase):
@classmethod
def setUpClass(cls):
super(RunAsTest, cls).setUpClass()
cls.hostname = socket.gethostname()
@with_system_user(
"test-runas", on_existing="delete", delete=True, password=PASSWORD
)
def test_runas(self, username):
ret = salt.utils.win_runas.runas("cmd.exe /C OPENFILES", username, PASSWORD)
self.assertEqual(ret["stdout"], "")
self.assertEqual(ret["stderr"], NOPRIV_STDERR)
self.assertEqual(ret["retcode"], 1)
@with_system_user(
"test-runas", on_existing="delete", delete=True, password=PASSWORD
)
def test_runas_no_pass(self, username):
ret = salt.utils.win_runas.runas("cmd.exe /C OPENFILES", username)
self.assertEqual(ret["stdout"], "")
self.assertEqual(ret["stderr"], NOPRIV_STDERR)
self.assertEqual(ret["retcode"], 1)
@with_system_user(
"test-runas-admin",
on_existing="delete",
delete=True,
password=PASSWORD,
groups=["Administrators"],
)
def test_runas_admin(self, username):
ret = salt.utils.win_runas.runas("cmd.exe /C OPENFILES", username, PASSWORD)
self.assertEqual(ret["stdout"], PRIV_STDOUT)
self.assertEqual(ret["stderr"], "")
self.assertEqual(ret["retcode"], 0)
@with_system_user(
"test-runas-admin",
on_existing="delete",
delete=True,
password=PASSWORD,
groups=["Administrators"],
)
def test_runas_admin_no_pass(self, username):
ret = salt.utils.win_runas.runas("cmd.exe /C OPENFILES", username)
self.assertEqual(ret["stdout"], PRIV_STDOUT)
self.assertEqual(ret["stderr"], "")
self.assertEqual(ret["retcode"], 0)
def test_runas_system_user(self):
ret = salt.utils.win_runas.runas("cmd.exe /C OPENFILES", "SYSTEM")
self.assertEqual(ret["stdout"], PRIV_STDOUT)
self.assertEqual(ret["stderr"], "")
self.assertEqual(ret["retcode"], 0)
def test_runas_network_service(self):
ret = salt.utils.win_runas.runas("cmd.exe /C OPENFILES", "NETWORK SERVICE")
self.assertEqual(ret["stdout"], "")
self.assertEqual(ret["stderr"], NOPRIV_STDERR)
self.assertEqual(ret["retcode"], 1)
def test_runas_local_service(self):
ret = salt.utils.win_runas.runas("cmd.exe /C OPENFILES", "LOCAL SERVICE")
self.assertEqual(ret["stdout"], "")
self.assertEqual(ret["stderr"], NOPRIV_STDERR)
self.assertEqual(ret["retcode"], 1)
@with_system_user(
"test-runas", on_existing="delete", delete=True, password=PASSWORD
)
def test_runas_winrs(self, username):
runaspy = textwrap.dedent(
"""
import sys
import salt.utils.win_runas
username = '{}'
password = '{}'
sys.exit(salt.utils.win_runas.runas('cmd.exe /C OPENFILES', username, password)['retcode'])
""".format(
username, PASSWORD
)
)
with salt.utils.files.fopen(RUNAS_PATH, "w") as fp:
fp.write(runaspy)
ret = subprocess.call(
"cmd.exe /C winrs /r:{} python {}".format(self.hostname, RUNAS_PATH),
shell=True,
)
self.assertEqual(ret, 1)
@with_system_user(
"test-runas", on_existing="delete", delete=True, password=PASSWORD
)
def test_runas_winrs_no_pass(self, username):
runaspy = textwrap.dedent(
"""
import sys
import salt.utils.win_runas
username = '{}'
sys.exit(salt.utils.win_runas.runas('cmd.exe /C OPENFILES', username)['retcode'])
""".format(
username
)
)
with salt.utils.files.fopen(RUNAS_PATH, "w") as fp:
fp.write(runaspy)
ret = subprocess.call(
"cmd.exe /C winrs /r:{} python {}".format(self.hostname, RUNAS_PATH),
shell=True,
)
self.assertEqual(ret, 1)
@with_system_user(
"test-runas-admin",
on_existing="delete",
delete=True,
password=PASSWORD,
groups=["Administrators"],
)
def test_runas_winrs_admin(self, username):
runaspy = textwrap.dedent(
"""
import sys
import salt.utils.win_runas
username = '{}'
password = '{}'
sys.exit(salt.utils.win_runas.runas('cmd.exe /C OPENFILES', username, password)['retcode'])
""".format(
username, PASSWORD
)
)
with salt.utils.files.fopen(RUNAS_PATH, "w") as fp:
fp.write(runaspy)
ret = subprocess.call(
"cmd.exe /C winrs /r:{} python {}".format(self.hostname, RUNAS_PATH),
shell=True,
)
self.assertEqual(ret, 0)
@with_system_user(
"test-runas-admin",
on_existing="delete",
delete=True,
password=PASSWORD,
groups=["Administrators"],
)
def test_runas_winrs_admin_no_pass(self, username):
runaspy = textwrap.dedent(
"""
import sys
import salt.utils.win_runas
username = '{}'
sys.exit(salt.utils.win_runas.runas('cmd.exe /C OPENFILES', username)['retcode'])
""".format(
username
)
)
with salt.utils.files.fopen(RUNAS_PATH, "w") as fp:
fp.write(runaspy)
ret = subprocess.call(
"cmd.exe /C winrs /r:{} python {}".format(self.hostname, RUNAS_PATH),
shell=True,
)
self.assertEqual(ret, 0)
def test_runas_winrs_system_user(self):
runaspy = textwrap.dedent(
"""
import sys
import salt.utils.win_runas
sys.exit(salt.utils.win_runas.runas('cmd.exe /C OPENFILES', 'SYSTEM')['retcode'])
"""
)
with salt.utils.files.fopen(RUNAS_PATH, "w") as fp:
fp.write(runaspy)
ret = subprocess.call(
"cmd.exe /C winrs /r:{} python {}".format(self.hostname, RUNAS_PATH),
shell=True,
)
self.assertEqual(ret, 0)
def test_runas_winrs_network_service_user(self):
runaspy = textwrap.dedent(
"""
import sys
import salt.utils.win_runas
sys.exit(salt.utils.win_runas.runas('cmd.exe /C OPENFILES', 'NETWORK SERVICE')['retcode'])
"""
)
with salt.utils.files.fopen(RUNAS_PATH, "w") as fp:
fp.write(runaspy)
ret = subprocess.call(
"cmd.exe /C winrs /r:{} python {}".format(self.hostname, RUNAS_PATH),
shell=True,
)
self.assertEqual(ret, 1)
def test_runas_winrs_local_service_user(self):
runaspy = textwrap.dedent(
"""
import sys
import salt.utils.win_runas
sys.exit(salt.utils.win_runas.runas('cmd.exe /C OPENFILES', 'LOCAL SERVICE')['retcode'])
"""
)
with salt.utils.files.fopen(RUNAS_PATH, "w") as fp:
fp.write(runaspy)
ret = subprocess.call(
"cmd.exe /C winrs /r:{} python {}".format(self.hostname, RUNAS_PATH),
shell=True,
)
self.assertEqual(ret, 1)
@with_system_user(
"test-runas", on_existing="delete", delete=True, password=PASSWORD
)
def test_runas_powershell_remoting(self, username):
psrp_wrap = "powershell Invoke-Command -ComputerName {} -ScriptBlock {{ {} }}"
runaspy = textwrap.dedent(
"""
import sys
import salt.utils.win_runas
username = '{}'
password = '{}'
sys.exit(salt.utils.win_runas.runas('cmd.exe /C OPENFILES', username, password)['retcode'])
""".format(
username, PASSWORD
)
)
with salt.utils.files.fopen(RUNAS_PATH, "w") as fp:
fp.write(runaspy)
cmd = "python.exe {}".format(RUNAS_PATH)
ret = subprocess.call(psrp_wrap.format(self.hostname, cmd), shell=True)
self.assertEqual(ret, 1)
@with_system_user(
"test-runas", on_existing="delete", delete=True, password=PASSWORD
)
def test_runas_powershell_remoting_no_pass(self, username):
psrp_wrap = "powershell Invoke-Command -ComputerName {} -ScriptBlock {{ {} }}"
runaspy = textwrap.dedent(
"""
import sys
import salt.utils.win_runas
username = '{}'
sys.exit(salt.utils.win_runas.runas('cmd.exe /C OPENFILES', username)['retcode'])
""".format(
username
)
)
with salt.utils.files.fopen(RUNAS_PATH, "w") as fp:
fp.write(runaspy)
cmd = "python.exe {}".format(RUNAS_PATH)
ret = subprocess.call(psrp_wrap.format(self.hostname, cmd), shell=True)
self.assertEqual(ret, 1)
@with_system_user(
"test-runas-admin",
on_existing="delete",
delete=True,
password=PASSWORD,
groups=["Administrators"],
)
def test_runas_powershell_remoting_admin(self, username):
psrp_wrap = "powershell Invoke-Command -ComputerName {} -ScriptBlock {{ {} }}; exit $LASTEXITCODE"
runaspy = textwrap.dedent(
"""
import sys
import salt.utils.win_runas
username = '{}'
password = '{}'
ret = salt.utils.win_runas.runas('cmd.exe /C OPENFILES', username, password)
sys.exit(ret['retcode'])
""".format(
username, PASSWORD
)
)
with salt.utils.files.fopen(RUNAS_PATH, "w") as fp:
fp.write(runaspy)
cmd = "python.exe {}; exit $LASTEXITCODE".format(RUNAS_PATH)
ret = subprocess.call(psrp_wrap.format(self.hostname, cmd), shell=True)
self.assertEqual(ret, 0)
@with_system_user(
"test-runas-admin",
on_existing="delete",
delete=True,
password=PASSWORD,
groups=["Administrators"],
)
def test_runas_powershell_remoting_admin_no_pass(self, username):
psrp_wrap = "powershell Invoke-Command -ComputerName {} -ScriptBlock {{ {} }}; exit $LASTEXITCODE"
runaspy = textwrap.dedent(
"""
import sys
import salt.utils.win_runas
username = '{}'
sys.exit(salt.utils.win_runas.runas('cmd.exe /C OPENFILES', username)['retcode'])
""".format(
username
)
)
with salt.utils.files.fopen(RUNAS_PATH, "w") as fp:
fp.write(runaspy)
cmd = "python.exe {}; exit $LASTEXITCODE".format(RUNAS_PATH)
ret = subprocess.call(psrp_wrap.format(self.hostname, cmd), shell=True)
self.assertEqual(ret, 0)
@with_system_user(
"test-runas", on_existing="delete", delete=True, password=PASSWORD
)
def test_runas_service(self, username, timeout=200):
if os.path.exists(RUNAS_OUT):
os.remove(RUNAS_OUT)
assert not os.path.exists(RUNAS_OUT)
runaspy = SERVICE_SOURCE.format(repr(RUNAS_OUT), username, PASSWORD)
with io.open(RUNAS_PATH, "w", encoding="utf-8") as fp:
fp.write(runaspy)
cmd = "python.exe {}".format(RUNAS_PATH)
ret = subprocess.call(cmd, shell=True)
self.assertEqual(ret, 0)
win32serviceutil.StartService("test service")
wait_for_service("test service")
with salt.utils.files.fopen(RUNAS_OUT, "r") as fp:
ret = yaml.load(fp)
assert ret["retcode"] == 1, ret
@with_system_user(
"test-runas", on_existing="delete", delete=True, password=PASSWORD
)
def test_runas_service_no_pass(self, username, timeout=200):
if os.path.exists(RUNAS_OUT):
os.remove(RUNAS_OUT)
assert not os.path.exists(RUNAS_OUT)
runaspy = SERVICE_SOURCE.format(repr(RUNAS_OUT), username, "")
with io.open(RUNAS_PATH, "w", encoding="utf-8") as fp:
fp.write(runaspy)
cmd = "python.exe {}".format(RUNAS_PATH)
ret = subprocess.call(cmd, shell=True)
self.assertEqual(ret, 0)
win32serviceutil.StartService("test service")
wait_for_service("test service")
with salt.utils.files.fopen(RUNAS_OUT, "r") as fp:
ret = yaml.load(fp)
assert ret["retcode"] == 1, ret
@with_system_user(
"test-runas-admin",
on_existing="delete",
delete=True,
password=PASSWORD,
groups=["Administrators"],
)
def test_runas_service_admin(self, username, timeout=200):
if os.path.exists(RUNAS_OUT):
os.remove(RUNAS_OUT)
assert not os.path.exists(RUNAS_OUT)
runaspy = SERVICE_SOURCE.format(repr(RUNAS_OUT), username, PASSWORD)
with io.open(RUNAS_PATH, "w", encoding="utf-8") as fp:
fp.write(runaspy)
cmd = "python.exe {}".format(RUNAS_PATH)
ret = subprocess.call(cmd, shell=True)
self.assertEqual(ret, 0)
win32serviceutil.StartService("test service")
wait_for_service("test service")
with salt.utils.files.fopen(RUNAS_OUT, "r") as fp:
ret = yaml.load(fp)
assert ret["retcode"] == 0, ret
@with_system_user(
"test-runas-admin",
on_existing="delete",
delete=True,
password=PASSWORD,
groups=["Administrators"],
)
def test_runas_service_admin_no_pass(self, username, timeout=200):
if os.path.exists(RUNAS_OUT):
os.remove(RUNAS_OUT)
assert not os.path.exists(RUNAS_OUT)
runaspy = SERVICE_SOURCE.format(repr(RUNAS_OUT), username, "")
with io.open(RUNAS_PATH, "w", encoding="utf-8") as fp:
fp.write(runaspy)
cmd = "python.exe {}".format(RUNAS_PATH)
ret = subprocess.call(cmd, shell=True)
self.assertEqual(ret, 0)
win32serviceutil.StartService("test service")
wait_for_service("test service")
with salt.utils.files.fopen(RUNAS_OUT, "r") as fp:
ret = yaml.load(fp)
assert ret["retcode"] == 0, ret
def test_runas_service_system_user(self):
if os.path.exists(RUNAS_OUT):
os.remove(RUNAS_OUT)
assert not os.path.exists(RUNAS_OUT)
runaspy = SERVICE_SOURCE.format(repr(RUNAS_OUT), "SYSTEM", "")
with io.open(RUNAS_PATH, "w", encoding="utf-8") as fp:
fp.write(runaspy)
cmd = "python.exe {}".format(RUNAS_PATH)
ret = subprocess.call(cmd, shell=True)
self.assertEqual(ret, 0)
win32serviceutil.StartService("test service")
wait_for_service("test service")
with salt.utils.files.fopen(RUNAS_OUT, "r") as fp:
ret = yaml.load(fp)
assert ret["retcode"] == 0, ret
|
twisterlib.py
|
#!/usr/bin/env python3
# vim: set syntax=python ts=4 :
#
# Copyright (c) 2018 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import os
import contextlib
import string
import mmap
import sys
import re
import subprocess
import select
import shutil
import shlex
import signal
import threading
import concurrent.futures
from collections import OrderedDict
import queue
import time
import csv
import glob
import concurrent
import xml.etree.ElementTree as ET
import logging
import pty
from pathlib import Path
from distutils.spawn import find_executable
from colorama import Fore
import pickle
import platform
import yaml
import json
from multiprocessing import Lock, Process, Value
try:
# Use the C LibYAML parser if available, rather than the Python parser.
# It's much faster.
from yaml import CSafeLoader as SafeLoader
from yaml import CDumper as Dumper
except ImportError:
from yaml import SafeLoader, Dumper
try:
import serial
except ImportError:
print("Install pyserial python module with pip to use --device-testing option.")
try:
from tabulate import tabulate
except ImportError:
print("Install tabulate python module with pip to use --device-testing option.")
try:
import psutil
except ImportError:
print("Install psutil python module with pip to run in Qemu.")
ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
if not ZEPHYR_BASE:
sys.exit("$ZEPHYR_BASE environment variable undefined")
# This is needed to load edt.pickle files.
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts", "dts",
"python-devicetree", "src"))
from devicetree import edtlib # pylint: disable=unused-import
# Use this for internal comparisons; that's what canonicalization is
# for. Don't use it when invoking other components of the build system
# to avoid confusing and hard to trace inconsistencies in error messages
# and logs, generated Makefiles, etc. compared to when users invoke these
# components directly.
# Note "normalization" is different from canonicalization, see os.path.
canonical_zephyr_base = os.path.realpath(ZEPHYR_BASE)
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/"))
import scl
import expr_parser
logger = logging.getLogger('twister')
logger.setLevel(logging.DEBUG)
class ExecutionCounter(object):
def __init__(self, total=0):
self._done = Value('i', 0)
self._passed = Value('i', 0)
self._skipped_configs = Value('i', 0)
self._skipped_runtime = Value('i', 0)
self._skipped_cases = Value('i', 0)
self._error = Value('i', 0)
self._failed = Value('i', 0)
self._total = Value('i', total)
self._cases = Value('i', 0)
self.lock = Lock()
@property
def cases(self):
with self._cases.get_lock():
return self._cases.value
@cases.setter
def cases(self, value):
with self._cases.get_lock():
self._cases.value = value
@property
def skipped_cases(self):
with self._skipped_cases.get_lock():
return self._skipped_cases.value
@skipped_cases.setter
def skipped_cases(self, value):
with self._skipped_cases.get_lock():
self._skipped_cases.value = value
@property
def error(self):
with self._error.get_lock():
return self._error.value
@error.setter
def error(self, value):
with self._error.get_lock():
self._error.value = value
@property
def done(self):
with self._done.get_lock():
return self._done.value
@done.setter
def done(self, value):
with self._done.get_lock():
self._done.value = value
@property
def passed(self):
with self._passed.get_lock():
return self._passed.value
@passed.setter
def passed(self, value):
with self._passed.get_lock():
self._passed.value = value
@property
def skipped_configs(self):
with self._skipped_configs.get_lock():
return self._skipped_configs.value
@skipped_configs.setter
def skipped_configs(self, value):
with self._skipped_configs.get_lock():
self._skipped_configs.value = value
@property
def skipped_runtime(self):
with self._skipped_runtime.get_lock():
return self._skipped_runtime.value
@skipped_runtime.setter
def skipped_runtime(self, value):
with self._skipped_runtime.get_lock():
self._skipped_runtime.value = value
@property
def failed(self):
with self._failed.get_lock():
return self._failed.value
@failed.setter
def failed(self, value):
with self._failed.get_lock():
self._failed.value = value
@property
def total(self):
with self._total.get_lock():
return self._total.value
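# Illustrative note (not part of the original file): ExecutionCounter wraps
# multiprocessing.Value objects so worker processes can update shared totals
# safely. A hypothetical use mirrors how the pipeline code below updates it:
#
#   results = ExecutionCounter(total=100)
#   results.passed += 1   # the property getter/setter each take the Value's lock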
class CMakeCacheEntry:
'''Represents a CMake cache entry.
This class understands the type system in a CMakeCache.txt, and
converts the following cache types to Python types:
Cache Type Python type
---------- -------------------------------------------
FILEPATH str
PATH str
STRING str OR list of str (if ';' is in the value)
BOOL bool
INTERNAL str OR list of str (if ';' is in the value)
---------- -------------------------------------------
'''
# Regular expression for a cache entry.
#
# CMake variable names can include escape characters, allowing a
# wider set of names than is easy to match with a regular
# expression. To be permissive here, use a non-greedy match up to
# the first colon (':'). This breaks if the variable name has a
# colon inside, but it's good enough.
CACHE_ENTRY = re.compile(
r'''(?P<name>.*?) # name
:(?P<type>FILEPATH|PATH|STRING|BOOL|INTERNAL) # type
=(?P<value>.*) # value
''', re.X)
@classmethod
def _to_bool(cls, val):
# Convert a CMake BOOL string into a Python bool.
#
# "True if the constant is 1, ON, YES, TRUE, Y, or a
# non-zero number. False if the constant is 0, OFF, NO,
# FALSE, N, IGNORE, NOTFOUND, the empty string, or ends in
# the suffix -NOTFOUND. Named boolean constants are
# case-insensitive. If the argument is not one of these
# constants, it is treated as a variable."
#
# https://cmake.org/cmake/help/v3.0/command/if.html
val = val.upper()
if val in ('ON', 'YES', 'TRUE', 'Y'):
return True
elif val in ('OFF', 'NO', 'FALSE', 'N', 'IGNORE', 'NOTFOUND', ''):
return False
elif val.endswith('-NOTFOUND'):
return False
else:
try:
v = int(val)
return v != 0
except ValueError as exc:
raise ValueError('invalid bool {}'.format(val)) from exc
@classmethod
def from_line(cls, line, line_no):
# Comments can only occur at the beginning of a line.
# (The value of an entry could contain a comment character).
if line.startswith('//') or line.startswith('#'):
return None
# Whitespace-only lines do not contain cache entries.
if not line.strip():
return None
m = cls.CACHE_ENTRY.match(line)
if not m:
return None
name, type_, value = (m.group(g) for g in ('name', 'type', 'value'))
if type_ == 'BOOL':
try:
value = cls._to_bool(value)
except ValueError as exc:
args = exc.args + ('on line {}: {}'.format(line_no, line),)
raise ValueError(args) from exc
elif type_ in ['STRING', 'INTERNAL']:
# If the value is a CMake list (i.e. is a string which
# contains a ';'), convert to a Python list.
if ';' in value:
value = value.split(';')
return CMakeCacheEntry(name, value)
def __init__(self, name, value):
self.name = name
self.value = value
def __str__(self):
fmt = 'CMakeCacheEntry(name={}, value={})'
return fmt.format(self.name, self.value)
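# Illustration (hypothetical cache lines, not from a real build):
#
#   CMakeCacheEntry.from_line('CMAKE_BUILD_TYPE:STRING=Debug', 1)
#     -> CMakeCacheEntry(name=CMAKE_BUILD_TYPE, value=Debug)
#   CMakeCacheEntry.from_line('CONFIG_FOO:BOOL=ON', 2)
#     -> value converted via _to_bool(), i.e. truthy
#   CMakeCacheEntry.from_line('// a comment', 3)
#     -> None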
class CMakeCache:
'''Parses and represents a CMake cache file.'''
@staticmethod
def from_file(cache_file):
return CMakeCache(cache_file)
def __init__(self, cache_file):
self.cache_file = cache_file
self.load(cache_file)
def load(self, cache_file):
entries = []
with open(cache_file, 'r') as cache:
for line_no, line in enumerate(cache):
entry = CMakeCacheEntry.from_line(line, line_no)
if entry:
entries.append(entry)
self._entries = OrderedDict((e.name, e) for e in entries)
def get(self, name, default=None):
entry = self._entries.get(name)
if entry is not None:
return entry.value
else:
return default
def get_list(self, name, default=None):
if default is None:
default = []
entry = self._entries.get(name)
if entry is not None:
value = entry.value
if isinstance(value, list):
return value
elif isinstance(value, str):
return [value] if value else []
else:
msg = 'invalid value {} type {}'
raise RuntimeError(msg.format(value, type(value)))
else:
return default
def __contains__(self, name):
return name in self._entries
def __getitem__(self, name):
return self._entries[name].value
def __setitem__(self, name, entry):
if not isinstance(entry, CMakeCacheEntry):
msg = 'improper type {} for value {}, expecting CMakeCacheEntry'
raise TypeError(msg.format(type(entry), entry))
self._entries[name] = entry
def __delitem__(self, name):
del self._entries[name]
def __iter__(self):
return iter(self._entries.values())
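# Sketch of typical use (paths and keys are hypothetical):
#
#   cache = CMakeCache.from_file('build/CMakeCache.txt')
#   variant = cache.get('ZEPHYR_TOOLCHAIN_VARIANT', 'zephyr')
#   flags = cache.get_list('EXTRA_C_FLAGS')   # ';'-separated STRING values come back as lists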
class TwisterException(Exception):
pass
class TwisterRuntimeError(TwisterException):
pass
class ConfigurationError(TwisterException):
def __init__(self, cfile, message):
TwisterException.__init__(self, cfile + ": " + message)
class BuildError(TwisterException):
pass
class ExecutionError(TwisterException):
pass
class HarnessImporter:
def __init__(self, name):
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/pylib/twister"))
module = __import__("harness")
if name:
my_class = getattr(module, name)
else:
my_class = getattr(module, "Test")
self.instance = my_class()
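# Descriptive note (not from the original file): callers below capitalize the
# "harness" field from testcase.yaml (e.g. "console") and this importer resolves
# it to the matching class in scripts/pylib/twister/harness.py, falling back to
# the generic Test harness when no name is given.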
class Handler:
def __init__(self, instance, type_str="build"):
"""Constructor
"""
self.state = "waiting"
self.run = False
self.duration = 0
self.type_str = type_str
self.binary = None
self.pid_fn = None
self.call_make_run = False
self.name = instance.name
self.instance = instance
self.timeout = instance.testcase.timeout
self.sourcedir = instance.testcase.source_dir
self.build_dir = instance.build_dir
self.log = os.path.join(self.build_dir, "handler.log")
self.returncode = 0
self.set_state("running", self.duration)
self.generator = None
self.generator_cmd = None
self.args = []
def set_state(self, state, duration):
self.state = state
self.duration = duration
def get_state(self):
ret = (self.state, self.duration)
return ret
def record(self, harness):
if harness.recording:
filename = os.path.join(self.build_dir, "recording.csv")
with open(filename, "at") as csvfile:
cw = csv.writer(csvfile, lineterminator=os.linesep)
cw.writerow(harness.fieldnames)
for instance in harness.recording:
cw.writerow(instance)
class BinaryHandler(Handler):
def __init__(self, instance, type_str):
"""Constructor
@param instance Test Instance
"""
super().__init__(instance, type_str)
self.terminated = False
self.call_west_flash = False
# Tool options
self.valgrind = False
self.lsan = False
self.asan = False
self.ubsan = False
self.coverage = False
def try_kill_process_by_pid(self):
if self.pid_fn:
pid = int(open(self.pid_fn).read())
os.unlink(self.pid_fn)
self.pid_fn = None # clear so we don't try to kill the binary twice
try:
os.kill(pid, signal.SIGTERM)
except ProcessLookupError:
pass
def terminate(self, proc):
# Encapsulate the terminate functionality so we do it consistently wherever
# we might want to terminate the proc. We need try_kill_process_by_pid
# because of how both newer ninja (1.6.0 or greater) and .NET / renode
# work. Newer ninja versions don't seem to pass SIGTERM down to the children,
# so we need to use try_kill_process_by_pid.
for child in psutil.Process(proc.pid).children(recursive=True):
try:
os.kill(child.pid, signal.SIGTERM)
except ProcessLookupError:
pass
proc.terminate()
# sleep for a while before attempting to kill
time.sleep(0.5)
proc.kill()
self.terminated = True
def _output_reader(self, proc):
self.line = proc.stdout.readline()
def _output_handler(self, proc, harness):
log_out_fp = open(self.log, "wt")
timeout_extended = False
timeout_time = time.time() + self.timeout
while True:
this_timeout = timeout_time - time.time()
if this_timeout < 0:
break
reader_t = threading.Thread(target=self._output_reader, args=(proc,), daemon=True)
reader_t.start()
reader_t.join(this_timeout)
if not reader_t.is_alive():
line = self.line
logger.debug("OUTPUT: {0}".format(line.decode('utf-8').rstrip()))
log_out_fp.write(line.decode('utf-8'))
log_out_fp.flush()
harness.handle(line.decode('utf-8').rstrip())
if harness.state:
if not timeout_extended or harness.capture_coverage:
timeout_extended = True
if harness.capture_coverage:
timeout_time = time.time() + 30
else:
timeout_time = time.time() + 2
else:
reader_t.join(0)
break
try:
# POSIX arch based ztests end on their own,
# so let's give it up to 100ms to do so
proc.wait(0.1)
except subprocess.TimeoutExpired:
self.terminate(proc)
log_out_fp.close()
def handle(self):
harness_name = self.instance.testcase.harness.capitalize()
harness_import = HarnessImporter(harness_name)
harness = harness_import.instance
harness.configure(self.instance)
if self.call_make_run:
command = [self.generator_cmd, "run"]
elif self.call_west_flash:
command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir]
else:
command = [self.binary]
run_valgrind = False
if self.valgrind and shutil.which("valgrind"):
command = ["valgrind", "--error-exitcode=2",
"--leak-check=full",
"--suppressions=" + ZEPHYR_BASE + "/scripts/valgrind.supp",
"--log-file=" + self.build_dir + "/valgrind.log"
] + command
run_valgrind = True
logger.debug("Spawning process: " +
" ".join(shlex.quote(word) for word in command) + os.linesep +
"in directory: " + self.build_dir)
start_time = time.time()
env = os.environ.copy()
if self.asan:
env["ASAN_OPTIONS"] = "log_path=stdout:" + \
env.get("ASAN_OPTIONS", "")
if not self.lsan:
env["ASAN_OPTIONS"] += "detect_leaks=0"
if self.ubsan:
env["UBSAN_OPTIONS"] = "log_path=stdout:halt_on_error=1:" + \
env.get("UBSAN_OPTIONS", "")
with subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd=self.build_dir, env=env) as proc:
logger.debug("Spawning BinaryHandler Thread for %s" % self.name)
t = threading.Thread(target=self._output_handler, args=(proc, harness,), daemon=True)
t.start()
t.join()
if t.is_alive():
self.terminate(proc)
t.join()
proc.wait()
self.returncode = proc.returncode
self.try_kill_process_by_pid()
handler_time = time.time() - start_time
if self.coverage:
subprocess.call(["GCOV_PREFIX=" + self.build_dir,
"gcov", self.sourcedir, "-b", "-s", self.build_dir], shell=True)
# FIXME: This is needed when killing the simulator, the console is
# garbled and needs to be reset. Did not find a better way to do that.
if sys.stdout.isatty():
subprocess.call(["stty", "sane"])
self.instance.results = harness.tests
if not self.terminated and self.returncode != 0:
# When a process is killed, the default handler returns 128 + SIGTERM
# so in that case the return code itself is not meaningful
self.set_state("failed", handler_time)
self.instance.reason = "Failed"
elif run_valgrind and self.returncode == 2:
self.set_state("failed", handler_time)
self.instance.reason = "Valgrind error"
elif harness.state:
self.set_state(harness.state, handler_time)
if harness.state == "failed":
self.instance.reason = "Failed"
else:
self.set_state("timeout", handler_time)
self.instance.reason = "Timeout"
self.record(harness)
class DeviceHandler(Handler):
def __init__(self, instance, type_str):
"""Constructor
@param instance Test Instance
"""
super().__init__(instance, type_str)
self.suite = None
def monitor_serial(self, ser, halt_fileno, harness):
log_out_fp = open(self.log, "wt")
ser_fileno = ser.fileno()
readlist = [halt_fileno, ser_fileno]
if self.coverage:
# Set capture_coverage to True to indicate that right after
# test results we should get coverage data, otherwise we exit
# from the test.
harness.capture_coverage = True
ser.flush()
while ser.isOpen():
readable, _, _ = select.select(readlist, [], [], self.timeout)
if halt_fileno in readable:
logger.debug('halted')
ser.close()
break
if ser_fileno not in readable:
continue # Timeout.
serial_line = None
try:
serial_line = ser.readline()
except TypeError:
pass
except serial.SerialException:
ser.close()
break
# Just because ser_fileno has data doesn't mean an entire line
# is available yet.
if serial_line:
sl = serial_line.decode('utf-8', 'ignore').lstrip()
logger.debug("DEVICE: {0}".format(sl.rstrip()))
log_out_fp.write(sl)
log_out_fp.flush()
harness.handle(sl.rstrip())
if harness.state:
if not harness.capture_coverage:
ser.close()
break
log_out_fp.close()
def device_is_available(self, instance):
device = instance.platform.name
fixture = instance.testcase.harness_config.get("fixture")
for d in self.suite.duts:
if fixture and fixture not in d.fixtures:
continue
if d.platform != device or not (d.serial or d.serial_pty):
continue
d.lock.acquire()
avail = False
if d.available:
d.available = 0
d.counter += 1
avail = True
d.lock.release()
if avail:
return d
return None
def make_device_available(self, serial):
for d in self.suite.duts:
if d.serial == serial or d.serial_pty:
d.available = 1
@staticmethod
def run_custom_script(script, timeout):
with subprocess.Popen(script, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
try:
stdout, _ = proc.communicate(timeout=timeout)
logger.debug(stdout.decode())
except subprocess.TimeoutExpired:
proc.kill()
proc.communicate()
logger.error("{} timed out".format(script))
def handle(self):
out_state = "failed"
runner = None
hardware = self.device_is_available(self.instance)
while not hardware:
logger.debug("Waiting for device {} to become available".format(self.instance.platform.name))
time.sleep(1)
hardware = self.device_is_available(self.instance)
runner = hardware.runner or self.suite.west_runner
serial_pty = hardware.serial_pty
ser_pty_process = None
if serial_pty:
master, slave = pty.openpty()
try:
ser_pty_process = subprocess.Popen(re.split(',| ', serial_pty), stdout=master, stdin=master, stderr=master)
except subprocess.CalledProcessError as error:
logger.error("Failed to run subprocess {}, error {}".format(serial_pty, error.output))
return
serial_device = os.ttyname(slave)
else:
serial_device = hardware.serial
logger.debug("Using serial device {}".format(serial_device))
if (self.suite.west_flash is not None) or runner:
command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir]
command_extra_args = []
# There are three ways this option is used.
# 1) bare: --west-flash
# This results in options.west_flash == []
# 2) with a value: --west-flash="--board-id=42"
# This results in options.west_flash == "--board-id=42"
# 3) Multiple values: --west-flash="--board-id=42,--erase"
# This results in options.west_flash == "--board-id=42 --erase"
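# e.g. (hypothetical value): "--board-id=42,--erase" is split on ',' below
# into command_extra_args == ['--board-id=42', '--erase']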
if self.suite.west_flash and self.suite.west_flash != []:
command_extra_args.extend(self.suite.west_flash.split(','))
if runner:
command.append("--runner")
command.append(runner)
board_id = hardware.probe_id or hardware.id
product = hardware.product
if board_id is not None:
if runner == "pyocd":
command_extra_args.append("--board-id")
command_extra_args.append(board_id)
elif runner == "nrfjprog":
command_extra_args.append("--snr")
command_extra_args.append(board_id)
elif runner == "openocd" and product == "STM32 STLink":
command_extra_args.append("--cmd-pre-init")
command_extra_args.append("hla_serial %s" % (board_id))
elif runner == "openocd" and product == "STLINK-V3":
command_extra_args.append("--cmd-pre-init")
command_extra_args.append("hla_serial %s" % (board_id))
elif runner == "openocd" and product == "EDBG CMSIS-DAP":
command_extra_args.append("--cmd-pre-init")
command_extra_args.append("cmsis_dap_serial %s" % (board_id))
elif runner == "jlink":
command.append("--tool-opt=-SelectEmuBySN %s" % (board_id))
if command_extra_args != []:
command.append('--')
command.extend(command_extra_args)
else:
command = [self.generator_cmd, "-C", self.build_dir, "flash"]
pre_script = hardware.pre_script
post_flash_script = hardware.post_flash_script
post_script = hardware.post_script
if pre_script:
self.run_custom_script(pre_script, 30)
try:
ser = serial.Serial(
serial_device,
baudrate=115200,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout=self.timeout
)
except serial.SerialException as e:
self.set_state("failed", 0)
self.instance.reason = "Failed"
logger.error("Serial device error: %s" % (str(e)))
if serial_pty and ser_pty_process:
ser_pty_process.terminate()
outs, errs = ser_pty_process.communicate()
logger.debug("Process {} terminated outs: {} errs {}".format(serial_pty, outs, errs))
self.make_device_available(serial_device)
return
ser.flush()
harness_name = self.instance.testcase.harness.capitalize()
harness_import = HarnessImporter(harness_name)
harness = harness_import.instance
harness.configure(self.instance)
read_pipe, write_pipe = os.pipe()
start_time = time.time()
t = threading.Thread(target=self.monitor_serial, daemon=True,
args=(ser, read_pipe, harness))
t.start()
d_log = "{}/device.log".format(self.instance.build_dir)
logger.debug('Flash command: %s', command)
try:
stdout = stderr = None
with subprocess.Popen(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
try:
(stdout, stderr) = proc.communicate(timeout=30)
logger.debug(stdout.decode())
if proc.returncode != 0:
self.instance.reason = "Device issue (Flash?)"
with open(d_log, "w") as dlog_fp:
dlog_fp.write(stderr.decode())
os.write(write_pipe, b'x') # halt the thread
out_state = "flash_error"
except subprocess.TimeoutExpired:
proc.kill()
(stdout, stderr) = proc.communicate()
self.instance.reason = "Device issue (Timeout)"
with open(d_log, "w") as dlog_fp:
dlog_fp.write(stderr.decode())
except subprocess.CalledProcessError:
os.write(write_pipe, b'x') # halt the thread
if post_flash_script:
self.run_custom_script(post_flash_script, 30)
t.join(self.timeout)
if t.is_alive():
logger.debug("Timed out while monitoring serial output on {}".format(self.instance.platform.name))
out_state = "timeout"
if ser.isOpen():
ser.close()
if serial_pty:
ser_pty_process.terminate()
outs, errs = ser_pty_process.communicate()
logger.debug("Process {} terminated outs: {} errs {}".format(serial_pty, outs, errs))
os.close(write_pipe)
os.close(read_pipe)
handler_time = time.time() - start_time
if out_state in ["timeout", "flash_error"]:
for c in self.instance.testcase.cases:
if c not in harness.tests:
harness.tests[c] = "BLOCK"
if out_state == "timeout":
self.instance.reason = "Timeout"
elif out_state == "flash_error":
self.instance.reason = "Flash error"
self.instance.results = harness.tests
# Sometimes a test instance hasn't been executed successfully and is left
# with an empty results dictionary; fill its results with BLOCK so that it
# is still included in the final report.
if self.instance.results == {}:
for k in self.instance.testcase.cases:
self.instance.results[k] = 'BLOCK'
if harness.state:
self.set_state(harness.state, handler_time)
if harness.state == "failed":
self.instance.reason = "Failed"
else:
self.set_state(out_state, handler_time)
if post_script:
self.run_custom_script(post_script, 30)
self.make_device_available(serial_device)
self.record(harness)
class QEMUHandler(Handler):
"""Spawns a thread to monitor QEMU output from pipes
We pass QEMU_PIPE to 'make run' and monitor the pipes for output.
We need to do this as once qemu starts, it runs forever until killed.
Test cases emit special messages to the console as they run; we check
for these to determine whether the test passed or failed.
"""
def __init__(self, instance, type_str):
"""Constructor
@param instance Test instance
"""
super().__init__(instance, type_str)
self.fifo_fn = os.path.join(instance.build_dir, "qemu-fifo")
self.pid_fn = os.path.join(instance.build_dir, "qemu.pid")
if "ignore_qemu_crash" in instance.testcase.tags:
self.ignore_qemu_crash = True
self.ignore_unexpected_eof = True
else:
self.ignore_qemu_crash = False
self.ignore_unexpected_eof = False
@staticmethod
def _get_cpu_time(pid):
"""get process CPU time.
The guest virtual time in QEMU icount mode isn't host time; it is
maintained by counting guest instructions, so we use the QEMU
process execution time to approximate the time spent in the guest OS.
"""
proc = psutil.Process(pid)
cpu_time = proc.cpu_times()
return cpu_time.user + cpu_time.system
@staticmethod
def _thread(handler, timeout, outdir, logfile, fifo_fn, pid_fn, results, harness,
ignore_unexpected_eof=False):
fifo_in = fifo_fn + ".in"
fifo_out = fifo_fn + ".out"
# These in/out nodes are named from QEMU's perspective, not ours
if os.path.exists(fifo_in):
os.unlink(fifo_in)
os.mkfifo(fifo_in)
if os.path.exists(fifo_out):
os.unlink(fifo_out)
os.mkfifo(fifo_out)
# We don't do anything with out_fp but we need to open it for
# writing so that QEMU doesn't block, due to the way pipes work
out_fp = open(fifo_in, "wb")
# Disable internal buffering, we don't
# want read() or poll() to ever block if there is data in there
in_fp = open(fifo_out, "rb", buffering=0)
log_out_fp = open(logfile, "wt")
start_time = time.time()
timeout_time = start_time + timeout
p = select.poll()
p.register(in_fp, select.POLLIN)
out_state = None
line = ""
timeout_extended = False
pid = 0
if os.path.exists(pid_fn):
pid = int(open(pid_fn).read())
while True:
this_timeout = int((timeout_time - time.time()) * 1000)
if this_timeout < 0 or not p.poll(this_timeout):
try:
if pid and this_timeout > 0:
# We may have polled nothing because the host did not
# schedule enough CPU time to the QEMU process
# during p.poll(this_timeout)
cpu_time = QEMUHandler._get_cpu_time(pid)
if cpu_time < timeout and not out_state:
timeout_time = time.time() + (timeout - cpu_time)
continue
except ProcessLookupError:
out_state = "failed"
break
if not out_state:
out_state = "timeout"
break
if pid == 0 and os.path.exists(pid_fn):
pid = int(open(pid_fn).read())
try:
c = in_fp.read(1).decode("utf-8")
except UnicodeDecodeError:
# Test is writing something weird, fail
out_state = "unexpected byte"
break
if c == "":
# EOF, this shouldn't happen unless QEMU crashes
if not ignore_unexpected_eof:
out_state = "unexpected eof"
break
line = line + c
if c != "\n":
continue
# line contains a full line of data output from QEMU
log_out_fp.write(line)
log_out_fp.flush()
line = line.strip()
logger.debug(f"QEMU ({pid}): {line}")
harness.handle(line)
if harness.state:
# if we have registered a fail make sure the state is not
# overridden by a false success message coming from the
# testsuite
if out_state not in ['failed', 'unexpected eof', 'unexpected byte']:
out_state = harness.state
# if we get some state, that means test is doing well, we reset
# the timeout and wait for 2 more seconds to catch anything
# printed late. We wait much longer if code
# coverage is enabled since dumping this information can
# take some time.
if not timeout_extended or harness.capture_coverage:
timeout_extended = True
if harness.capture_coverage:
timeout_time = time.time() + 30
else:
timeout_time = time.time() + 2
line = ""
handler.record(harness)
handler_time = time.time() - start_time
logger.debug(f"QEMU ({pid}) complete ({out_state}) after {handler_time} seconds")
if out_state == "timeout":
handler.instance.reason = "Timeout"
handler.set_state("failed", handler_time)
elif out_state == "failed":
handler.instance.reason = "Failed"
handler.set_state("failed", handler_time)
elif out_state in ['unexpected eof', 'unexpected byte']:
handler.instance.reason = out_state
handler.set_state("failed", handler_time)
else:
handler.set_state(out_state, handler_time)
log_out_fp.close()
out_fp.close()
in_fp.close()
if pid:
try:
os.kill(pid, signal.SIGTERM)
except ProcessLookupError:
# Oh well, as long as it's dead! User probably sent Ctrl-C
pass
os.unlink(fifo_in)
os.unlink(fifo_out)
def handle(self):
self.results = {}
self.run = True
# We pass this to QEMU which looks for fifos with .in and .out
# suffixes.
self.fifo_fn = os.path.join(self.instance.build_dir, "qemu-fifo")
self.pid_fn = os.path.join(self.instance.build_dir, "qemu.pid")
if os.path.exists(self.pid_fn):
os.unlink(self.pid_fn)
self.log_fn = self.log
harness_import = HarnessImporter(self.instance.testcase.harness.capitalize())
harness = harness_import.instance
harness.configure(self.instance)
self.thread = threading.Thread(name=self.name, target=QEMUHandler._thread,
args=(self, self.timeout, self.build_dir,
self.log_fn, self.fifo_fn,
self.pid_fn, self.results, harness,
self.ignore_unexpected_eof))
self.instance.results = harness.tests
self.thread.daemon = True
logger.debug("Spawning QEMUHandler Thread for %s" % self.name)
self.thread.start()
if sys.stdout.isatty():
subprocess.call(["stty", "sane"])
logger.debug("Running %s (%s)" % (self.name, self.type_str))
command = [self.generator_cmd]
command += ["-C", self.build_dir, "run"]
is_timeout = False
qemu_pid = None
with subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self.build_dir) as proc:
logger.debug("Spawning QEMUHandler Thread for %s" % self.name)
try:
proc.wait(self.timeout)
except subprocess.TimeoutExpired:
# sometimes QEMU can't handle SIGTERM signal correctly
# in that case kill -9 QEMU process directly and leave
# twister to judge testing result by console output
is_timeout = True
if os.path.exists(self.pid_fn):
qemu_pid = int(open(self.pid_fn).read())
try:
os.kill(qemu_pid, signal.SIGKILL)
except ProcessLookupError:
pass
proc.wait()
if harness.state == "passed":
self.returncode = 0
else:
self.returncode = proc.returncode
else:
proc.terminate()
proc.kill()
self.returncode = proc.returncode
else:
if os.path.exists(self.pid_fn):
qemu_pid = int(open(self.pid_fn).read())
logger.debug(f"No timeout, return code from QEMU ({qemu_pid}): {proc.returncode}")
self.returncode = proc.returncode
# Need to wait for harness to finish processing
# output from QEMU. Otherwise it might miss some
# error messages.
self.thread.join()
if os.path.exists(self.pid_fn):
qemu_pid = int(open(self.pid_fn).read())
os.unlink(self.pid_fn)
logger.debug(f"return code from QEMU ({qemu_pid}): {self.returncode}")
if (self.returncode != 0 and not self.ignore_qemu_crash) or not harness.state:
self.set_state("failed", 0)
if is_timeout:
self.instance.reason = "Timeout"
else:
self.instance.reason = "Exited with {}".format(self.returncode)
def get_fifo(self):
return self.fifo_fn
class SizeCalculator:
alloc_sections = [
"bss",
"noinit",
"app_bss",
"app_noinit",
"ccm_bss",
"ccm_noinit"
]
rw_sections = [
"datas",
"initlevel",
"exceptions",
"initshell",
"_static_thread_data_area",
"k_timer_area",
"k_mem_slab_area",
"k_mem_pool_area",
"sw_isr_table",
"k_sem_area",
"k_mutex_area",
"app_shmem_regions",
"_k_fifo_area",
"_k_lifo_area",
"k_stack_area",
"k_msgq_area",
"k_mbox_area",
"k_pipe_area",
"net_if_area",
"net_if_dev_area",
"net_l2_area",
"net_l2_data",
"k_queue_area",
"_net_buf_pool_area",
"app_datas",
"kobject_data",
"mmu_tables",
"app_pad",
"priv_stacks",
"ccm_data",
"usb_descriptor",
"usb_data", "usb_bos_desc",
"uart_mux",
'log_backends_sections',
'log_dynamic_sections',
'log_const_sections',
"app_smem",
'shell_root_cmds_sections',
'log_const_sections',
"font_entry_sections",
"priv_stacks_noinit",
"_GCOV_BSS_SECTION_NAME",
"gcov",
"nocache",
"devices",
"k_heap_area",
]
# These get copied into RAM only on non-XIP
ro_sections = [
"rom_start",
"text",
"ctors",
"init_array",
"reset",
"z_object_assignment_area",
"rodata",
"net_l2",
"vector",
"sw_isr_table",
"settings_handler_static_area",
"bt_l2cap_fixed_chan_area",
"bt_l2cap_br_fixed_chan_area",
"bt_gatt_service_static_area",
"vectors",
"net_socket_register_area",
"net_ppp_proto",
"shell_area",
"tracing_backend_area",
"ppp_protocol_handler_area",
]
def __init__(self, filename, extra_sections):
"""Constructor
@param filename Path to the output binary
@param extra_sections List of additional section names that should be
treated as recognized when calculating sizes
The <filename> is parsed by objdump to determine section sizes
"""
# Make sure this is an ELF binary
with open(filename, "rb") as f:
magic = f.read(4)
try:
if magic != b'\x7fELF':
raise TwisterRuntimeError("%s is not an ELF binary" % filename)
except Exception as e:
print(str(e))
sys.exit(2)
# Search for CONFIG_XIP in the ELF's list of symbols using NM and AWK.
# GREP can not be used as it returns an error if the symbol is not
# found.
is_xip_command = "nm " + filename + \
" | awk '/CONFIG_XIP/ { print $3 }'"
is_xip_output = subprocess.check_output(
is_xip_command, shell=True, stderr=subprocess.STDOUT).decode(
"utf-8").strip()
try:
if is_xip_output.endswith("no symbols"):
raise TwisterRuntimeError("%s has no symbol information" % filename)
except Exception as e:
print(str(e))
sys.exit(2)
self.is_xip = (len(is_xip_output) != 0)
self.filename = filename
self.sections = []
self.rom_size = 0
self.ram_size = 0
self.extra_sections = extra_sections
self._calculate_sizes()
def get_ram_size(self):
"""Get the amount of RAM the application will use up on the device
@return amount of RAM, in bytes
"""
return self.ram_size
def get_rom_size(self):
"""Get the size of the data that this application uses on device's flash
@return amount of ROM, in bytes
"""
return self.rom_size
def unrecognized_sections(self):
"""Get a list of sections inside the binary that weren't recognized
@return list of unrecognized section names
"""
slist = []
for v in self.sections:
if not v["recognized"]:
slist.append(v["name"])
return slist
def _calculate_sizes(self):
""" Calculate RAM and ROM usage by section """
objdump_command = "objdump -h " + self.filename
objdump_output = subprocess.check_output(
objdump_command, shell=True).decode("utf-8").splitlines()
for line in objdump_output:
words = line.split()
if not words: # Skip lines that are too short
continue
index = words[0]
if not index[0].isdigit(): # Skip lines that do not start
continue # with a digit
name = words[1] # Skip lines with section names
if name[0] == '.': # starting with '.'
continue
# TODO this doesn't actually reflect the size in flash or RAM as
# it doesn't include linker-imposed padding between sections.
# It is close though.
size = int(words[2], 16)
if size == 0:
continue
load_addr = int(words[4], 16)
virt_addr = int(words[3], 16)
# Add section to memory use totals (for both non-XIP and XIP scenarios)
# Unrecognized section names are not included in the calculations.
recognized = True
if name in SizeCalculator.alloc_sections:
self.ram_size += size
stype = "alloc"
elif name in SizeCalculator.rw_sections:
self.ram_size += size
self.rom_size += size
stype = "rw"
elif name in SizeCalculator.ro_sections:
self.rom_size += size
if not self.is_xip:
self.ram_size += size
stype = "ro"
else:
stype = "unknown"
if name not in self.extra_sections:
recognized = False
self.sections.append({"name": name, "load_addr": load_addr,
"size": size, "virt_addr": virt_addr,
"type": stype, "recognized": recognized})
class TwisterConfigParser:
"""Class to read test case files with semantic checking
"""
def __init__(self, filename, schema):
"""Instantiate a new TwisterConfigParser object
@param filename Source .yaml file to read
@param schema yaml schema used to verify the file's structure
"""
self.data = {}
self.schema = schema
self.filename = filename
self.tests = {}
self.common = {}
def load(self):
self.data = scl.yaml_load_verify(self.filename, self.schema)
if 'tests' in self.data:
self.tests = self.data['tests']
if 'common' in self.data:
self.common = self.data['common']
def _cast_value(self, value, typestr):
if isinstance(value, str):
v = value.strip()
if typestr == "str":
return v
elif typestr == "float":
return float(value)
elif typestr == "int":
return int(value)
elif typestr == "bool":
return value
elif typestr.startswith("list") and isinstance(value, list):
return value
elif typestr.startswith("list") and isinstance(value, str):
vs = v.split()
if len(typestr) > 4 and typestr[4] == ":":
return [self._cast_value(vsi, typestr[5:]) for vsi in vs]
else:
return vs
elif typestr.startswith("set"):
vs = v.split()
if len(typestr) > 3 and typestr[3] == ":":
return {self._cast_value(vsi, typestr[4:]) for vsi in vs}
else:
return set(vs)
elif typestr.startswith("map"):
return value
else:
raise ConfigurationError(
self.filename, "unknown type '%s'" % value)
def get_test(self, name, valid_keys):
"""Get a dictionary representing the keys/values within a test
@param name The test in the .yaml file to retrieve data from
@param valid_keys A dictionary representing the intended semantics
for this test. Each key in this dictionary is a key that could
be specified, if a key is given in the .yaml file which isn't in
here, it will generate an error. Each value in this dictionary
is another dictionary containing metadata:
"default" - Default value if not given
"type" - Data type to convert the text value to. Simple types
supported are "str", "float", "int", "bool" which will get
converted to respective Python data types. "set" and "list"
may also be specified which will split the value by
whitespace (but keep the elements as strings). finally,
"list:<type>" and "set:<type>" may be given which will
perform a type conversion after splitting the value up.
"required" - If true, raise an error if not defined. If false
and "default" isn't specified, a type conversion will be
done on an empty string
@return A dictionary containing the test key-value pairs with
type conversion and default values filled in per valid_keys
"""
d = {}
for k, v in self.common.items():
d[k] = v
for k, v in self.tests[name].items():
if k in d:
if isinstance(d[k], str):
# By default, we just concatenate string values of keys
# which appear both in "common" and per-test sections,
# but some keys are handled in adhoc way based on their
# semantics.
if k == "filter":
d[k] = "(%s) and (%s)" % (d[k], v)
else:
d[k] += " " + v
else:
d[k] = v
for k, kinfo in valid_keys.items():
if k not in d:
if "required" in kinfo:
required = kinfo["required"]
else:
required = False
if required:
raise ConfigurationError(
self.filename,
"missing required value for '%s' in test '%s'" %
(k, name))
else:
if "default" in kinfo:
default = kinfo["default"]
else:
default = self._cast_value("", kinfo["type"])
d[k] = default
else:
try:
d[k] = self._cast_value(d[k], kinfo["type"])
except ValueError:
raise ConfigurationError(
self.filename, "bad %s value '%s' for key '%s' in name '%s'" %
(kinfo["type"], d[k], k, name))
return d
class Platform:
"""Class representing metadata for a particular platform
Maps directly to BOARD when building"""
platform_schema = scl.yaml_load(os.path.join(ZEPHYR_BASE,
"scripts", "schemas", "twister", "platform-schema.yaml"))
def __init__(self):
"""Constructor.
"""
self.name = ""
self.twister = True
# if no RAM size is specified by the board, take a default of 128K
self.ram = 128
self.ignore_tags = []
self.only_tags = []
self.default = False
# if no flash size is specified by the board, take a default of 512K
self.flash = 512
self.supported = set()
self.arch = ""
self.type = "na"
self.simulation = "na"
self.supported_toolchains = []
self.env = []
self.env_satisfied = True
self.filter_data = dict()
def load(self, platform_file):
scp = TwisterConfigParser(platform_file, self.platform_schema)
scp.load()
data = scp.data
self.name = data['identifier']
self.twister = data.get("twister", True)
# if no RAM size is specified by the board, take a default of 128K
self.ram = data.get("ram", 128)
testing = data.get("testing", {})
self.ignore_tags = testing.get("ignore_tags", [])
self.only_tags = testing.get("only_tags", [])
self.default = testing.get("default", False)
# if no flash size is specified by the board, take a default of 512K
self.flash = data.get("flash", 512)
self.supported = set()
for supp_feature in data.get("supported", []):
for item in supp_feature.split(":"):
self.supported.add(item)
self.arch = data['arch']
self.type = data.get('type', "na")
self.simulation = data.get('simulation', "na")
self.supported_toolchains = data.get("toolchain", [])
self.env = data.get("env", [])
self.env_satisfied = True
for env in self.env:
if not os.environ.get(env, None):
self.env_satisfied = False
def __repr__(self):
return "<%s on %s>" % (self.name, self.arch)
class DisablePyTestCollectionMixin(object):
__test__ = False
class TestCase(DisablePyTestCollectionMixin):
"""Class representing a test application
"""
def __init__(self, testcase_root, workdir, name):
"""TestCase constructor.
This gets called by TestSuite as it finds and reads test yaml files.
Multiple TestCase instances may be generated from a single testcase.yaml,
each one corresponds to an entry within that file.
We need to have a unique name for every single test case. Since
a testcase.yaml can define multiple tests, the canonical name for
the test case is <workdir>/<name>.
@param testcase_root os.path.abspath() of one of the --testcase-root
@param workdir Sub-directory of testcase_root where the
.yaml test configuration file was found
@param name Name of this test case, corresponding to the entry name
in the test case configuration file. For many test cases that just
define one test, this can be anything and is usually "test". This is
really only used to distinguish between different cases when
the testcase.yaml defines multiple tests
"""
self.source_dir = ""
self.yamlfile = ""
self.cases = []
self.name = self.get_unique(testcase_root, workdir, name)
self.id = name
self.type = None
self.tags = set()
self.extra_args = None
self.extra_configs = None
self.arch_allow = None
self.arch_exclude = None
self.skip = False
self.platform_exclude = None
self.platform_allow = None
self.toolchain_exclude = None
self.toolchain_allow = None
self.tc_filter = None
self.timeout = 60
self.harness = ""
self.harness_config = {}
self.build_only = True
self.build_on_all = False
self.slow = False
self.min_ram = -1
self.depends_on = None
self.min_flash = -1
self.extra_sections = None
self.integration_platforms = []
@staticmethod
def get_unique(testcase_root, workdir, name):
canonical_testcase_root = os.path.realpath(testcase_root)
if Path(canonical_zephyr_base) in Path(canonical_testcase_root).parents:
# This is in ZEPHYR_BASE, so include path in name for uniqueness
# FIXME: We should not depend on path of test for unique names.
relative_tc_root = os.path.relpath(canonical_testcase_root,
start=canonical_zephyr_base)
else:
relative_tc_root = ""
# workdir can be "."
unique = os.path.normpath(os.path.join(relative_tc_root, workdir, name))
check = name.split(".")
if len(check) < 2:
raise TwisterException(f"""bad test name '{name}' in {testcase_root}/{workdir}. \
Tests should reference the category and subsystem with a dot as a separator.
"""
)
return unique
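# e.g. (hypothetical layout): for a testcase_root of ZEPHYR_BASE/tests, a
# workdir of "kernel/sched/schedule_api" and a name of "kernel.sched", the
# unique name becomes "tests/kernel/sched/schedule_api/kernel.sched".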
@staticmethod
def scan_file(inf_name):
suite_regex = re.compile(
# do not match until end-of-line, otherwise we won't allow
# stc_regex below to catch the ones that are declared in the same
# line--as we only search starting at the end of this match
br"^\s*ztest_test_suite\(\s*(?P<suite_name>[a-zA-Z0-9_]+)\s*,",
re.MULTILINE)
stc_regex = re.compile(
br"^\s*" # empy space at the beginning is ok
# catch the case where it is declared in the same sentence, e.g:
#
# ztest_test_suite(mutex_complex, ztest_user_unit_test(TESTNAME));
br"(?:ztest_test_suite\([a-zA-Z0-9_]+,\s*)?"
# Catch ztest[_user]_unit_test-[_setup_teardown](TESTNAME)
br"ztest_(?:1cpu_)?(?:user_)?unit_test(?:_setup_teardown)?"
# Consume the argument that becomes the extra testcse
br"\(\s*"
br"(?P<stc_name>[a-zA-Z0-9_]+)"
# _setup_teardown() variant has two extra arguments that we ignore
br"(?:\s*,\s*[a-zA-Z0-9_]+\s*,\s*[a-zA-Z0-9_]+)?"
br"\s*\)",
# We don't check how it finishes; we don't care
re.MULTILINE)
suite_run_regex = re.compile(
br"^\s*ztest_run_test_suite\((?P<suite_name>[a-zA-Z0-9_]+)\)",
re.MULTILINE)
achtung_regex = re.compile(
br"(#ifdef|#endif)",
re.MULTILINE)
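# The regexes above are meant to pull sub-test names out of C sources such as
# this illustrative (made-up) snippet:
#
#   ztest_test_suite(mutex_complex,
#       ztest_unit_test(test_mutex_lock),
#       ztest_user_unit_test(test_mutex_timeout));
#   ztest_run_test_suite(mutex_complex);
#
# which scan_file() would report as the sub-cases ["mutex_lock", "mutex_timeout"].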
warnings = None
with open(inf_name) as inf:
if os.name == 'nt':
mmap_args = {'fileno': inf.fileno(), 'length': 0, 'access': mmap.ACCESS_READ}
else:
mmap_args = {'fileno': inf.fileno(), 'length': 0, 'flags': mmap.MAP_PRIVATE, 'prot': mmap.PROT_READ,
'offset': 0}
with contextlib.closing(mmap.mmap(**mmap_args)) as main_c:
suite_regex_match = suite_regex.search(main_c)
if not suite_regex_match:
# can't find ztest_test_suite, maybe a client, because
# it includes ztest.h
return None, None
suite_run_match = suite_run_regex.search(main_c)
if not suite_run_match:
raise ValueError("can't find ztest_run_test_suite")
achtung_matches = re.findall(
achtung_regex,
main_c[suite_regex_match.end():suite_run_match.start()])
if achtung_matches:
warnings = "found invalid %s in ztest_test_suite()" \
% ", ".join(sorted({match.decode() for match in achtung_matches},reverse = True))
_matches = re.findall(
stc_regex,
main_c[suite_regex_match.end():suite_run_match.start()])
for match in _matches:
if not match.decode().startswith("test_"):
warnings = "Found a test that does not start with test_"
matches = [match.decode().replace("test_", "", 1) for match in _matches]
return matches, warnings
def scan_path(self, path):
subcases = []
for filename in glob.glob(os.path.join(path, "src", "*.c*")):
try:
_subcases, warnings = self.scan_file(filename)
if warnings:
logger.error("%s: %s" % (filename, warnings))
raise TwisterRuntimeError("%s: %s" % (filename, warnings))
if _subcases:
subcases += _subcases
except ValueError as e:
logger.error("%s: can't find: %s" % (filename, e))
for filename in glob.glob(os.path.join(path, "*.c")):
try:
_subcases, warnings = self.scan_file(filename)
if warnings:
logger.error("%s: %s" % (filename, warnings))
if _subcases:
subcases += _subcases
except ValueError as e:
logger.error("%s: can't find: %s" % (filename, e))
return subcases
def parse_subcases(self, test_path):
results = self.scan_path(test_path)
for sub in results:
name = "{}.{}".format(self.id, sub)
self.cases.append(name)
if not results:
self.cases.append(self.id)
def __str__(self):
return self.name
class TestInstance(DisablePyTestCollectionMixin):
"""Class representing the execution of a particular TestCase on a platform
@param test The TestCase object we want to build/execute
@param platform Platform object that we want to build and run against
@param base_outdir Base directory for all test results. The actual
out directory used is <outdir>/<platform>/<test case name>
"""
def __init__(self, testcase, platform, outdir):
self.testcase = testcase
self.platform = platform
self.status = None
self.reason = "Unknown"
self.metrics = dict()
self.handler = None
self.outdir = outdir
self.name = os.path.join(platform.name, testcase.name)
self.build_dir = os.path.join(outdir, platform.name, testcase.name)
self.run = False
self.results = {}
def __getstate__(self):
d = self.__dict__.copy()
return d
def __setstate__(self, d):
self.__dict__.update(d)
def __lt__(self, other):
return self.name < other.name
@staticmethod
def testcase_runnable(testcase, fixtures):
can_run = False
# console harness allows us to run the test and capture data.
if testcase.harness in [ 'console', 'ztest']:
can_run = True
# if we have a fixture that is also being supplied on the
# command-line, then we need to run the test, not just build it.
fixture = testcase.harness_config.get('fixture')
if fixture:
can_run = (fixture in fixtures)
elif testcase.harness:
can_run = False
else:
can_run = True
return can_run
# Global testsuite parameters
def check_runnable(self, enable_slow=False, filter='buildable', fixtures=[]):
# Right now we only support building on Windows; running the tests there
# is still work in progress.
if os.name == 'nt':
return False
# we asked for build-only on the command line
if self.testcase.build_only:
return False
# Do not run slow tests:
skip_slow = self.testcase.slow and not enable_slow
if skip_slow:
return False
target_ready = bool(self.testcase.type == "unit" or \
self.platform.type == "native" or \
self.platform.simulation in ["mdb-nsim", "nsim", "renode", "qemu", "tsim", "armfvp"] or \
filter == 'runnable')
if self.platform.simulation == "nsim":
if not find_executable("nsimdrv"):
target_ready = False
if self.platform.simulation == "mdb-nsim":
if not find_executable("mdb"):
target_ready = False
if self.platform.simulation == "renode":
if not find_executable("renode"):
target_ready = False
if self.platform.simulation == "tsim":
if not find_executable("tsim-leon3"):
target_ready = False
testcase_runnable = self.testcase_runnable(self.testcase, fixtures)
return testcase_runnable and target_ready
def create_overlay(self, platform, enable_asan=False, enable_ubsan=False, enable_coverage=False, coverage_platform=[]):
# Create this in a "twister/" subdirectory, otherwise this overlay
# would be passed to kconfig.py *twice* and kconfig.cmake would
# silently give that second copy precedence over any
# --extra-args=CONFIG_*
subdir = os.path.join(self.build_dir, "twister")
content = ""
if self.testcase.extra_configs:
content = "\n".join(self.testcase.extra_configs)
if enable_coverage:
if platform.name in coverage_platform:
content = content + "\nCONFIG_COVERAGE=y"
content = content + "\nCONFIG_COVERAGE_DUMP=y"
if enable_asan:
if platform.type == "native":
content = content + "\nCONFIG_ASAN=y"
if enable_ubsan:
if platform.type == "native":
content = content + "\nCONFIG_UBSAN=y"
if content:
os.makedirs(subdir, exist_ok=True)
file = os.path.join(subdir, "testcase_extra.conf")
with open(file, "w") as f:
f.write(content)
return content
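# For example (hypothetical inputs): with extra_configs == ["CONFIG_FOO=y"] and
# coverage enabled for this platform, twister/testcase_extra.conf would contain:
#
#   CONFIG_FOO=y
#   CONFIG_COVERAGE=y
#   CONFIG_COVERAGE_DUMP=y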
def calculate_sizes(self):
"""Get the RAM/ROM sizes of a test case.
This can only be run after the instance has been executed by
MakeGenerator, otherwise there won't be any binaries to measure.
@return A SizeCalculator object
"""
fns = glob.glob(os.path.join(self.build_dir, "zephyr", "*.elf"))
fns.extend(glob.glob(os.path.join(self.build_dir, "zephyr", "*.exe")))
fns = [x for x in fns if not x.endswith('_prebuilt.elf')]
if len(fns) != 1:
raise BuildError("Missing/multiple output ELF binary")
return SizeCalculator(fns[0], self.testcase.extra_sections)
def fill_results_by_status(self):
"""Fills results according to self.status
The method is used to propagate the instance level status
to the test cases inside. Useful when the whole instance is skipped
and the info is required also at the test cases level for reporting.
Should be used with caution, e.g. should not be used
to fill all results with passes
"""
status_to_verdict = {
'skipped': 'SKIP',
'error': 'BLOCK',
'failure': 'FAILED'
}
for k in self.results:
self.results[k] = status_to_verdict[self.status]
def __repr__(self):
return "<TestCase %s on %s>" % (self.testcase.name, self.platform.name)
class CMake():
config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
def __init__(self, testcase, platform, source_dir, build_dir):
self.cwd = None
self.capture_output = True
self.defconfig = {}
self.cmake_cache = {}
self.instance = None
self.testcase = testcase
self.platform = platform
self.source_dir = source_dir
self.build_dir = build_dir
self.log = "build.log"
self.generator = None
self.generator_cmd = None
def parse_generated(self):
self.defconfig = {}
return {}
def run_build(self, args=[]):
logger.debug("Building %s for %s" % (self.source_dir, self.platform.name))
cmake_args = []
cmake_args.extend(args)
cmake = shutil.which('cmake')
cmd = [cmake] + cmake_args
kwargs = dict()
if self.capture_output:
kwargs['stdout'] = subprocess.PIPE
# CMake sends the output of message() to stderr unless it's STATUS
kwargs['stderr'] = subprocess.STDOUT
if self.cwd:
kwargs['cwd'] = self.cwd
p = subprocess.Popen(cmd, **kwargs)
out, _ = p.communicate()
results = {}
if p.returncode == 0:
msg = "Finished building %s for %s" % (self.source_dir, self.platform.name)
self.instance.status = "passed"
results = {'msg': msg, "returncode": p.returncode, "instance": self.instance}
if out:
log_msg = out.decode(sys.getdefaultencoding())
with open(os.path.join(self.build_dir, self.log), "a") as log:
log.write(log_msg)
else:
return None
else:
# A real error occurred, raise an exception
log_msg = ""
if out:
log_msg = out.decode(sys.getdefaultencoding())
with open(os.path.join(self.build_dir, self.log), "a") as log:
log.write(log_msg)
if log_msg:
res = re.findall("region `(FLASH|RAM|SRAM)' overflowed by", log_msg)
if res and not self.overflow_as_errors:
logger.debug("Test skipped due to {} Overflow".format(res[0]))
self.instance.status = "skipped"
self.instance.reason = "{} overflow".format(res[0])
else:
self.instance.status = "error"
self.instance.reason = "Build failure"
results = {
"returncode": p.returncode,
"instance": self.instance,
}
return results
def run_cmake(self, args=[]):
if self.warnings_as_errors:
ldflags = "-Wl,--fatal-warnings"
cflags = "-Werror"
aflags = "-Wa,--fatal-warnings"
gen_defines_args = "--err-on-deprecated-properties"
else:
ldflags = cflags = aflags = ""
gen_defines_args = ""
logger.debug("Running cmake on %s for %s" % (self.source_dir, self.platform.name))
cmake_args = [
f'-B{self.build_dir}',
f'-S{self.source_dir}',
f'-DEXTRA_CFLAGS="{cflags}"',
f'-DEXTRA_AFLAGS="{aflags}"',
f'-DEXTRA_LDFLAGS="{ldflags}"',
f'-DEXTRA_GEN_DEFINES_ARGS={gen_defines_args}',
f'-G{self.generator}'
]
if self.cmake_only:
cmake_args.append("-DCMAKE_EXPORT_COMPILE_COMMANDS=1")
args = ["-D{}".format(a.replace('"', '')) for a in args]
cmake_args.extend(args)
cmake_opts = ['-DBOARD={}'.format(self.platform.name)]
cmake_args.extend(cmake_opts)
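# At this point cmake_args looks roughly like (hypothetical native_posix build
# with a Ninja generator):
#   ['-Bbuild/native_posix/foo', '-S/path/to/test', '-DEXTRA_CFLAGS="-Werror"',
#    ..., '-GNinja', '-DBOARD=native_posix']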
logger.debug("Calling cmake with arguments: {}".format(cmake_args))
cmake = shutil.which('cmake')
cmd = [cmake] + cmake_args
kwargs = dict()
if self.capture_output:
kwargs['stdout'] = subprocess.PIPE
# CMake sends the output of message() to stderr unless it's STATUS
kwargs['stderr'] = subprocess.STDOUT
if self.cwd:
kwargs['cwd'] = self.cwd
p = subprocess.Popen(cmd, **kwargs)
out, _ = p.communicate()
if p.returncode == 0:
filter_results = self.parse_generated()
msg = "Finished building %s for %s" % (self.source_dir, self.platform.name)
logger.debug(msg)
results = {'msg': msg, 'filter': filter_results}
else:
self.instance.status = "error"
self.instance.reason = "Cmake build failure"
logger.error("Cmake build failure: %s for %s" % (self.source_dir, self.platform.name))
results = {"returncode": p.returncode}
if out:
with open(os.path.join(self.build_dir, self.log), "a") as log:
log_msg = out.decode(sys.getdefaultencoding())
log.write(log_msg)
return results
@staticmethod
def run_cmake_script(args=[]):
logger.debug("Running cmake script %s" % (args[0]))
cmake_args = ["-D{}".format(a.replace('"', '')) for a in args[1:]]
cmake_args.extend(['-P', args[0]])
logger.debug("Calling cmake with arguments: {}".format(cmake_args))
cmake = shutil.which('cmake')
if not cmake:
msg = "Unable to find `cmake` in path"
logger.error(msg)
raise Exception(msg)
cmd = [cmake] + cmake_args
kwargs = dict()
kwargs['stdout'] = subprocess.PIPE
# CMake sends the output of message() to stderr unless it's STATUS
kwargs['stderr'] = subprocess.STDOUT
p = subprocess.Popen(cmd, **kwargs)
out, _ = p.communicate()
if p.returncode == 0:
msg = "Finished running %s" % (args[0])
logger.debug(msg)
results = {"returncode": p.returncode, "msg": msg, "stdout": out}
else:
logger.error("Cmake script failure: %s" % (args[0]))
results = {"returncode": p.returncode}
return results
class FilterBuilder(CMake):
def __init__(self, testcase, platform, source_dir, build_dir):
super().__init__(testcase, platform, source_dir, build_dir)
self.log = "config-twister.log"
def parse_generated(self):
if self.platform.name == "unit_testing":
return {}
cmake_cache_path = os.path.join(self.build_dir, "CMakeCache.txt")
defconfig_path = os.path.join(self.build_dir, "zephyr", ".config")
with open(defconfig_path, "r") as fp:
defconfig = {}
for line in fp.readlines():
m = self.config_re.match(line)
if not m:
if line.strip() and not line.startswith("#"):
sys.stderr.write("Unrecognized line %s\n" % line)
continue
defconfig[m.group(1)] = m.group(2).strip()
self.defconfig = defconfig
cmake_conf = {}
try:
cache = CMakeCache.from_file(cmake_cache_path)
except FileNotFoundError:
cache = {}
for k in iter(cache):
cmake_conf[k.name] = k.value
self.cmake_cache = cmake_conf
filter_data = {
"ARCH": self.platform.arch,
"PLATFORM": self.platform.name
}
filter_data.update(os.environ)
filter_data.update(self.defconfig)
filter_data.update(self.cmake_cache)
edt_pickle = os.path.join(self.build_dir, "zephyr", "edt.pickle")
if self.testcase and self.testcase.tc_filter:
try:
if os.path.exists(edt_pickle):
with open(edt_pickle, 'rb') as f:
edt = pickle.load(f)
else:
edt = None
res = expr_parser.parse(self.testcase.tc_filter, filter_data, edt)
except (ValueError, SyntaxError) as se:
sys.stderr.write(
"Failed processing %s\n" % self.testcase.yamlfile)
raise se
if not res:
return {os.path.join(self.platform.name, self.testcase.name): True}
else:
return {os.path.join(self.platform.name, self.testcase.name): False}
else:
self.platform.filter_data = filter_data
return filter_data
class ProjectBuilder(FilterBuilder):
def __init__(self, suite, instance, **kwargs):
super().__init__(instance.testcase, instance.platform, instance.testcase.source_dir, instance.build_dir)
self.log = "build.log"
self.instance = instance
self.suite = suite
self.filtered_tests = 0
self.lsan = kwargs.get('lsan', False)
self.asan = kwargs.get('asan', False)
self.ubsan = kwargs.get('ubsan', False)
self.valgrind = kwargs.get('valgrind', False)
self.extra_args = kwargs.get('extra_args', [])
self.device_testing = kwargs.get('device_testing', False)
self.cmake_only = kwargs.get('cmake_only', False)
self.cleanup = kwargs.get('cleanup', False)
self.coverage = kwargs.get('coverage', False)
self.inline_logs = kwargs.get('inline_logs', False)
self.generator = kwargs.get('generator', None)
self.generator_cmd = kwargs.get('generator_cmd', None)
self.verbose = kwargs.get('verbose', None)
self.warnings_as_errors = kwargs.get('warnings_as_errors', True)
self.overflow_as_errors = kwargs.get('overflow_as_errors', False)
@staticmethod
def log_info(filename, inline_logs):
filename = os.path.abspath(os.path.realpath(filename))
if inline_logs:
logger.info("{:-^100}".format(filename))
try:
with open(filename) as fp:
data = fp.read()
except Exception as e:
data = "Unable to read log data (%s)\n" % (str(e))
logger.error(data)
logger.info("{:-^100}".format(filename))
else:
logger.error("see: " + Fore.YELLOW + filename + Fore.RESET)
def log_info_file(self, inline_logs):
build_dir = self.instance.build_dir
h_log = "{}/handler.log".format(build_dir)
b_log = "{}/build.log".format(build_dir)
v_log = "{}/valgrind.log".format(build_dir)
d_log = "{}/device.log".format(build_dir)
if os.path.exists(v_log) and "Valgrind" in self.instance.reason:
self.log_info("{}".format(v_log), inline_logs)
elif os.path.exists(h_log) and os.path.getsize(h_log) > 0:
self.log_info("{}".format(h_log), inline_logs)
elif os.path.exists(d_log) and os.path.getsize(d_log) > 0:
self.log_info("{}".format(d_log), inline_logs)
else:
self.log_info("{}".format(b_log), inline_logs)
def setup_handler(self):
instance = self.instance
args = []
# FIXME: Needs simplification
if instance.platform.simulation == "qemu":
instance.handler = QEMUHandler(instance, "qemu")
args.append("QEMU_PIPE=%s" % instance.handler.get_fifo())
instance.handler.call_make_run = True
elif instance.testcase.type == "unit":
instance.handler = BinaryHandler(instance, "unit")
instance.handler.binary = os.path.join(instance.build_dir, "testbinary")
if self.coverage:
args.append("COVERAGE=1")
elif instance.platform.type == "native":
handler = BinaryHandler(instance, "native")
handler.asan = self.asan
handler.valgrind = self.valgrind
handler.lsan = self.lsan
handler.ubsan = self.ubsan
handler.coverage = self.coverage
handler.binary = os.path.join(instance.build_dir, "zephyr", "zephyr.exe")
instance.handler = handler
elif instance.platform.simulation == "renode":
if find_executable("renode"):
instance.handler = BinaryHandler(instance, "renode")
instance.handler.pid_fn = os.path.join(instance.build_dir, "renode.pid")
instance.handler.call_make_run = True
elif instance.platform.simulation == "tsim":
instance.handler = BinaryHandler(instance, "tsim")
instance.handler.call_make_run = True
elif self.device_testing:
instance.handler = DeviceHandler(instance, "device")
instance.handler.coverage = self.coverage
elif instance.platform.simulation == "nsim":
if find_executable("nsimdrv"):
instance.handler = BinaryHandler(instance, "nsim")
instance.handler.call_make_run = True
elif instance.platform.simulation == "mdb-nsim":
if find_executable("mdb"):
instance.handler = BinaryHandler(instance, "nsim")
instance.handler.pid_fn = os.path.join(instance.build_dir, "mdb.pid")
instance.handler.call_west_flash = True
elif instance.platform.simulation == "armfvp":
instance.handler = BinaryHandler(instance, "armfvp")
instance.handler.call_make_run = True
if instance.handler:
instance.handler.args = args
instance.handler.generator_cmd = self.generator_cmd
instance.handler.generator = self.generator
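# Descriptive note (not from the original file): process() below implements a
# small state machine driven through the multiprocessing pipeline. The "cmake"
# op leads to "build" (or straight to "report" on failure/filtering), "build"
# leads to "run" for runnable instances or "report" otherwise, "run" always
# re-queues "report", and "report" may finally queue "cleanup".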
def process(self, pipeline, done, message, lock, results):
op = message.get('op')
if not self.instance.handler:
self.setup_handler()
# The build process, call cmake and build with configured generator
if op == "cmake":
res = self.cmake()
if self.instance.status in ["failed", "error"]:
pipeline.put({"op": "report", "test": self.instance})
elif self.cmake_only:
if self.instance.status is None:
self.instance.status = "passed"
pipeline.put({"op": "report", "test": self.instance})
else:
if self.instance.name in res['filter'] and res['filter'][self.instance.name]:
logger.debug("filtering %s" % self.instance.name)
self.instance.status = "skipped"
self.instance.reason = "filter"
results.skipped_runtime += 1
for case in self.instance.testcase.cases:
self.instance.results.update({case: 'SKIP'})
pipeline.put({"op": "report", "test": self.instance})
else:
pipeline.put({"op": "build", "test": self.instance})
elif op == "build":
logger.debug("build test: %s" % self.instance.name)
res = self.build()
if not res:
self.instance.status = "error"
self.instance.reason = "Build Failure"
pipeline.put({"op": "report", "test": self.instance})
else:
# Count skipped cases during build, for example
# due to ram/rom overflow.
inst = res.get("instance", None)
if inst and inst.status == "skipped":
results.skipped_runtime += 1
if res.get('returncode', 1) > 0:
pipeline.put({"op": "report", "test": self.instance})
else:
if self.instance.run and self.instance.handler:
pipeline.put({"op": "run", "test": self.instance})
else:
pipeline.put({"op": "report", "test": self.instance})
# Run the generated binary using one of the supported handlers
elif op == "run":
logger.debug("run test: %s" % self.instance.name)
self.run()
self.instance.status, _ = self.instance.handler.get_state()
logger.debug(f"run status: {self.instance.name} {self.instance.status}")
# to make it work with pickle
self.instance.handler.thread = None
self.instance.handler.suite = None
pipeline.put({
"op": "report",
"test": self.instance,
"status": self.instance.status,
"reason": self.instance.reason
}
)
# Report results and output progress to screen
elif op == "report":
with lock:
done.put(self.instance)
self.report_out(results)
if self.cleanup and not self.coverage and self.instance.status == "passed":
pipeline.put({
"op": "cleanup",
"test": self.instance
})
elif op == "cleanup":
if self.device_testing:
self.cleanup_device_testing_artifacts()
else:
self.cleanup_artifacts()
def cleanup_artifacts(self, additional_keep=[]):
logger.debug("Cleaning up {}".format(self.instance.build_dir))
allow = [
'zephyr/.config',
'handler.log',
'build.log',
'device.log',
'recording.csv',
]
allow += additional_keep
allow = [os.path.join(self.instance.build_dir, file) for file in allow]
for dirpath, dirnames, filenames in os.walk(self.instance.build_dir, topdown=False):
for name in filenames:
path = os.path.join(dirpath, name)
if path not in allow:
os.remove(path)
# Remove empty directories and symbolic links to directories
for dir in dirnames:
path = os.path.join(dirpath, dir)
if os.path.islink(path):
os.remove(path)
elif not os.listdir(path):
os.rmdir(path)
def cleanup_device_testing_artifacts(self):
logger.debug("Cleaning up for Device Testing {}".format(self.instance.build_dir))
sanitizelist = [
'CMakeCache.txt',
'zephyr/runners.yaml',
]
keep = [
'zephyr/zephyr.hex',
'zephyr/zephyr.bin',
'zephyr/zephyr.elf',
]
keep += sanitizelist
self.cleanup_artifacts(keep)
# sanitize paths so files are relocatable
for file in sanitizelist:
file = os.path.join(self.instance.build_dir, file)
with open(file, "rt") as fin:
data = fin.read()
data = data.replace(canonical_zephyr_base+"/", "")
with open(file, "wt") as fin:
fin.write(data)
def report_out(self, results):
total_to_do = results.total - results.skipped_configs
total_tests_width = len(str(total_to_do))
results.done += 1
instance = self.instance
if instance.status in ["error", "failed", "timeout", "flash_error"]:
if instance.status == "error":
results.error += 1
results.failed += 1
if self.verbose:
status = Fore.RED + "FAILED " + Fore.RESET + instance.reason
else:
print("")
logger.error(
"{:<25} {:<50} {}FAILED{}: {}".format(
instance.platform.name,
instance.testcase.name,
Fore.RED,
Fore.RESET,
instance.reason))
if not self.verbose:
self.log_info_file(self.inline_logs)
elif instance.status == "skipped":
status = Fore.YELLOW + "SKIPPED" + Fore.RESET
elif instance.status == "passed":
status = Fore.GREEN + "PASSED" + Fore.RESET
else:
logger.debug(f"Unknown status = {instance.status}")
status = Fore.YELLOW + "UNKNOWN" + Fore.RESET
if self.verbose:
if self.cmake_only:
more_info = "cmake"
elif instance.status == "skipped":
more_info = instance.reason
else:
if instance.handler and instance.run:
more_info = instance.handler.type_str
htime = instance.handler.duration
if htime:
more_info += " {:.3f}s".format(htime)
else:
more_info = "build"
logger.info("{:>{}}/{} {:<25} {:<50} {} ({})".format(
results.done, total_tests_width, total_to_do, instance.platform.name,
instance.testcase.name, status, more_info))
if instance.status in ["error", "failed", "timeout"]:
self.log_info_file(self.inline_logs)
else:
completed_perc = 0
if total_to_do > 0:
completed_perc = int((float(results.done) / total_to_do) * 100)
skipped = results.skipped_configs + results.skipped_runtime
sys.stdout.write("\rINFO - Total complete: %s%4d/%4d%s %2d%% skipped: %s%4d%s, failed: %s%4d%s" % (
Fore.GREEN,
results.done,
total_to_do,
Fore.RESET,
completed_perc,
Fore.YELLOW if skipped > 0 else Fore.RESET,
skipped,
Fore.RESET,
Fore.RED if results.failed > 0 else Fore.RESET,
results.failed,
Fore.RESET
)
)
sys.stdout.flush()
def cmake(self):
instance = self.instance
args = self.testcase.extra_args[:]
args += self.extra_args
if instance.handler:
args += instance.handler.args
# merge overlay files into one variable
def extract_overlays(args):
re_overlay = re.compile('OVERLAY_CONFIG=(.*)')
other_args = []
overlays = []
for arg in args:
match = re_overlay.search(arg)
if match:
overlays.append(match.group(1).strip('\'"'))
else:
other_args.append(arg)
args[:] = other_args
return overlays
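        # Illustrative example: with args == ["OVERLAY_CONFIG=extra.conf", "CONFIG_FOO=y"],
        # extract_overlays(args) returns ["extra.conf"] and leaves args == ["CONFIG_FOO=y"],
        # so all overlay fragments can be merged into a single OVERLAY_CONFIG value below.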
overlays = extract_overlays(args)
if os.path.exists(os.path.join(instance.build_dir,
"twister", "testcase_extra.conf")):
overlays.append(os.path.join(instance.build_dir,
"twister", "testcase_extra.conf"))
if overlays:
args.append("OVERLAY_CONFIG=\"%s\"" % (" ".join(overlays)))
res = self.run_cmake(args)
return res
def build(self):
res = self.run_build(['--build', self.build_dir])
return res
def run(self):
instance = self.instance
if instance.handler:
if instance.handler.type_str == "device":
instance.handler.suite = self.suite
instance.handler.handle()
sys.stdout.flush()
class TestSuite(DisablePyTestCollectionMixin):
config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
tc_schema = scl.yaml_load(
os.path.join(ZEPHYR_BASE,
"scripts", "schemas", "twister", "testcase-schema.yaml"))
quarantine_schema = scl.yaml_load(
os.path.join(ZEPHYR_BASE,
"scripts", "schemas", "twister", "quarantine-schema.yaml"))
testcase_valid_keys = {"tags": {"type": "set", "required": False},
"type": {"type": "str", "default": "integration"},
"extra_args": {"type": "list"},
"extra_configs": {"type": "list"},
"build_only": {"type": "bool", "default": False},
"build_on_all": {"type": "bool", "default": False},
"skip": {"type": "bool", "default": False},
"slow": {"type": "bool", "default": False},
"timeout": {"type": "int", "default": 60},
"min_ram": {"type": "int", "default": 8},
"depends_on": {"type": "set"},
"min_flash": {"type": "int", "default": 32},
"arch_allow": {"type": "set"},
"arch_exclude": {"type": "set"},
"extra_sections": {"type": "list", "default": []},
"integration_platforms": {"type": "list", "default": []},
"platform_exclude": {"type": "set"},
"platform_allow": {"type": "set"},
"toolchain_exclude": {"type": "set"},
"toolchain_allow": {"type": "set"},
"filter": {"type": "str"},
"harness": {"type": "str"},
"harness_config": {"type": "map", "default": {}}
}
RELEASE_DATA = os.path.join(ZEPHYR_BASE, "scripts", "release",
"twister_last_release.csv")
SAMPLE_FILENAME = 'sample.yaml'
TESTCASE_FILENAME = 'testcase.yaml'
def __init__(self, board_root_list=[], testcase_roots=[], outdir=None):
self.roots = testcase_roots
if not isinstance(board_root_list, list):
self.board_roots = [board_root_list]
else:
self.board_roots = board_root_list
# Testsuite Options
self.coverage_platform = []
self.build_only = False
self.cmake_only = False
self.cleanup = False
self.enable_slow = False
self.device_testing = False
self.fixtures = []
self.enable_coverage = False
self.enable_ubsan = False
self.enable_lsan = False
self.enable_asan = False
self.enable_valgrind = False
self.extra_args = []
self.inline_logs = False
self.enable_sizes_report = False
self.west_flash = None
self.west_runner = None
self.generator = None
self.generator_cmd = None
self.warnings_as_errors = True
self.overflow_as_errors = False
self.quarantine_verify = False
# Keep track of which test cases we've filtered out and why
self.testcases = {}
self.quarantine = {}
self.platforms = []
self.selected_platforms = []
self.filtered_platforms = []
self.default_platforms = []
self.outdir = os.path.abspath(outdir)
self.discards = {}
self.load_errors = 0
self.instances = dict()
self.total_platforms = 0
self.start_time = 0
self.duration = 0
self.warnings = 0
# hardcoded for now
self.duts = []
# run integration tests only
self.integration = False
self.pipeline = None
self.version = "NA"
def check_zephyr_version(self):
try:
subproc = subprocess.run(["git", "describe", "--abbrev=12"],
stdout=subprocess.PIPE,
universal_newlines=True,
cwd=ZEPHYR_BASE)
if subproc.returncode == 0:
self.version = subproc.stdout.strip()
logger.info(f"Zephyr version: {self.version}")
except OSError:
logger.info("Cannot read zephyr version.")
def get_platform_instances(self, platform):
filtered_dict = {k:v for k,v in self.instances.items() if k.startswith(platform + "/")}
return filtered_dict
def config(self):
logger.info("coverage platform: {}".format(self.coverage_platform))
# Debug Functions
@staticmethod
def info(what):
sys.stdout.write(what + "\n")
sys.stdout.flush()
def update_counting(self, results=None, initial=False):
results.skipped_configs = 0
results.skipped_cases = 0
for instance in self.instances.values():
if initial:
results.cases += len(instance.testcase.cases)
if instance.status == 'skipped':
results.skipped_configs += 1
results.skipped_cases += len(instance.testcase.cases)
elif instance.status == "passed":
results.passed += 1
for res in instance.results.values():
if res == 'SKIP':
results.skipped_cases += 1
def compare_metrics(self, filename):
# name, datatype, lower results better
interesting_metrics = [("ram_size", int, True),
("rom_size", int, True)]
if not os.path.exists(filename):
logger.error("Cannot compare metrics, %s not found" % filename)
return []
results = []
saved_metrics = {}
with open(filename) as fp:
cr = csv.DictReader(fp)
for row in cr:
d = {}
for m, _, _ in interesting_metrics:
d[m] = row[m]
saved_metrics[(row["test"], row["platform"])] = d
for instance in self.instances.values():
mkey = (instance.testcase.name, instance.platform.name)
if mkey not in saved_metrics:
continue
sm = saved_metrics[mkey]
for metric, mtype, lower_better in interesting_metrics:
if metric not in instance.metrics:
continue
if sm[metric] == "":
continue
delta = instance.metrics.get(metric, 0) - mtype(sm[metric])
if delta == 0:
continue
results.append((instance, metric, instance.metrics.get(metric, 0), delta,
lower_better))
return results
def footprint_reports(self, report, show_footprint, all_deltas,
footprint_threshold, last_metrics):
if not report:
return
logger.debug("running footprint_reports")
deltas = self.compare_metrics(report)
warnings = 0
if deltas and show_footprint:
for i, metric, value, delta, lower_better in deltas:
if not all_deltas and ((delta < 0 and lower_better) or
(delta > 0 and not lower_better)):
continue
percentage = 0
if value > delta:
percentage = (float(delta) / float(value - delta))
if not all_deltas and (percentage < (footprint_threshold / 100.0)):
continue
logger.info("{:<25} {:<60} {}{}{}: {} {:<+4}, is now {:6} {:+.2%}".format(
i.platform.name, i.testcase.name, Fore.YELLOW,
"INFO" if all_deltas else "WARNING", Fore.RESET,
metric, delta, value, percentage))
warnings += 1
if warnings:
logger.warning("Deltas based on metrics from last %s" %
("release" if not last_metrics else "run"))
def summary(self, results, unrecognized_sections):
failed = 0
run = 0
for instance in self.instances.values():
if instance.status == "failed":
failed += 1
elif instance.metrics.get("unrecognized") and not unrecognized_sections:
logger.error("%sFAILED%s: %s has unrecognized binary sections: %s" %
(Fore.RED, Fore.RESET, instance.name,
str(instance.metrics.get("unrecognized", []))))
failed += 1
if instance.metrics.get('handler_time', None):
run += 1
if results.total and results.total != results.skipped_configs:
pass_rate = (float(results.passed) / float(results.total - results.skipped_configs))
else:
pass_rate = 0
logger.info(
"{}{} of {}{} test configurations passed ({:.2%}), {}{}{} failed, {} skipped with {}{}{} warnings in {:.2f} seconds".format(
Fore.RED if failed else Fore.GREEN,
results.passed,
results.total - results.skipped_configs,
Fore.RESET,
pass_rate,
Fore.RED if results.failed else Fore.RESET,
results.failed,
Fore.RESET,
results.skipped_configs,
Fore.YELLOW if self.warnings else Fore.RESET,
self.warnings,
Fore.RESET,
self.duration))
self.total_platforms = len(self.platforms)
# if we are only building, do not report about tests being executed.
if self.platforms and not self.build_only:
logger.info("In total {} test cases were executed, {} skipped on {} out of total {} platforms ({:02.2f}%)".format(
results.cases - results.skipped_cases,
results.skipped_cases,
len(self.filtered_platforms),
self.total_platforms,
(100 * len(self.filtered_platforms) / len(self.platforms))
))
logger.info(f"{Fore.GREEN}{run}{Fore.RESET} test configurations executed on platforms, \
{Fore.RED}{results.total - run - results.skipped_configs}{Fore.RESET} test configurations were only built.")
def save_reports(self, name, suffix, report_dir, no_update, release, only_failed, platform_reports, json_report):
if not self.instances:
return
logger.info("Saving reports...")
if name:
report_name = name
else:
report_name = "twister"
if report_dir:
os.makedirs(report_dir, exist_ok=True)
filename = os.path.join(report_dir, report_name)
outdir = report_dir
else:
filename = os.path.join(self.outdir, report_name)
outdir = self.outdir
if suffix:
filename = "{}_{}".format(filename, suffix)
if not no_update:
self.xunit_report(filename + ".xml", full_report=False,
append=only_failed, version=self.version)
self.xunit_report(filename + "_report.xml", full_report=True,
append=only_failed, version=self.version)
self.csv_report(filename + ".csv")
if json_report:
self.json_report(filename + ".json", append=only_failed, version=self.version)
if platform_reports:
self.target_report(outdir, suffix, append=only_failed)
if self.discards:
self.discard_report(filename + "_discard.csv")
if release:
self.csv_report(self.RELEASE_DATA)
def add_configurations(self):
for board_root in self.board_roots:
board_root = os.path.abspath(board_root)
logger.debug("Reading platform configuration files under %s..." %
board_root)
for file in glob.glob(os.path.join(board_root, "*", "*", "*.yaml")):
try:
platform = Platform()
platform.load(file)
if platform.name in [p.name for p in self.platforms]:
logger.error(f"Duplicate platform {platform.name} in {file}")
raise Exception(f"Duplicate platform identifier {platform.name} found")
if platform.twister:
self.platforms.append(platform)
if platform.default:
self.default_platforms.append(platform.name)
except RuntimeError as e:
logger.error("E: %s: can't load: %s" % (file, e))
self.load_errors += 1
def get_all_tests(self):
tests = []
for _, tc in self.testcases.items():
for case in tc.cases:
tests.append(case)
return tests
@staticmethod
def get_toolchain():
toolchain_script = Path(ZEPHYR_BASE) / Path('cmake/verify-toolchain.cmake')
result = CMake.run_cmake_script([toolchain_script, "FORMAT=json"])
try:
if result['returncode']:
raise TwisterRuntimeError("E: Variable ZEPHYR_TOOLCHAIN_VARIANT is not defined")
except Exception as e:
print(str(e))
sys.exit(2)
toolchain = json.loads(result['stdout'])['ZEPHYR_TOOLCHAIN_VARIANT']
logger.info(f"Using '{toolchain}' toolchain.")
return toolchain
def add_testcases(self, testcase_filter=[]):
for root in self.roots:
root = os.path.abspath(root)
logger.debug("Reading test case configuration files under %s..." % root)
for dirpath, dirnames, filenames in os.walk(root, topdown=True):
if self.SAMPLE_FILENAME in filenames:
filename = self.SAMPLE_FILENAME
elif self.TESTCASE_FILENAME in filenames:
filename = self.TESTCASE_FILENAME
else:
continue
logger.debug("Found possible test case in " + dirpath)
dirnames[:] = []
tc_path = os.path.join(dirpath, filename)
try:
parsed_data = TwisterConfigParser(tc_path, self.tc_schema)
parsed_data.load()
tc_path = os.path.dirname(tc_path)
workdir = os.path.relpath(tc_path, root)
for name in parsed_data.tests.keys():
tc = TestCase(root, workdir, name)
tc_dict = parsed_data.get_test(name, self.testcase_valid_keys)
tc.source_dir = tc_path
tc.yamlfile = tc_path
tc.type = tc_dict["type"]
tc.tags = tc_dict["tags"]
tc.extra_args = tc_dict["extra_args"]
tc.extra_configs = tc_dict["extra_configs"]
tc.arch_allow = tc_dict["arch_allow"]
tc.arch_exclude = tc_dict["arch_exclude"]
tc.skip = tc_dict["skip"]
tc.platform_exclude = tc_dict["platform_exclude"]
tc.platform_allow = tc_dict["platform_allow"]
tc.toolchain_exclude = tc_dict["toolchain_exclude"]
tc.toolchain_allow = tc_dict["toolchain_allow"]
tc.tc_filter = tc_dict["filter"]
tc.timeout = tc_dict["timeout"]
tc.harness = tc_dict["harness"]
tc.harness_config = tc_dict["harness_config"]
if tc.harness == 'console' and not tc.harness_config:
raise Exception('Harness config error: console harness defined without a configuration.')
tc.build_only = tc_dict["build_only"]
tc.build_on_all = tc_dict["build_on_all"]
tc.slow = tc_dict["slow"]
tc.min_ram = tc_dict["min_ram"]
tc.depends_on = tc_dict["depends_on"]
tc.min_flash = tc_dict["min_flash"]
tc.extra_sections = tc_dict["extra_sections"]
tc.integration_platforms = tc_dict["integration_platforms"]
tc.parse_subcases(tc_path)
if testcase_filter:
if tc.name and tc.name in testcase_filter:
self.testcases[tc.name] = tc
else:
self.testcases[tc.name] = tc
except Exception as e:
logger.error("%s: can't load (skipping): %s" % (tc_path, e))
self.load_errors += 1
return len(self.testcases)
def get_platform(self, name):
selected_platform = None
for platform in self.platforms:
if platform.name == name:
selected_platform = platform
break
return selected_platform
def load_quarantine(self, file):
"""
Loads quarantine list from the given yaml file. Creates a dictionary
of all tests configurations (platform + scenario: comment) that shall be
skipped due to quarantine
"""
# Load yaml into quarantine_yaml
quarantine_yaml = scl.yaml_load_verify(file, self.quarantine_schema)
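        # Illustrative entry shape (inferred from the code below, not from the schema file):
        #   - scenarios: [kernel.common]
        #     platforms: [qemu_x86]
        #     comment: "flaky on CI"
        # ends up in self.quarantine as {"qemu_x86.kernel.common": "flaky on CI"}.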
# Create quarantine_list with a product of the listed
# platforms and scenarios for each entry in quarantine yaml
quarantine_list = []
for quar_dict in quarantine_yaml:
if quar_dict['platforms'][0] == "all":
plat = [p.name for p in self.platforms]
else:
plat = quar_dict['platforms']
comment = quar_dict.get('comment', "NA")
quarantine_list.append([{".".join([p, s]): comment}
for p in plat for s in quar_dict['scenarios']])
# Flatten the quarantine_list
quarantine_list = [it for sublist in quarantine_list for it in sublist]
# Change quarantine_list into a dictionary
for d in quarantine_list:
self.quarantine.update(d)
def load_from_file(self, file, filter_status=[], filter_platform=[]):
try:
with open(file, "r") as fp:
cr = csv.DictReader(fp)
instance_list = []
for row in cr:
if row["status"] in filter_status:
continue
test = row["test"]
platform = self.get_platform(row["platform"])
if filter_platform and platform.name not in filter_platform:
continue
instance = TestInstance(self.testcases[test], platform, self.outdir)
if self.device_testing:
tfilter = 'runnable'
else:
tfilter = 'buildable'
instance.run = instance.check_runnable(
self.enable_slow,
tfilter,
self.fixtures
)
instance.create_overlay(platform, self.enable_asan, self.enable_ubsan, self.enable_coverage, self.coverage_platform)
instance_list.append(instance)
self.add_instances(instance_list)
except KeyError as e:
logger.error("Key error while parsing tests file.({})".format(str(e)))
sys.exit(2)
except FileNotFoundError as e:
logger.error("Couldn't find input file with list of tests. ({})".format(e))
sys.exit(2)
def apply_filters(self, **kwargs):
toolchain = self.get_toolchain()
discards = {}
platform_filter = kwargs.get('platform')
exclude_platform = kwargs.get('exclude_platform', [])
testcase_filter = kwargs.get('run_individual_tests', [])
arch_filter = kwargs.get('arch')
tag_filter = kwargs.get('tag')
exclude_tag = kwargs.get('exclude_tag')
all_filter = kwargs.get('all')
runnable = kwargs.get('runnable')
force_toolchain = kwargs.get('force_toolchain')
force_platform = kwargs.get('force_platform')
emu_filter = kwargs.get('emulation_only')
logger.debug("platform filter: " + str(platform_filter))
logger.debug(" arch_filter: " + str(arch_filter))
logger.debug(" tag_filter: " + str(tag_filter))
logger.debug(" exclude_tag: " + str(exclude_tag))
default_platforms = False
emulation_platforms = False
if all_filter:
logger.info("Selecting all possible platforms per test case")
            # When --all is used, any --platform arguments are ignored
platform_filter = []
elif not platform_filter and not emu_filter:
logger.info("Selecting default platforms per test case")
default_platforms = True
elif emu_filter:
logger.info("Selecting emulation platforms per test case")
emulation_platforms = True
if platform_filter:
platforms = list(filter(lambda p: p.name in platform_filter, self.platforms))
elif emu_filter:
platforms = list(filter(lambda p: p.simulation != 'na', self.platforms))
elif arch_filter:
platforms = list(filter(lambda p: p.arch in arch_filter, self.platforms))
elif default_platforms:
platforms = list(filter(lambda p: p.default, self.platforms))
else:
platforms = self.platforms
logger.info("Building initial testcase list...")
for tc_name, tc in self.testcases.items():
if tc.build_on_all and not platform_filter:
platform_scope = self.platforms
elif tc.integration_platforms and self.integration:
platform_scope = list(filter(lambda item: item.name in tc.integration_platforms, \
self.platforms))
else:
platform_scope = platforms
integration = self.integration and tc.integration_platforms
# If there isn't any overlap between the platform_allow list and the platform_scope
# we set the scope to the platform_allow list
if tc.platform_allow and not platform_filter and not integration:
a = set(platform_scope)
b = set(filter(lambda item: item.name in tc.platform_allow, self.platforms))
c = a.intersection(b)
if not c:
platform_scope = list(filter(lambda item: item.name in tc.platform_allow, \
self.platforms))
# list of instances per testcase, aka configurations.
instance_list = []
for plat in platform_scope:
instance = TestInstance(tc, plat, self.outdir)
if runnable:
tfilter = 'runnable'
else:
tfilter = 'buildable'
instance.run = instance.check_runnable(
self.enable_slow,
tfilter,
self.fixtures
)
for t in tc.cases:
instance.results[t] = None
if runnable and self.duts:
for h in self.duts:
if h.platform == plat.name:
if tc.harness_config.get('fixture') in h.fixtures:
instance.run = True
if not force_platform and plat.name in exclude_platform:
discards[instance] = discards.get(instance, "Platform is excluded on command line.")
if (plat.arch == "unit") != (tc.type == "unit"):
# Discard silently
continue
if runnable and not instance.run:
discards[instance] = discards.get(instance, "Not runnable on device")
if self.integration and tc.integration_platforms and plat.name not in tc.integration_platforms:
discards[instance] = discards.get(instance, "Not part of integration platforms")
if tc.skip:
discards[instance] = discards.get(instance, "Skip filter")
if tag_filter and not tc.tags.intersection(tag_filter):
discards[instance] = discards.get(instance, "Command line testcase tag filter")
if exclude_tag and tc.tags.intersection(exclude_tag):
discards[instance] = discards.get(instance, "Command line testcase exclude filter")
if testcase_filter and tc_name not in testcase_filter:
discards[instance] = discards.get(instance, "Testcase name filter")
if arch_filter and plat.arch not in arch_filter:
discards[instance] = discards.get(instance, "Command line testcase arch filter")
if not force_platform:
if tc.arch_allow and plat.arch not in tc.arch_allow:
discards[instance] = discards.get(instance, "Not in test case arch allow list")
if tc.arch_exclude and plat.arch in tc.arch_exclude:
discards[instance] = discards.get(instance, "In test case arch exclude")
if tc.platform_exclude and plat.name in tc.platform_exclude:
discards[instance] = discards.get(instance, "In test case platform exclude")
if tc.toolchain_exclude and toolchain in tc.toolchain_exclude:
discards[instance] = discards.get(instance, "In test case toolchain exclude")
if platform_filter and plat.name not in platform_filter:
discards[instance] = discards.get(instance, "Command line platform filter")
if tc.platform_allow and plat.name not in tc.platform_allow:
discards[instance] = discards.get(instance, "Not in testcase platform allow list")
if tc.toolchain_allow and toolchain not in tc.toolchain_allow:
discards[instance] = discards.get(instance, "Not in testcase toolchain allow list")
if not plat.env_satisfied:
discards[instance] = discards.get(instance, "Environment ({}) not satisfied".format(", ".join(plat.env)))
if not force_toolchain \
and toolchain and (toolchain not in plat.supported_toolchains) \
and tc.type != 'unit':
discards[instance] = discards.get(instance, "Not supported by the toolchain")
if plat.ram < tc.min_ram:
discards[instance] = discards.get(instance, "Not enough RAM")
if tc.depends_on:
dep_intersection = tc.depends_on.intersection(set(plat.supported))
if dep_intersection != set(tc.depends_on):
discards[instance] = discards.get(instance, "No hardware support")
if plat.flash < tc.min_flash:
discards[instance] = discards.get(instance, "Not enough FLASH")
if set(plat.ignore_tags) & tc.tags:
discards[instance] = discards.get(instance, "Excluded tags per platform (exclude_tags)")
if plat.only_tags and not set(plat.only_tags) & tc.tags:
discards[instance] = discards.get(instance, "Excluded tags per platform (only_tags)")
test_configuration = ".".join([instance.platform.name,
instance.testcase.id])
# skip quarantined tests
if test_configuration in self.quarantine and not self.quarantine_verify:
discards[instance] = discards.get(instance,
f"Quarantine: {self.quarantine[test_configuration]}")
# run only quarantined test to verify their statuses (skip everything else)
if self.quarantine_verify and test_configuration not in self.quarantine:
discards[instance] = discards.get(instance, "Not under quarantine")
# if nothing stopped us until now, it means this configuration
# needs to be added.
instance_list.append(instance)
# no configurations, so jump to next testcase
if not instance_list:
continue
# if twister was launched with no platform options at all, we
# take all default platforms
if default_platforms and not tc.build_on_all and not integration:
if tc.platform_allow:
a = set(self.default_platforms)
b = set(tc.platform_allow)
c = a.intersection(b)
if c:
aa = list(filter(lambda tc: tc.platform.name in c, instance_list))
self.add_instances(aa)
else:
self.add_instances(instance_list)
else:
instances = list(filter(lambda tc: tc.platform.default, instance_list))
self.add_instances(instances)
elif integration:
instances = list(filter(lambda item: item.platform.name in tc.integration_platforms, instance_list))
self.add_instances(instances)
elif emulation_platforms:
self.add_instances(instance_list)
                for instance in list(filter(lambda inst: inst.platform.simulation == 'na', instance_list)):
discards[instance] = discards.get(instance, "Not an emulated platform")
else:
self.add_instances(instance_list)
for _, case in self.instances.items():
case.create_overlay(case.platform, self.enable_asan, self.enable_ubsan, self.enable_coverage, self.coverage_platform)
self.discards = discards
self.selected_platforms = set(p.platform.name for p in self.instances.values())
for instance in self.discards:
instance.reason = self.discards[instance]
instance.status = "skipped"
instance.fill_results_by_status()
self.filtered_platforms = set(p.platform.name for p in self.instances.values()
if p.status != "skipped" )
return discards
def add_instances(self, instance_list):
for instance in instance_list:
self.instances[instance.name] = instance
@staticmethod
def calc_one_elf_size(instance):
if instance.status not in ["error", "failed", "skipped"]:
if instance.platform.type != "native":
size_calc = instance.calculate_sizes()
instance.metrics["ram_size"] = size_calc.get_ram_size()
instance.metrics["rom_size"] = size_calc.get_rom_size()
instance.metrics["unrecognized"] = size_calc.unrecognized_sections()
else:
instance.metrics["ram_size"] = 0
instance.metrics["rom_size"] = 0
instance.metrics["unrecognized"] = []
instance.metrics["handler_time"] = instance.handler.duration if instance.handler else 0
def add_tasks_to_queue(self, pipeline, build_only=False, test_only=False):
for instance in self.instances.values():
if build_only:
instance.run = False
if test_only and instance.run:
pipeline.put({"op": "run", "test": instance})
else:
if instance.status not in ['passed', 'skipped', 'error']:
logger.debug(f"adding {instance.name}")
instance.status = None
pipeline.put({"op": "cmake", "test": instance})
def pipeline_mgr(self, pipeline, done_queue, lock, results):
while True:
try:
task = pipeline.get_nowait()
except queue.Empty:
break
else:
test = task['test']
pb = ProjectBuilder(self,
test,
lsan=self.enable_lsan,
asan=self.enable_asan,
ubsan=self.enable_ubsan,
coverage=self.enable_coverage,
extra_args=self.extra_args,
device_testing=self.device_testing,
cmake_only=self.cmake_only,
cleanup=self.cleanup,
valgrind=self.enable_valgrind,
inline_logs=self.inline_logs,
generator=self.generator,
generator_cmd=self.generator_cmd,
verbose=self.verbose,
warnings_as_errors=self.warnings_as_errors,
overflow_as_errors=self.overflow_as_errors
)
pb.process(pipeline, done_queue, task, lock, results)
return True
def execute(self, pipeline, done, results):
lock = Lock()
logger.info("Adding tasks to the queue...")
self.add_tasks_to_queue(pipeline, self.build_only, self.test_only)
logger.info("Added initial list of jobs to queue")
processes = []
for job in range(self.jobs):
logger.debug(f"Launch process {job}")
p = Process(target=self.pipeline_mgr, args=(pipeline, done, lock, results, ))
processes.append(p)
p.start()
try:
for p in processes:
p.join()
except KeyboardInterrupt:
logger.info("Execution interrupted")
for p in processes:
p.terminate()
# FIXME: This needs to move out.
if self.enable_size_report and not self.cmake_only:
# Parallelize size calculation
executor = concurrent.futures.ThreadPoolExecutor(self.jobs)
futures = [executor.submit(self.calc_one_elf_size, instance)
for instance in self.instances.values()]
concurrent.futures.wait(futures)
else:
for instance in self.instances.values():
instance.metrics["ram_size"] = 0
instance.metrics["rom_size"] = 0
instance.metrics["handler_time"] = instance.handler.duration if instance.handler else 0
instance.metrics["unrecognized"] = []
return results
def discard_report(self, filename):
try:
if not self.discards:
raise TwisterRuntimeError("apply_filters() hasn't been run!")
except Exception as e:
logger.error(str(e))
sys.exit(2)
with open(filename, "wt") as csvfile:
fieldnames = ["test", "arch", "platform", "reason"]
cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
cw.writeheader()
for instance, reason in sorted(self.discards.items()):
rowdict = {"test": instance.testcase.name,
"arch": instance.platform.arch,
"platform": instance.platform.name,
"reason": reason}
cw.writerow(rowdict)
def target_report(self, outdir, suffix, append=False):
platforms = {inst.platform.name for _, inst in self.instances.items()}
for platform in platforms:
if suffix:
filename = os.path.join(outdir,"{}_{}.xml".format(platform, suffix))
else:
filename = os.path.join(outdir,"{}.xml".format(platform))
self.xunit_report(filename, platform, full_report=True,
append=append, version=self.version)
@staticmethod
def process_log(log_file):
filtered_string = ""
if os.path.exists(log_file):
with open(log_file, "rb") as f:
log = f.read().decode("utf-8")
filtered_string = ''.join(filter(lambda x: x in string.printable, log))
return filtered_string
def xunit_report(self, filename, platform=None, full_report=False, append=False, version="NA"):
total = 0
fails = passes = errors = skips = 0
if platform:
selected = [platform]
logger.info(f"Writing target report for {platform}...")
else:
logger.info(f"Writing xunit report {filename}...")
selected = self.selected_platforms
if os.path.exists(filename) and append:
tree = ET.parse(filename)
eleTestsuites = tree.getroot()
else:
eleTestsuites = ET.Element('testsuites')
for p in selected:
inst = self.get_platform_instances(p)
fails = 0
passes = 0
errors = 0
skips = 0
duration = 0
for _, instance in inst.items():
handler_time = instance.metrics.get('handler_time', 0)
duration += handler_time
if full_report and instance.run:
for k in instance.results.keys():
if instance.results[k] == 'PASS':
passes += 1
elif instance.results[k] == 'BLOCK':
errors += 1
elif instance.results[k] == 'SKIP' or instance.status in ['skipped']:
skips += 1
else:
fails += 1
else:
if instance.status in ["error", "failed", "timeout", "flash_error"]:
if instance.reason in ['build_error', 'handler_crash']:
errors += 1
else:
fails += 1
elif instance.status == 'skipped':
skips += 1
elif instance.status == 'passed':
passes += 1
else:
if instance.status:
logger.error(f"{instance.name}: Unknown status {instance.status}")
else:
logger.error(f"{instance.name}: No status")
total = (errors + passes + fails + skips)
# do not produce a report if no tests were actually run (only built)
if total == 0:
continue
run = p
eleTestsuite = None
# When we re-run the tests, we re-use the results and update only with
# the newly run tests.
if os.path.exists(filename) and append:
ts = eleTestsuites.findall(f'testsuite/[@name="{p}"]')
if ts:
eleTestsuite = ts[0]
eleTestsuite.attrib['failures'] = "%d" % fails
eleTestsuite.attrib['errors'] = "%d" % errors
eleTestsuite.attrib['skipped'] = "%d" % skips
else:
logger.info(f"Did not find any existing results for {p}")
eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
name=run, time="%f" % duration,
tests="%d" % (total),
failures="%d" % fails,
errors="%d" % (errors), skipped="%s" % (skips))
                    eleTSProperties = ET.SubElement(eleTestsuite, 'properties')
                    # Multiple 'property' can be added to 'properties'
                    # differing by name and value
                    ET.SubElement(eleTSProperties, 'property', name="version", value=version)
else:
eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
name=run, time="%f" % duration,
tests="%d" % (total),
failures="%d" % fails,
errors="%d" % (errors), skipped="%s" % (skips))
                eleTSProperties = ET.SubElement(eleTestsuite, 'properties')
                # Multiple 'property' can be added to 'properties'
                # differing by name and value
                ET.SubElement(eleTSProperties, 'property', name="version", value=version)
for _, instance in inst.items():
if full_report:
tname = os.path.basename(instance.testcase.name)
else:
tname = instance.testcase.id
handler_time = instance.metrics.get('handler_time', 0)
if full_report:
for k in instance.results.keys():
                        # remove testcases that are being re-run from existing reports
for tc in eleTestsuite.findall(f'testcase/[@name="{k}"]'):
eleTestsuite.remove(tc)
classname = ".".join(tname.split(".")[:2])
eleTestcase = ET.SubElement(
eleTestsuite, 'testcase',
classname=classname,
name="%s" % (k), time="%f" % handler_time)
if instance.results[k] in ['FAIL', 'BLOCK'] or \
(not instance.run and instance.status in ["error", "failed", "timeout"]):
if instance.results[k] == 'FAIL':
el = ET.SubElement(
eleTestcase,
'failure',
type="failure",
message="failed")
else:
el = ET.SubElement(
eleTestcase,
'error',
type="failure",
message=instance.reason)
log_root = os.path.join(self.outdir, instance.platform.name, instance.testcase.name)
log_file = os.path.join(log_root, "handler.log")
el.text = self.process_log(log_file)
elif instance.results[k] == 'PASS' \
or (not instance.run and instance.status in ["passed"]):
pass
elif instance.results[k] == 'SKIP' or (instance.status in ["skipped"]):
el = ET.SubElement(eleTestcase, 'skipped', type="skipped", message=instance.reason)
else:
el = ET.SubElement(
eleTestcase,
'error',
type="error",
message=f"{instance.reason}")
else:
if platform:
classname = ".".join(instance.testcase.name.split(".")[:2])
else:
classname = p + ":" + ".".join(instance.testcase.name.split(".")[:2])
                    # remove testcases that are being re-run from existing reports
for tc in eleTestsuite.findall(f'testcase/[@classname="{classname}"][@name="{instance.testcase.name}"]'):
eleTestsuite.remove(tc)
eleTestcase = ET.SubElement(eleTestsuite, 'testcase',
classname=classname,
name="%s" % (instance.testcase.name),
time="%f" % handler_time)
if instance.status in ["error", "failed", "timeout", "flash_error"]:
failure = ET.SubElement(
eleTestcase,
'failure',
type="failure",
message=instance.reason)
log_root = ("%s/%s/%s" % (self.outdir, instance.platform.name, instance.testcase.name))
bl = os.path.join(log_root, "build.log")
hl = os.path.join(log_root, "handler.log")
log_file = bl
if instance.reason != 'Build error':
if os.path.exists(hl):
log_file = hl
else:
log_file = bl
failure.text = self.process_log(log_file)
elif instance.status == "skipped":
ET.SubElement(eleTestcase, 'skipped', type="skipped", message="Skipped")
result = ET.tostring(eleTestsuites)
with open(filename, 'wb') as report:
report.write(result)
return fails, passes, errors, skips
def csv_report(self, filename):
with open(filename, "wt") as csvfile:
fieldnames = ["test", "arch", "platform", "status",
"extra_args", "handler", "handler_time", "ram_size",
"rom_size"]
cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
cw.writeheader()
for instance in self.instances.values():
rowdict = {"test": instance.testcase.name,
"arch": instance.platform.arch,
"platform": instance.platform.name,
"extra_args": " ".join(instance.testcase.extra_args),
"handler": instance.platform.simulation}
rowdict["status"] = instance.status
if instance.status not in ["error", "failed", "timeout"]:
if instance.handler:
rowdict["handler_time"] = instance.metrics.get("handler_time", 0)
ram_size = instance.metrics.get("ram_size", 0)
rom_size = instance.metrics.get("rom_size", 0)
rowdict["ram_size"] = ram_size
rowdict["rom_size"] = rom_size
cw.writerow(rowdict)
def json_report(self, filename, append=False, version="NA"):
logger.info(f"Writing JSON report {filename}")
report = {}
selected = self.selected_platforms
report["environment"] = {"os": os.name,
"zephyr_version": version,
"toolchain": self.get_toolchain()
}
json_data = {}
if os.path.exists(filename) and append:
with open(filename, 'r') as json_file:
json_data = json.load(json_file)
suites = json_data.get("testsuites", [])
if suites:
suite = suites[0]
testcases = suite.get("testcases", [])
else:
suite = {}
testcases = []
for p in selected:
inst = self.get_platform_instances(p)
for _, instance in inst.items():
testcase = {}
handler_log = os.path.join(instance.build_dir, "handler.log")
build_log = os.path.join(instance.build_dir, "build.log")
device_log = os.path.join(instance.build_dir, "device.log")
handler_time = instance.metrics.get('handler_time', 0)
                ram_size = instance.metrics.get("ram_size", 0)
                rom_size = instance.metrics.get("rom_size", 0)
                for k in instance.results.keys():
                    testcases = list(filter(lambda d: not (d.get('testcase') == k and d.get('platform') == p), testcases))
testcase = {"testcase": k,
"arch": instance.platform.arch,
"platform": p,
}
if instance.results[k] in ["PASS"]:
testcase["status"] = "passed"
if instance.handler:
testcase["execution_time"] = handler_time
if ram_size:
testcase["ram_size"] = ram_size
if rom_size:
testcase["rom_size"] = rom_size
elif instance.results[k] in ['FAIL', 'BLOCK'] or instance.status in ["error", "failed", "timeout"]:
testcase["status"] = "failed"
testcase["reason"] = instance.reason
testcase["execution_time"] = handler_time
if os.path.exists(handler_log):
testcase["test_output"] = self.process_log(handler_log)
elif os.path.exists(device_log):
testcase["device_log"] = self.process_log(device_log)
else:
testcase["build_log"] = self.process_log(build_log)
else:
testcase["status"] = "skipped"
testcase["reason"] = instance.reason
testcases.append(testcase)
suites = [ {"testcases": testcases} ]
report["testsuites"] = suites
with open(filename, "wt") as json_file:
json.dump(report, json_file, indent=4, separators=(',',':'))
def get_testcase(self, identifier):
results = []
for _, tc in self.testcases.items():
for case in tc.cases:
if case == identifier:
results.append(tc)
return results
class CoverageTool:
""" Base class for every supported coverage tool
"""
def __init__(self):
self.gcov_tool = None
self.base_dir = None
@staticmethod
def factory(tool):
if tool == 'lcov':
t = Lcov()
elif tool == 'gcovr':
t = Gcovr()
else:
logger.error("Unsupported coverage tool specified: {}".format(tool))
return None
logger.debug(f"Select {tool} as the coverage tool...")
return t
@staticmethod
    def retrieve_gcov_data(input_file):
        logger.debug("Working on %s" % input_file)
        extracted_coverage_info = {}
        capture_data = False
        capture_complete = False
        with open(input_file, 'r') as fp:
for line in fp.readlines():
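                # Each record between GCOV_COVERAGE_DUMP_START and GCOV_COVERAGE_DUMP_END
                # is expected to look like "*FILENAME<HEXDATA": a leading '*', the gcda
                # file name, a '<' separator, then the hex-encoded gcda contents.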
if re.search("GCOV_COVERAGE_DUMP_START", line):
capture_data = True
continue
if re.search("GCOV_COVERAGE_DUMP_END", line):
capture_complete = True
break
# Loop until the coverage data is found.
if not capture_data:
continue
if line.startswith("*"):
sp = line.split("<")
if len(sp) > 1:
# Remove the leading delimiter "*"
file_name = sp[0][1:]
# Remove the trailing new line char
hex_dump = sp[1][:-1]
else:
continue
else:
continue
extracted_coverage_info.update({file_name: hex_dump})
if not capture_data:
capture_complete = True
return {'complete': capture_complete, 'data': extracted_coverage_info}
@staticmethod
def create_gcda_files(extracted_coverage_info):
logger.debug("Generating gcda files")
for filename, hexdump_val in extracted_coverage_info.items():
            # gcovr fails if coverage data for kobject_hash is present, so skip
            # it here (problem only observed with gcovr v4.1)
if "kobject_hash" in filename:
filename = (filename[:-4]) + "gcno"
try:
os.remove(filename)
except Exception:
pass
continue
with open(filename, 'wb') as fp:
fp.write(bytes.fromhex(hexdump_val))
def generate(self, outdir):
for filename in glob.glob("%s/**/handler.log" % outdir, recursive=True):
gcov_data = self.__class__.retrieve_gcov_data(filename)
capture_complete = gcov_data['complete']
extracted_coverage_info = gcov_data['data']
if capture_complete:
self.__class__.create_gcda_files(extracted_coverage_info)
logger.debug("Gcov data captured: {}".format(filename))
else:
logger.error("Gcov data capture incomplete: {}".format(filename))
with open(os.path.join(outdir, "coverage.log"), "a") as coveragelog:
ret = self._generate(outdir, coveragelog)
if ret == 0:
logger.info("HTML report generated: {}".format(
os.path.join(outdir, "coverage", "index.html")))
class Lcov(CoverageTool):
def __init__(self):
super().__init__()
self.ignores = []
def add_ignore_file(self, pattern):
self.ignores.append('*' + pattern + '*')
def add_ignore_directory(self, pattern):
self.ignores.append('*/' + pattern + '/*')
def _generate(self, outdir, coveragelog):
coveragefile = os.path.join(outdir, "coverage.info")
ztestfile = os.path.join(outdir, "ztest.info")
cmd = ["lcov", "--gcov-tool", self.gcov_tool,
"--capture", "--directory", outdir,
"--rc", "lcov_branch_coverage=1",
"--output-file", coveragefile]
cmd_str = " ".join(cmd)
logger.debug(f"Running {cmd_str}...")
subprocess.call(cmd, stdout=coveragelog)
# We want to remove tests/* and tests/ztest/test/* but save tests/ztest
subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--extract",
coveragefile,
os.path.join(self.base_dir, "tests", "ztest", "*"),
"--output-file", ztestfile,
"--rc", "lcov_branch_coverage=1"], stdout=coveragelog)
if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--remove",
ztestfile,
os.path.join(self.base_dir, "tests/ztest/test/*"),
"--output-file", ztestfile,
"--rc", "lcov_branch_coverage=1"],
stdout=coveragelog)
files = [coveragefile, ztestfile]
else:
files = [coveragefile]
for i in self.ignores:
subprocess.call(
["lcov", "--gcov-tool", self.gcov_tool, "--remove",
coveragefile, i, "--output-file",
coveragefile, "--rc", "lcov_branch_coverage=1"],
stdout=coveragelog)
# The --ignore-errors source option is added to avoid it exiting due to
# samples/application_development/external_lib/
return subprocess.call(["genhtml", "--legend", "--branch-coverage",
"--ignore-errors", "source",
"-output-directory",
os.path.join(outdir, "coverage")] + files,
stdout=coveragelog)
class Gcovr(CoverageTool):
def __init__(self):
super().__init__()
self.ignores = []
def add_ignore_file(self, pattern):
self.ignores.append('.*' + pattern + '.*')
def add_ignore_directory(self, pattern):
self.ignores.append(".*/" + pattern + '/.*')
@staticmethod
def _interleave_list(prefix, list):
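        # e.g. _interleave_list("-e", ["foo", "bar"]) -> ["-e", "foo", "-e", "bar"],
        # used below to build repeated gcovr command-line options.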
tuple_list = [(prefix, item) for item in list]
return [item for sublist in tuple_list for item in sublist]
def _generate(self, outdir, coveragelog):
coveragefile = os.path.join(outdir, "coverage.json")
ztestfile = os.path.join(outdir, "ztest.json")
excludes = Gcovr._interleave_list("-e", self.ignores)
# We want to remove tests/* and tests/ztest/test/* but save tests/ztest
cmd = ["gcovr", "-r", self.base_dir, "--gcov-executable",
self.gcov_tool, "-e", "tests/*"] + excludes + ["--json", "-o",
coveragefile, outdir]
cmd_str = " ".join(cmd)
logger.debug(f"Running {cmd_str}...")
subprocess.call(cmd, stdout=coveragelog)
subprocess.call(["gcovr", "-r", self.base_dir, "--gcov-executable",
self.gcov_tool, "-f", "tests/ztest", "-e",
"tests/ztest/test/*", "--json", "-o", ztestfile,
outdir], stdout=coveragelog)
if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
files = [coveragefile, ztestfile]
else:
files = [coveragefile]
subdir = os.path.join(outdir, "coverage")
os.makedirs(subdir, exist_ok=True)
tracefiles = self._interleave_list("--add-tracefile", files)
return subprocess.call(["gcovr", "-r", self.base_dir, "--html",
"--html-details"] + tracefiles +
["-o", os.path.join(subdir, "index.html")],
stdout=coveragelog)
class DUT(object):
def __init__(self,
id=None,
serial=None,
platform=None,
product=None,
serial_pty=None,
connected=False,
pre_script=None,
post_script=None,
post_flash_script=None,
runner=None):
self.serial = serial
self.platform = platform
self.serial_pty = serial_pty
self._counter = Value("i", 0)
self._available = Value("i", 1)
self.connected = connected
self.pre_script = pre_script
self.id = id
self.product = product
self.runner = runner
self.fixtures = []
self.post_flash_script = post_flash_script
self.post_script = post_script
self.pre_script = pre_script
self.probe_id = None
self.notes = None
self.lock = Lock()
self.match = False
@property
def available(self):
with self._available.get_lock():
return self._available.value
@available.setter
def available(self, value):
with self._available.get_lock():
self._available.value = value
@property
def counter(self):
with self._counter.get_lock():
return self._counter.value
@counter.setter
def counter(self, value):
with self._counter.get_lock():
self._counter.value = value
def to_dict(self):
d = {}
exclude = ['_available', '_counter', 'match']
v = vars(self)
for k in v.keys():
if k not in exclude and v[k]:
d[k] = v[k]
return d
def __repr__(self):
return f"<{self.platform} ({self.product}) on {self.serial}>"
class HardwareMap:
schema_path = os.path.join(ZEPHYR_BASE, "scripts", "schemas", "twister", "hwmap-schema.yaml")
manufacturer = [
'ARM',
'SEGGER',
'MBED',
'STMicroelectronics',
'Atmel Corp.',
'Texas Instruments',
'Silicon Labs',
'NXP Semiconductors',
'Microchip Technology Inc.',
'FTDI',
'Digilent'
]
runner_mapping = {
'pyocd': [
'DAPLink CMSIS-DAP',
'MBED CMSIS-DAP'
],
'jlink': [
'J-Link',
'J-Link OB'
],
'openocd': [
'STM32 STLink', '^XDS110.*', 'STLINK-V3'
],
'dediprog': [
'TTL232R-3V3',
'MCP2200 USB Serial Port Emulator'
]
}
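    # runner_mapping values are USB product strings; entries that look like regular
    # expressions (e.g. '^XDS110.*') are matched with re.match() in scan() below when
    # no exact product-name match is found.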
def __init__(self):
self.detected = []
self.duts = []
def add_device(self, serial, platform, pre_script, is_pty):
device = DUT(platform=platform, connected=True, pre_script=pre_script)
if is_pty:
device.serial_pty = serial
else:
device.serial = serial
self.duts.append(device)
def load(self, map_file):
hwm_schema = scl.yaml_load(self.schema_path)
duts = scl.yaml_load_verify(map_file, hwm_schema)
for dut in duts:
pre_script = dut.get('pre_script')
post_script = dut.get('post_script')
post_flash_script = dut.get('post_flash_script')
platform = dut.get('platform')
id = dut.get('id')
runner = dut.get('runner')
serial = dut.get('serial')
product = dut.get('product')
fixtures = dut.get('fixtures', [])
new_dut = DUT(platform=platform,
product=product,
runner=runner,
id=id,
serial=serial,
connected=serial is not None,
pre_script=pre_script,
post_script=post_script,
post_flash_script=post_flash_script)
new_dut.fixtures = fixtures
new_dut.counter = 0
self.duts.append(new_dut)
def scan(self, persistent=False):
from serial.tools import list_ports
if persistent and platform.system() == 'Linux':
# On Linux, /dev/serial/by-id provides symlinks to
# '/dev/ttyACMx' nodes using names which are unique as
# long as manufacturers fill out USB metadata nicely.
#
# This creates a map from '/dev/ttyACMx' device nodes
# to '/dev/serial/by-id/usb-...' symlinks. The symlinks
# go into the hardware map because they stay the same
# even when the user unplugs / replugs the device.
#
# Some inexpensive USB/serial adapters don't result
# in unique names here, though, so use of this feature
# requires explicitly setting persistent=True.
by_id = Path('/dev/serial/by-id')
def readlink(link):
return str((by_id / link).resolve())
persistent_map = {readlink(link): str(link)
for link in by_id.iterdir()}
else:
persistent_map = {}
serial_devices = list_ports.comports()
logger.info("Scanning connected hardware...")
for d in serial_devices:
if d.manufacturer in self.manufacturer:
# TI XDS110 can have multiple serial devices for a single board
# assume endpoint 0 is the serial, skip all others
if d.manufacturer == 'Texas Instruments' and not d.location.endswith('0'):
continue
s_dev = DUT(platform="unknown",
id=d.serial_number,
serial=persistent_map.get(d.device, d.device),
product=d.product,
runner='unknown')
for runner, _ in self.runner_mapping.items():
products = self.runner_mapping.get(runner)
if d.product in products:
s_dev.runner = runner
continue
# Try regex matching
for p in products:
if re.match(p, d.product):
s_dev.runner = runner
s_dev.connected = True
self.detected.append(s_dev)
else:
logger.warning("Unsupported device (%s): %s" % (d.manufacturer, d))
def save(self, hwm_file):
# use existing map
self.detected.sort(key=lambda x: x.serial or '')
if os.path.exists(hwm_file):
with open(hwm_file, 'r') as yaml_file:
hwm = yaml.load(yaml_file, Loader=SafeLoader)
if hwm:
hwm.sort(key=lambda x: x['serial'] or '')
# disconnect everything
for h in hwm:
h['connected'] = False
h['serial'] = None
for _detected in self.detected:
for h in hwm:
if _detected.id == h['id'] and _detected.product == h['product'] and not _detected.match:
h['connected'] = True
h['serial'] = _detected.serial
_detected.match = True
new_duts = list(filter(lambda d: not d.match, self.detected))
new = []
for d in new_duts:
new.append(d.to_dict())
if hwm:
hwm = hwm + new
else:
hwm = new
with open(hwm_file, 'w') as yaml_file:
yaml.dump(hwm, yaml_file, Dumper=Dumper, default_flow_style=False)
self.load(hwm_file)
logger.info("Registered devices:")
self.dump()
else:
# create new file
dl = []
for _connected in self.detected:
platform = _connected.platform
id = _connected.id
runner = _connected.runner
serial = _connected.serial
product = _connected.product
d = {
'platform': platform,
'id': id,
'runner': runner,
'serial': serial,
'product': product
}
dl.append(d)
with open(hwm_file, 'w') as yaml_file:
yaml.dump(dl, yaml_file, Dumper=Dumper, default_flow_style=False)
logger.info("Detected devices:")
self.dump(detected=True)
def dump(self, filtered=[], header=[], connected_only=False, detected=False):
print("")
table = []
if detected:
to_show = self.detected
else:
to_show = self.duts
if not header:
header = ["Platform", "ID", "Serial device"]
for p in to_show:
platform = p.platform
connected = p.connected
if filtered and platform not in filtered:
continue
if not connected_only or connected:
table.append([platform, p.id, p.serial])
print(tabulate(table, headers=header, tablefmt="github"))
|
adapters.py
|
import cvra_bootloader.can
import socket
import struct
import serial
from queue import Empty, Queue
import threading
class SocketCANConnection:
# See <linux/can.h> for format
CAN_FRAME_FMT = "=IB3x8s"
CAN_FRAME_SIZE = struct.calcsize(CAN_FRAME_FMT)
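    # "=IB3x8s": native byte order, u32 CAN id, u8 data length, 3 padding bytes,
    # then 8 data bytes (the classic 'struct can_frame' layout).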
def __init__(self, interface, read_timeout=1):
"""
Initiates a CAN connection on the given interface (e.g. 'can0').
"""
# Creates a raw CAN connection and binds it to the given interface.
self.socket = socket.socket(socket.AF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
self.socket.bind((interface,))
self.socket.settimeout(read_timeout)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 4096)
def send_frame(self, frame):
data = frame.data.ljust(8, b"\x00")
data = struct.pack(self.CAN_FRAME_FMT, frame.id, len(frame.data), data)
self.socket.send(data)
def receive_frame(self):
try:
frame, _ = self.socket.recvfrom(self.CAN_FRAME_SIZE)
except socket.timeout:
return None
can_id, can_dlc, data = struct.unpack(self.CAN_FRAME_FMT, frame)
return cvra_bootloader.can.Frame(id=can_id, data=data[:can_dlc])
class SerialCANConnection:
"""
Implements the slcan API.
"""
MIN_MSG_LEN = len("t1230")
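    # slcan frames are ASCII commands terminated by '\r'; the shortest valid frame is
    # type + 3-digit id + dlc, e.g. "t1230". As an illustrative example, "t12320011"
    # encodes a standard-id frame: id 0x123, dlc 2, data bytes 00 11 (this matches
    # decode_frame()/encode_frame() below).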
def __init__(self, port, read_timeout=1):
self.port = port
self.timeout = read_timeout
self.rx_queue = Queue()
t = threading.Thread(target=self.spin)
t.daemon = True
t.start()
        self.send_command("S8")  # set bitrate to 1 Mbit/s
        self.send_command("O")   # open the CAN channel
        port.reset_input_buffer()
def spin(self):
part = ""
while True:
part += self.port.read(100).decode("ascii")
            if part.startswith("\r"):
                # strip any leading '\r' left over from the previous chunk
                part = part.lstrip("\r")
if "\r" not in part:
continue
data = part.split("\r")
data, part = data[:-1], data[-1]
for frame in data:
if frame is None:
continue
frame = self.decode_frame(frame)
if frame:
self.rx_queue.put(frame)
def send_command(self, cmd):
cmd += "\r"
cmd = cmd.encode("ascii")
self.port.write(cmd)
def decode_frame(self, msg):
if len(msg) < self.MIN_MSG_LEN:
return None
cmd, msg = msg[0], msg[1:]
if cmd == "T":
extended = True
id_len = 8
elif cmd == "t":
extended = False
id_len = 3
else:
return None
if len(msg) < id_len + 1:
return None
can_id = int(msg[0:id_len], 16)
msg = msg[id_len:]
data_len = int(msg[0])
msg = msg[1:]
if len(msg) < 2 * data_len:
return None
data = [int(msg[i : i + 2], 16) for i in range(0, 2 * data_len, 2)]
        return cvra_bootloader.can.Frame(
            id=can_id, data=bytearray(data), data_length=data_len, extended=extended
        )
def encode_frame(self, frame):
if frame.extended:
cmd = "T"
can_id = "{:08x}".format(frame.id)
else:
cmd = "t"
can_id = "{:03x}".format(frame.id)
length = "{:x}".format(frame.data_length)
data = ""
for b in frame.data:
data += "{:02x}".format(b)
return cmd + can_id + length + data
def send_frame(self, frame):
cmd = self.encode_frame(frame)
self.send_command(cmd)
def receive_frame(self):
try:
# according to the datasheet, erasing a sector from an stm32f407
# can take up to 4 seconds. Therefore we need to wait a bit
return self.rx_queue.get(True, self.timeout)
except:
return None
|
email.py
|
from threading import Thread
from flask import current_app, render_template
from flask_mail import Message
from . import mail
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
def send_email(to, subject, template, **kwargs):
app = current_app._get_current_object()
msg = Message(app.config['FLASKY_MAIL_SUBJECT_PREFIX'] + ' ' + subject,
sender=app.config['FLASKY_MAIL_SENDER'], recipients=[to])
msg.body = render_template(template + '.txt', **kwargs)
msg.html = render_template(template + '.html.j2', **kwargs)
thr = Thread(target=send_async_email, args=[app, msg])
thr.start()
return thr
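# Illustrative usage (template names here are hypothetical):
#   send_email(user.email, 'Confirm Your Account', 'email/confirm', user=user)
# renders email/confirm.txt and email/confirm.html.j2 and sends the message from a
# background thread so the request handler is not blocked.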
|
predict_datapoint.py
|
import glob
import multiprocessing
import os
import subprocess
import argparse
import h5py
import tensorflow as tf
import numpy as np
from src.TreeModel import TreeModel
from src.SettingsReader import SettingsReader
def convert_to_uint16(input, threshold=0.1):
if input.dtype != np.uint16:
max_value = float(np.iinfo(np.uint16).max) * 0.5
return (input / threshold * max_value + max_value).astype(np.uint16)
return input
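# Worked example of the mapping above: values in [-threshold, +threshold] are scaled
# linearly onto the uint16 range, e.g.
#   convert_to_uint16(np.array([-0.1, 0.0, 0.1]), threshold=0.1)
#   -> array([    0, 32767, 65535], dtype=uint16)
# while arrays that are already uint16 pass through unchanged.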
def denormalize_input(input, mean_img, normal_mean_img):
changed = np.transpose(input, (1, 2, 0))
img = changed[:, :, :3] * 150 + mean_img
normal_img = changed[:, :, 3:] + normal_mean_img
return np.concatenate([img, normal_img], axis=2)
def denormalize_output(input):
return np.transpose(input, (1, 2, 3, 0))
class OutputGenerator(object):
def __init__(self, mean_img, normal_mean_img, folder_path_to_save_to):
self._mean_img = mean_img
self._normal_mean_img = normal_mean_img
self._path_collection = []
self._folder_path_to_save_to = folder_path_to_save_to
self.final_paths = []
def _save_voxel_unencoded(self, output, file_path, image):
output = denormalize_output(output)
np.save(file_path, output)
self._path_collection.append((file_path, image))
def generate_outputs(self, image, output, name):
image = denormalize_input(image, self._mean_img, self._normal_mean_img)
name = name[:name.rfind(".")]
file_path = os.path.join(self._folder_path_to_save_to, 'output_' + str(name) + ".npy")
self._save_voxel_unencoded(output, file_path, image)
def decode_outputs(self):
print("Start decoding")
path = os.path.join(self._folder_path_to_save_to, 'output_@.npy')
decode_script = os.path.abspath(
os.path.join(os.path.dirname(__file__), "..", "CompressionAutoEncoder", "decode.py"))
cmd = "python {} {} --store_as_npy --empty_block_detection_threshold=1e-2".format(decode_script, path)
print(cmd)
subprocess.call(cmd, shell=True)
for path, image in self._path_collection:
            decoded_path = path[:path.rfind('.')] + '_decoded.npy'
if not os.path.exists(decoded_path):
raise Exception("This file was not decoded: {}".format(decoded_path))
data = np.load(decoded_path)
data = convert_to_uint16(data)
os.remove(decoded_path)
os.remove(path)
result_path = path.replace(".npy", ".hdf5")
with h5py.File(result_path, 'w') as output:
output.create_dataset("voxelgrid", data=data, compression='gzip')
output.create_dataset("colors", data=image, compression='gzip')
print("Stored final voxelgrid in {}".format(result_path))
self.final_paths.append(result_path)
def predict_some_sample_points(image_paths, model_path, folder_path_to_save_to, use_pretrained_weights,
use_gen_normals):
if not os.path.exists(model_path + ".meta"):
raise Exception("Model does not exist: " + model_path)
if len(image_paths) == 0:
raise Exception("The list of .hdf5 containers is empty!")
folder_path_to_save_to = os.path.abspath(folder_path_to_save_to)
data_folder = os.path.abspath(os.path.join(os.path.dirname(__file__), "data"))
settings_file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "settings_file.yml"))
if os.path.exists(settings_file_path):
settings = SettingsReader(settings_file_path, data_folder)
else:
raise Exception("The settings file could not be found!")
with h5py.File(os.path.join(data_folder, 'color_normal_mean.hdf5'), 'r') as data:
mean_img = np.array(data["color"])
normal_mean_img = np.array(data["normal"])
settings.batch_size = 1
settings.shuffle_size = 1
settings.use_augmentations = False
used_shape = [None, 3, settings.img_shape[1], settings.img_shape[2]]
color_input = tf.placeholder(tf.float32, used_shape)
normal_input = tf.placeholder(tf.float32, used_shape)
input_to_network = tf.concat([color_input, normal_input], axis=1)
model = TreeModel(input=input_to_network, settings=settings)
last_layer = model.last_layer
saver = tf.train.Saver()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
def run_tf(i_unused, return_dict):
def collect_results(sess):
result = [[], [], []]
current_config = [input_to_network, last_layer]
counter = 0
for image_path in image_paths:
if not os.path.exists(image_path) or not image_path.endswith(".hdf5"):
continue
with h5py.File(image_path, "r") as file:
if "colors" in file.keys():
color_o = np.array(file["colors"], dtype=np.float32) - mean_img
color_o /= 150.
color_o = np.expand_dims(np.transpose(color_o, (2, 0, 1)), axis=0) # channel first
if "normals" in file.keys() or "normal_gen" in file.keys():
if not use_gen_normals and "normals" in file.keys():
normal_o = np.array(file["normals"], dtype=np.float32)
elif use_gen_normals and "normal_gen" in file.keys():
normal_o = np.array(file["normal_gen"], dtype=np.float32)
else:
continue
                        # the original training data was in sRGB, so this has to be recreated here
if use_pretrained_weights:
normal_o = np.power(normal_o, 2.2)
normal_o -= normal_mean_img
normal_o = np.expand_dims(np.transpose(normal_o, (2, 0, 1)), axis=0)
else:
continue
quit_break = False
res = sess.run(current_config, feed_dict={color_input: color_o, normal_input: normal_o})
for i in range(2):
result[i].append(res[i][0])
result[2].append(os.path.basename(image_path))
counter += 1
print("Done with: {} of {}".format(counter, len(image_paths)))
if quit_break:
break
return result
with tf.Session(config=config) as sess:
sess.run(tf.global_variables_initializer())
saver.restore(sess, model_path)
results = collect_results(sess)
return_dict['result'] = (results,)
print("Session is done")
manager = multiprocessing.Manager()
return_dict = manager.dict()
p = multiprocessing.Process(target=run_tf, args=(0, return_dict))
p.start()
p.join()
results = return_dict['result'][0]
og = OutputGenerator(mean_img, normal_mean_img, folder_path_to_save_to)
for i in range(len(results[0])):
print(i)
og.generate_outputs(results[0][i], results[1][i], results[2][i])
og.decode_outputs()
print('\n' + folder_path_to_save_to)
return og.final_paths
if __name__ == "__main__":
parser = argparse.ArgumentParser("This predicts based on an .hdf5 container with a color and normal image "
"a TSDF voxelgrid. The model should be in 'model/model.ckpt'")
parser.add_argument("path", help="Path to the .hdf5 container, all @ will be replaced with *.")
parser.add_argument("--output", help="Path to where to save the output files", required=True)
parser.add_argument("--use_gen_normal", help="Use a generated normal image. Could be generated "
"with UNetNormalGen.", action="store_true")
parser.add_argument("--use_pretrained_weights", help="Use the pretrained weight mode.", action="store_true")
args = parser.parse_args()
hdf5_paths = glob.glob(args.path)
if not os.path.exists(args.output):
os.makedirs(args.output)
model_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "model", "model.ckpt"))
predict_some_sample_points(hdf5_paths, model_path, args.output, args.use_pretrained_weights, args.use_gen_normal)
print("You can view these files now with the TSDFRenderer.")
|
material_unload_server.py
|
#! /usr/bin/env python
"""
Copyright 2013 Southwest Research Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import logging
import threading
import time
import collections
from httplib import HTTPConnection
from xml.etree import ElementTree
import roslib; roslib.load_manifest('mtconnect_msgs')
import rospy
import actionlib
import mtconnect_msgs.msg
class MaterialUnloadServer():
"""Dedicated Material Unload Server -- without robot interface
Class establishes a simple action server for the MaterialUnloadAction, and
starts a thread to subscribe to the ROS topic CncStatus.
The Material Unload sequence is completed once the door state and the chuck
state are open.
@param server_name: string, 'MaterialUnload'
@param _result: instance of the mtconnect_msgs.msg.MaterialUnloadResult class
@param _as: ROS actionlib SimpleActionServer
@param door_state: int, published value for the CNC door state [0:Open, 1:Closed, -1:Unlatched]
@param chuck_state: int, published value for the CNC chuck state [0:Open, 1:Closed, -1:Unlatched]
@param sub_thread: thread that launches a ROS subscriber to the CncResponseTopic via bridge_publisher node
"""
def __init__(self):
self.server_name = 'MaterialUnload'
self._result = mtconnect_msgs.msg.MaterialUnloadResult()
self._as = actionlib.SimpleActionServer('MaterialUnloadClient', mtconnect_msgs.msg.MaterialUnloadAction, self.execute_cb, False)
self._as.start()
self._as.accept_new_goal()
self.counter = 1
# Subscribe to CNC state topic
self.door_state = None
self.chuck_state = None
self.open_chuck = None
# Check for CncResponseTopic
dwell = 2
while True:
published_topics = dict(rospy.get_published_topics())
if '/CncResponseTopic' in published_topics.keys():
rospy.loginfo('ROS CncResponseTopic available, starting subscriber')
break
else:
rospy.loginfo('ROS CncResponseTopic not available, will try to subscribe in %d seconds' % dwell)
time.sleep(dwell)
# Create ROS Subscriber thread
sub_thread = threading.Thread(target = self.subscriber_thread)
sub_thread.daemon = True
sub_thread.start()
def execute_cb(self, goal):
rospy.loginfo('In %s Bridge Server Callback -- determining action request result.' % self.server_name)
# Initialize timeout parameter
start = time.time()
# Start while loop and check for cnc action changes
rospy.loginfo('In MaterialUnload Server while loop')
previous = time.time()
dwell = True
while dwell == True:
try:
# Current CNC state
if time.time() - previous > 1.0:
rospy.loginfo('CNC States [door_state, chuck_state, open_chuck]: %s' % [self.door_state, self.chuck_state, self.open_chuck])
previous = time.time()
if self.door_state == 0 and self.chuck_state == 0 and self.open_chuck == 0:
                    # Chuck and door are open, so complete the material unload cycle
self._result.unload_state = 'COMPLETE'
dwell = False
rospy.loginfo('CNC States [door_state, chuck_state, open_chuck]: %s' % [self.door_state, self.chuck_state, self.open_chuck])
rospy.loginfo('CYCLE NUMBER --> %d' % self.counter)
self.counter += 1
# Check for timeout
if time.time() - start > 120.0:
rospy.loginfo('Material Unload Server Timed Out')
sys.exit()
except rospy.ROSInterruptException:
rospy.loginfo('program interrupted before completion')
# Indicate a successful action
self._as.set_succeeded(self._result)
rospy.loginfo('In %s Callback -- action succeeded. Result --> %s' % (self.server_name, self._result.unload_state))
return self._result
def topic_callback(self, msg):
self.door_state = msg.door_state.val
self.chuck_state = msg.chuck_state.val
self.open_chuck = msg.open_chuck.val
return
def subscriber_thread(self):
rospy.Subscriber('CncResponseTopic', mtconnect_msgs.msg.CncStatus, self.topic_callback)
rospy.spin()
return
if __name__ == '__main__':
# Initialize the ROS node
rospy.init_node('MaterialUnloadServer')
rospy.loginfo('Started ROS MaterialUnload Server')
# Launch the action server
server = MaterialUnloadServer()
rospy.spin()
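# A minimal client sketch for exercising this server (not part of this node). It
# assumes a default-constructed MaterialUnloadGoal is acceptable and reuses the
# 'MaterialUnloadClient' action namespace from above:
#
#   client = actionlib.SimpleActionClient('MaterialUnloadClient',
#                                         mtconnect_msgs.msg.MaterialUnloadAction)
#   client.wait_for_server()
#   client.send_goal(mtconnect_msgs.msg.MaterialUnloadGoal())
#   client.wait_for_result()
#   rospy.loginfo('Unload result: %s' % client.get_result().unload_state)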
|
keyboard_reader.py
|
from pynput.keyboard import Listener, Controller, Key
import time
import threading
class ComboListener:
def __init__(self):
self.cur_keys = []
self.keymap = {
'aaa': 'aster',
'bbb': 'good boy'
}
self._run()
def _on_press(self, key):
try:
self.cur_keys.append(key.char)
except AttributeError:
self.cur_keys.append(key.name)
def _run(self):
l = Listener(on_press=self._on_press)
l.daemon = True
l.start()
t = threading.Thread(target=self._clear)
t.daemon = True
t.start()
def _clear(self):
while True:
time.sleep(0.7)
self.cur_keys.clear()
def get_combo(self):
if len(self.cur_keys) >= 3:
            combo = self.cur_keys[-3:]
return combo
def parse_combo(self):
combo = self.get_combo()
if combo:
key = ''.join(combo)
if key in self.keymap.keys():
return self.keymap[key]
def send(content):
for _ in range(3):
c.press(Key.backspace)
c.type(content)
cl = ComboListener()
c = Controller()
while True:
combo = cl.parse_combo()
if combo:
print(combo)
send(combo)
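# A small sketch for exercising parse_combo without typing (it assumes pynput can
# start its Listener, i.e. a display is available): fill cur_keys by hand before the
# 0.7 s clearing thread wipes it.
#
#   listener = ComboListener()
#   listener.cur_keys = ['a', 'a', 'a']        # simulate three quick presses of 'a'
#   assert listener.parse_combo() == 'aster'   # matches the 'aaa' entry in keymap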
|
settingswindow_qt.py
|
import numbers
import os
import sys
import threading
from xmlrpc.server import SimpleXMLRPCServer # pylint: disable=no-name-in-module
try: # Style C -- may be imported into Caster, or externally
BASE_PATH = os.path.realpath(__file__).rsplit(os.path.sep + "castervoice", 1)[0]
if BASE_PATH not in sys.path:
sys.path.append(BASE_PATH)
finally:
from castervoice.lib import printer
from castervoice.lib import settings
from castervoice.lib.merge.communication import Communicator
# TODO: Remove this try wrapper when CI server supports Qt
try:
import PySide2.QtCore
from PySide2.QtGui import QPalette
from PySide2.QtWidgets import QApplication
from PySide2.QtWidgets import QDialogButtonBox
from PySide2.QtWidgets import QCheckBox
from PySide2.QtWidgets import QDialog
from PySide2.QtWidgets import QFormLayout
from PySide2.QtWidgets import QGroupBox
from PySide2.QtWidgets import QLabel
from PySide2.QtWidgets import QLineEdit
from PySide2.QtWidgets import QScrollArea
from PySide2.QtWidgets import QTabWidget
from PySide2.QtWidgets import QVBoxLayout
from PySide2.QtWidgets import QWidget
except ImportError:
sys.exit(0)
settings.initialize()
DICT_SETTING = 1
STRING_SETTING = 2
STRING_LIST_SETTING = 4
NUMBER_LIST_SETTING = 8
NUMBER_SETTING = 16
BOOLEAN_SETTING = 32
CONTROL_KEY = PySide2.QtCore.Qt.Key_Meta if sys.platform == "darwin" else PySide2.QtCore.Qt.Key_Control
SHIFT_TAB_KEY = int(PySide2.QtCore.Qt.Key_Tab) + 1
RPC_COMPLETE_EVENT = PySide2.QtCore.QEvent.Type(PySide2.QtCore.QEvent.registerEventType(-1))
class Field:
def __init__(self, widget, original, text_type=None):
self.children = []
self.widget = widget
self.original = original
self.text_type = text_type
def add_child(self, field):
self.children.append(field)
class SettingsDialog(QDialog):
def __init__(self, server):
QDialog.__init__(self, None)
self.modifier = 0
self.server = server
self.setup_xmlrpc_server()
self.completed = False
self.fields = []
self.tabs = QTabWidget()
for top in sorted(settings.SETTINGS.keys()): # pylint: disable=no-member
self.tabs.addTab(self.make_tab(top), top)
buttons = QDialogButtonBox(
QDialogButtonBox.StandardButtons((int(QDialogButtonBox.StandardButton.Ok) |
int(QDialogButtonBox.StandardButton.Cancel))))
buttons.accepted.connect(self.accept)
buttons.rejected.connect(self.reject)
mainLayout = QVBoxLayout()
mainLayout.addWidget(self.tabs)
mainLayout.addWidget(buttons)
self.setLayout(mainLayout)
self.setWindowTitle(settings.SETTINGS_WINDOW_TITLE +
settings.SOFTWARE_VERSION_NUMBER)
self.expiration = threading.Timer(300, self.xmlrpc_kill)
self.expiration.start()
def event(self, event):
if event.type() == PySide2.QtCore.QEvent.KeyRelease:
if self.modifier == 1:
curr = self.tabs.currentIndex()
tabs_count = self.tabs.count()
if event.key() == PySide2.QtCore.Qt.Key_Tab:
next = curr + 1
next = 0 if next == tabs_count else next
self.tabs.setCurrentIndex(next)
return True
elif event.key() == SHIFT_TAB_KEY:
next = curr - 1
next = tabs_count - 1 if next == -1 else next
self.tabs.setCurrentIndex(next)
return True
elif event.type() == RPC_COMPLETE_EVENT:
self.completed = True
self.hide()
return True
return QDialog.event(self, event)
def keyPressEvent(self, event):
if event.key() == CONTROL_KEY:
self.modifier |= 1
QDialog.keyPressEvent(self, event)
def keyReleaseEvent(self, event):
if event.key() == CONTROL_KEY:
self.modifier &= ~1
QDialog.keyReleaseEvent(self, event)
def make_tab(self, title):
area = QScrollArea()
field = Field(area, title)
area.setBackgroundRole(QPalette.Mid)
area.setWidgetResizable(True)
area.setWidget(self.add_fields(self, title, field))
self.fields.append(field)
return area
def add_fields(self, parent, title, field):
tab = QWidget(parent)
form = QFormLayout()
form.setFieldGrowthPolicy(QFormLayout.AllNonFixedFieldsGrow)
for label in sorted(settings.SETTINGS[title].keys()):
value = settings.SETTINGS[title][label]
subfield = Field(None, label)
subfield.widget = self.field_from_value(tab, value, subfield)
form.addRow(QLabel(label), subfield.widget)
field.add_child(subfield)
tab.setLayout(form)
return tab
def field_from_value(self, parent, value, field):
if isinstance(value, bool):
item = QCheckBox('')
item.setChecked(value)
return item
if isinstance(value, str):
field.text_type = STRING_SETTING
return QLineEdit(value)
if isinstance(value, numbers.Real):
field.text_type = NUMBER_SETTING
return QLineEdit(str(value))
if isinstance(value, list):
if isinstance(value[0], str):
field.text_type = STRING_LIST_SETTING
return QLineEdit(", ".join(value))
elif isinstance(value[0], numbers.Real):
field.text_type = NUMBER_LIST_SETTING
return QLineEdit(", ".join((str(x) for x in value)))
if isinstance(value, dict):
subpage = QGroupBox(parent)
form = QFormLayout()
for label in sorted(value.keys()):
subfield = Field(None, label)
subfield.widget = self.field_from_value(subpage, value[label], subfield)
field.add_child(subfield)
form.addRow(QLabel(label), subfield.widget)
subpage.setLayout(form)
return subpage
# This is left for bug reporting purposes.
printer.out("{} was not assigned to {} because type {} is unknown.".format(value, parent, type(value)))
return None
def tree_to_dictionary(self, t=None):
d = {}
children = self.fields if t is None else t.children
for field in children:
value = None
if isinstance(field.widget, QLineEdit):
value = field.widget.text()
if field.text_type == STRING_LIST_SETTING:
d[field.original] = [
x for x in value.replace(", ", ",").split(",") if x
] # don't count empty strings
elif field.text_type == NUMBER_LIST_SETTING:
temp_list = (
float(x) for x in value.replace(", ", ",").split(",") if x
) # don't count empty strings
d[field.original] = [int(x) if x.is_integer() else x for x in temp_list]
elif field.text_type == NUMBER_SETTING:
value = float(value)
if value.is_integer():
value = int(value)
                    d[field.original] = value
else:
d[field.original] = value.replace("\\", "/")
elif isinstance(field.widget, (QScrollArea, QGroupBox)):
d[field.original] = self.tree_to_dictionary(field)
elif isinstance(field.widget, QCheckBox):
d[field.original] = field.widget.isChecked()
return d
def setup_xmlrpc_server(self):
self.server.register_function(self.xmlrpc_get_message, "get_message")
self.server.register_function(self.xmlrpc_complete, "complete")
self.server.register_function(self.xmlrpc_kill, "kill")
server_thread = threading.Thread(target=self.server.serve_forever)
server_thread.daemon = True
server_thread.start()
def xmlrpc_kill(self):
self.expiration.cancel()
QApplication.quit()
def xmlrpc_get_message(self):
if self.completed:
threading.Timer(1, self.xmlrpc_kill).start()
return self.tree_to_dictionary()
else:
return None
def xmlrpc_complete(self):
PySide2.QtCore.QCoreApplication.postEvent(self, PySide2.QtCore.QEvent(RPC_COMPLETE_EVENT))
def accept(self):
self.xmlrpc_complete()
def reject(self):
self.xmlrpc_kill()
def main():
server_address = (Communicator.LOCALHOST, Communicator().com_registry["hmc"])
    # Logging, which is enabled by default, causes RPC to malfunction when the GUI
    # runs under pythonw. Explicitly disable logging for the XML-RPC server.
server = SimpleXMLRPCServer(server_address, logRequests=False, allow_none=True)
app = QApplication(sys.argv)
window = SettingsDialog(server)
window.show()
exit_code = app.exec_()
server.shutdown()
sys.exit(exit_code)
if __name__ == "__main__":
main()
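# Sketch of the remote side (not part of this module): the dialog above is designed
# to be driven over XML-RPC. Assuming the same Communicator registry resolves the
# port, a manual client could look roughly like:
#
#   from xmlrpc.client import ServerProxy
#   port = Communicator().com_registry["hmc"]
#   proxy = ServerProxy("http://%s:%d" % (Communicator.LOCALHOST, port), allow_none=True)
#   proxy.complete()                 # ask the dialog to hide and mark itself done
#   settings = proxy.get_message()   # edited settings dict, or None if not yet completed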
|
meteorDL-pi.py
|
# by Milan Kalina
# based on Tensorflow API v2 Object Detection code
# https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/tf2.md
from os import path, system, mkdir, makedirs
import cv2
import time
import os
import argparse
import numpy as np
from datetime import datetime
from threading import Thread, Semaphore, Lock
from detector_tflite import DetectorTF2
import dvg_ringbuffer as rb
import configparser
from PIL import Image
import statistics as st
import multiprocessing as mp
import subprocess as sp
from queue import Queue
#import numpy as np
#import time
#import cumpy
class VideoStreamWidget(object):
def __init__(self, src=0):
# Create a VideoCapture object
self.capture = cv2.VideoCapture(src)
# Start the thread to read frames from the video stream
self.thread = Thread(target=self.update_rb, args=())
self.thread.daemon = True
self.thread.start()
def update_rb(self):
# Read the next frame from the stream in a different thread, builds and rotates the buffer
        # and maintains the buffer - a list of consecutive frames
self.total = int(args.fps) * 5 # buffer size
self.k = 0 # global frame counter
self.j = 0 # maxpixel counter
self.t = [(0,0)] # time tracking
#mutex = Lock()
self.time0 = time.time()
(self.status, self.frame) = self.capture.read()
self.frame_width = self.frame.shape[1]
self.frame_height = self.frame.shape[0]
self.np_buffer = rb.RingBuffer(self.total, dtype=(np.uint8,(self.frame_height, self.frame_width, 3)), allow_overwrite=False)
print("Filling the ring buffer...")
self.buffer_fill()
print("Buffer filled, starting..." + str(self.np_buffer.shape))
while True:
if self.capture.isOpened():
(self.status, self.frame) = self.capture.read()
#self.frame = self.frame[self.border:self.border+720,:]
if (self.status):
cv2.rectangle(self.frame, (0, (self.frame_height-10)), (200, self.frame_height), (0,0,0), -1)
cv2.putText(self.frame, station + ' ' + datetime.utcfromtimestamp(time.time()).strftime('%d/%m/%Y %H:%M:%S.%f')[:-4] + ' ' + str(self.k), (1,self.frame_height-3), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255,255,255), 1, cv2.LINE_AA)
#mutex.acquire()
self.np_buffer.popleft()
self.np_buffer.append(self.frame)
#mutex.release()
self.t = self.t[1:]
#self.t = np.roll(self.t, -1, axis=0)
self.t.append((self.k, time.time()))
self.k += 1
self.j += 1
else:
print("\n" + 'Connection lost... trying to restore...' + datetime.utcfromtimestamp(time.time()).strftime('%H:%M:%S'))
self.con_restore()
print('Connection restored...' + datetime.utcfromtimestamp(time.time()).strftime('%H:%M:%S'))
else:
print('Capture closed...')
def buffer_fill(self):
if self.capture.isOpened():
print('Filling buffer...')
while (self.status) and (len(self.t) < self.total+1):
(self.status, self.frame) = self.capture.read()
print(str(self.frame.shape) + ' ' + str(self.j) + ' ', end='\r', flush=True)
self.np_buffer.append(self.frame)
self.t.append((self.k, time.time()))
self.j += 1
self.k += 1
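    # A compact sketch of the ring-buffer calls used in update_rb above; the 4-frame,
    # 2x2 "image" shape is made up purely for illustration:
    #
    #   buf = rb.RingBuffer(4, dtype=(np.uint8, (2, 2, 3)), allow_overwrite=False)
    #   for i in range(4):
    #       buf.append(np.full((2, 2, 3), i, dtype=np.uint8))   # fill to capacity
    #   buf.popleft()                                           # drop the oldest frame
    #   buf.append(np.full((2, 2, 3), 9, dtype=np.uint8))       # make room, then add the newest
    #   chunk = np.copy(buf[-2:])                               # snapshot of the two newest frames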
def check_ping(self):
#hostname = ip
response = system("ping -c 1 " + ip)
# and then check the response...
if response == 0:
pingstatus = True
else:
pingstatus = False
return pingstatus
def con_restore(self):
self.capture.release()
while not self.check_ping():
time.sleep(10)
self.capture = cv2.VideoCapture(source)
def saveArray(self):
while True:
# saves numpy array into video file
a = 0
# Lock the resources until free access to movie file
self.threadlock.acquire()
# wait until buffer received from queue
ar = self.q.get()
while a < ar.shape[0]:
#ar[a] = detector.DisplayDetections(ar[a], self.det_boxes)
#cv2.rectangle(ar[a], (0, (self.frame_height-10)), (300, self.frame_height), (0,0,0), -1)
#cv2.putText(ar[a], datetime.utcfromtimestamp(t[1]+a*0.04).strftime('%d/%m/%Y %H:%M:%S.%f')[:-4], (0,self.frame_height-3), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255,255,255), 1, cv2.LINE_AA)
self.out.write(ar[a])
a += 1
self.threadlock.release()
self.q.task_done()
def DetectFromStream(self, detector, save_output=False, output_dir='output/'):
self.mp = int(args.fps) # maxpixel size
self.mp1 = round(self.total/2 - self.mp/2) # maxpixel in the middle of the buffer
self.mp2 = self.mp1 + self.mp
#self.j = 0
time1 = 0
time2 = 1
t0 = t1 = t2 = t3 = t4 = 0
self.last_frame = 0
self.last_frame_recorded = 0
self.recording = False
mean = 0
# limit for detection trigger
perc30 = 0
mean_limit = 200
bg_max = 0.9
thr = 0.9
bg=[1,1,1,1,1,1,1,1,1]
# number of sec to be added for capture
#sec_post = 0
self.station = args.station
mask = False
# apply the mask if there is any
maskFile = 'mask-' + args.station + '.bmp'
if path.exists(maskFile):
print ('Loading mask...')
maskImage = Image.open(maskFile).convert('L')
maskImage = np.array(maskImage, dtype='uint8')
random_mask = np.random.rand(maskImage.shape[0],maskImage.shape[1],1) * 255
mask = True
else:
print ('No mask file found')
# start a queue for saving threads, then start a thread
self.q = Queue()
self.threadlock=Lock()
Thread(target=self.saveArray, daemon=True).start()
time.sleep(5)
while True:
# if new 1s chunk in the buffer is ready for detection
if (self.j >= self.mp) and (self.np_buffer.shape[0] >= self.total):
# new maxpixel frame to be tested for detection
self.j = 0
print ("detecting at fps={:2.1f}".format(self.mp/(time.time()-time1)) + ' | t=' + str(int(self.t[-1][0])) + ' | buffer=' + str(self.np_buffer.shape) + ' | maxpixel=' + str(self.mp) + ' | threshold=' + str(round((bg_max + margin) * 100)/100) + ' | t1=' + "{:1.3f}".format(t1-t0) + ' | t2=' + "{:1.3f}".format(t2-t1) + ' | t3=' + "{:1.3f}".format(t3-t2)+ ' | t4=' + "{:1.3f}".format(t4-t3) + ' | perc30=' + "{:.0f}".format(perc30) + ' ', end='\r', flush=True)
time1 = t0 = time.time()
# timestamp for file name, 1st frame of maxpixel image
t_frame1 = self.t[0][1]
                # take 1s from the middle of the buffer to create the maxpixel for detection, green channel only for better performance
buffer_small = self.np_buffer[self.mp1:self.mp2,:,:,[1]]
t1 = time.time()
# calculate the maxpixel image
img = np.max(buffer_small, axis=0)
t2 = time.time()
perc30 = np.percentile(img, 30)
img = cv2.cvtColor(img,cv2.COLOR_GRAY2BGR)
if mask:
# apply trick from RMS
img[maskImage < 3] = random_mask[maskImage < 3]
t3 = time.time()
self.det_boxes = detector.DetectFromImage(img)
img_clean = img
if self.det_boxes[0][5] > 0.1:
img = detector.DisplayDetections(img, self.det_boxes[:5])
t4 = time.time()
img_small = cv2.resize(img, (928, 522), interpolation = cv2.INTER_AREA)
cv2.imshow('Meteor detection', img_small)
key = cv2.waitKeyEx(1)
# trigger the saving if signal above the mean noise and sky background below the daytime brightness
if (self.det_boxes and perc30 < mean_limit):
if self.det_boxes[0][5] > (bg_max + margin):
if save_output:
# prepare file and folder for saving
subfolder = 'output/' + args.station + '_' + time.strftime("%Y%m%d", time.gmtime())
if not os.path.exists(subfolder):
os.mkdir(subfolder)
self.output_path = os.path.join(subfolder, station + '_' + datetime.utcfromtimestamp(t_frame1).strftime("%Y%m%d_%H%M%S_%f") + '_' + "{:0.0f}".format(100*self.det_boxes[0][5]) + '.mp4')
self.out = cv2.VideoWriter(self.output_path, cv2.VideoWriter_fourcc(*"mp4v"), 25, (img.shape[1], img.shape[0]))
# note the last frame
self.last_frame = self.t[-1][0]
# get the buffer copy to be saved
buffer = np.copy(self.np_buffer)
# create another saving task
self.q.put(buffer)
print ("\n" + 'Starting recording...', datetime.utcfromtimestamp(time.time()).strftime("%H:%M:%S.%f"), self.output_path, self.det_boxes[0], 'frame: ' + str(self.last_frame - buffer.shape[0]) + '-' + str(self.last_frame))
output_path_mp = os.path.join(subfolder, station + '_mp_' + datetime.utcfromtimestamp(t_frame1).strftime("%Y%m%d_%H%M%S_%f") + '.jpg')
output_path_mp_clean = os.path.join(subfolder, station + '_mp-clean_' + datetime.utcfromtimestamp(t_frame1).strftime("%Y%m%d_%H%M%S_%f") + '.jpg')
# save the maxpixel as jpg
cv2.imwrite(output_path_mp, img)
cv2.imwrite(output_path_mp_clean, img_clean)
self.last_frame_recorded = self.last_frame
# save another <post> seconds of frames
for s in range(3):
# wait until new data available
while self.t[-1][0] < (self.last_frame + 3*self.mp):
...
if self.t[-1][0] == (self.last_frame + 3*self.mp):
self.last_frame = self.t[-1][0]
buffer = np.copy(self.np_buffer[-3*self.mp:])
self.q.put(buffer)
print(f"Saving additional chunk: {self.last_frame-3*self.mp}-{self.last_frame}")
self.last_frame_recorded = self.last_frame
self.q.join()
self.out.release()
#print(f"Writer released...")
# update mean and max noise
bg = bg[-9:]
bg.append(self.det_boxes[0][5])
bg_max = max(bg[:9])
# update the screen
#img = cv2.resize(img, (928, 522), interpolation = cv2.INTER_AREA)
#cv2.imshow('Meteor detection', img)
            # key handling, key codes valid for RPi4
key = cv2.waitKeyEx(1)
if key == 1048603:
break
elif key == 1048689:
detector.Threshold += 0.1
elif key == 1048673:
detector.Threshold -= 0.1
time2 = time.time()
self.capture.release()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Object Detection from Images or Video')
parser.add_argument('--model_path', help='Path to frozen detection model', default='tflite-real-640-4/model_full_integer_quant_edgetpu.tflite')
parser.add_argument('--path_to_labelmap', help='Path to labelmap (.txt) file', default='labelmap.txt')
parser.add_argument('--class_ids', help='id of classes to detect, expects string with ids delimited by ","', type=str, default=None) # example input "1,3" to detect person and car
parser.add_argument('--threshold', help='Detection Threshold', type=float, default=0.01)
    parser.add_argument('--images_dir', help='Directory of input images', default='data/samples/images/')
    parser.add_argument('--video_path', help='Path to input video', default='data/samples/pedestrian_test.mp4')
parser.add_argument('--output_directory', help='Path to output images and video', default='output/')
parser.add_argument('--video_input', help='Flag for video input', default=False, action='store_true') # default is false
    parser.add_argument('--save_output', help='Flag to save images and video with detections visualized', default=True, action='store_true')  # note: default=True makes this effectively always on
parser.add_argument('--camera', help='camera number', default='10')
parser.add_argument('--station', help='station name', default='XX0XXXX')
parser.add_argument('--fps', help='fps', default=25)
args = parser.parse_args()
margin = 0.3
station = args.station
config = configparser.ConfigParser()
config.read('config.ini')
if station not in config:
station = 'default'
fps = int(config[station]['fps'])
ip = config[station]['ip']
    # on RPi4, the FFMPEG backend is preferred
source = 'rtsp://' + config[station]['ip'] + ':' + config[station]['rtsp']
print(f"Streaming from {source}")
sec_post = int(config['general']['post_seconds'])
sec_pre = int(config['general']['pre_seconds'])
b_size = int(config['general']['buffer_size'])
id_list = None
if args.class_ids is not None:
id_list = [int(item) for item in args.class_ids.split(',')]
if args.save_output:
if not path.exists(args.output_directory):
makedirs(args.output_directory)
# instance of the class DetectorTF2
print("Starting detector...")
detector = DetectorTF2(args.model_path, args.path_to_labelmap, class_id=id_list, threshold=args.threshold)
print("detector started...")
if args.video_input:
        # note: DetectFromVideo is not defined in this script and must be provided elsewhere
        DetectFromVideo(detector, args.video_path, save_output=args.save_output, output_dir=args.output_directory)
else:
# start the capture and wait to fill the buffer
video_stream_widget = VideoStreamWidget(source)
time.sleep(8)
# start detector
video_stream_widget.DetectFromStream(detector, save_output=args.save_output, output_dir=args.output_directory)
print("Done ...")
cv2.destroyAllWindows()
|
test.py
|
# -*- coding: utf-8 -*-
import redis
import unittest
from hotels import hotels
import random
import time
from RLTest import Env
from includes import *
from common import getConnectionByEnv, waitForIndex, toSortedFlatList
# this test is no longer relevant
# def testAdd(env):
# if env.is_cluster():
# raise unittest.SkipTest()
# r = env
# env.assertOk(r.execute_command(
# 'ft.create', 'idx', 'schema', 'title', 'text', 'body', 'text'))
# env.assertTrue(r.exists('idx:idx'))
# env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 1.0, 'fields',
# 'title', 'hello world',
# 'body', 'lorem ist ipsum'))
# for _ in r.retry_with_rdb_reload():
# prefix = 'ft'
# env.assertExists(prefix + ':idx/hello')
# env.assertExists(prefix + ':idx/world')
# env.assertExists(prefix + ':idx/lorem')
def testAddErrors(env):
env.expect('ft.create idx ON HASH schema foo text bar numeric sortable').equal('OK')
env.expect('ft.add idx doc1 1 redis 4').error().contains('Unknown keyword')
env.expect('ft.add idx doc1').error().contains("wrong number of arguments")
env.expect('ft.add idx doc1 42').error().contains("Score must be between 0 and 1")
env.expect('ft.add idx doc1 1.0').error().contains("No field list found")
env.expect('ft.add fake_idx doc1 1.0 fields foo bar').error().contains("Unknown index name")
def assertEqualIgnoreCluster(env, val1, val2):
    # todo: each test that uses this function should be switched back to env.assertEqual once the
    # issues on the coordinator are fixed
if env.isCluster():
return
env.assertEqual(val1, val2)
def testConditionalUpdate(env):
env.assertOk(env.cmd(
'ft.create', 'idx','ON', 'HASH',
'schema', 'foo', 'text', 'bar', 'numeric', 'sortable'))
env.assertOk(env.cmd('ft.add', 'idx', '1', '1',
'fields', 'foo', 'hello', 'bar', '123'))
env.assertOk(env.cmd('ft.add', 'idx', '1', '1', 'replace', 'if',
'@foo == "hello"', 'fields', 'foo', 'world', 'bar', '123'))
env.assertEqual('NOADD', env.cmd('ft.add', 'idx', '1', '1', 'replace',
'if', '@foo == "hello"', 'fields', 'foo', 'world', 'bar', '123'))
env.assertEqual('NOADD', env.cmd('ft.add', 'idx', '1', '1', 'replace',
'if', '1 == 2', 'fields', 'foo', 'world', 'bar', '123'))
env.assertOk(env.cmd('ft.add', 'idx', '1', '1', 'replace', 'partial', 'if',
'@foo == "world"', 'fields', 'bar', '234'))
env.assertOk(env.cmd('ft.add', 'idx', '1', '1', 'replace', 'if',
'@bar == 234', 'fields', 'foo', 'hello', 'bar', '123'))
# Ensure that conditionals are ignored if the document doesn't exist
env.assertOk(env.cmd('FT.ADD', 'idx', '666', '1',
'IF', '@bar > 42', 'FIELDS', 'bar', '15'))
# Ensure that it fails if we try again, because it already exists
env.assertEqual('NOADD', env.cmd('FT.ADD', 'idx', '666', '1',
'REPLACE', 'IF', '@bar > 42', 'FIELDS', 'bar', '15'))
# Ensure that it fails because we're not using 'REPLACE'
with env.assertResponseError():
env.assertOk(env.cmd('FT.ADD', 'idx', '666', '1',
'IF', '@bar > 42', 'FIELDS', 'bar', '15'))
def testUnionIdList(env):
# Regression test for https://github.com/RediSearch/RediSearch/issues/306
r = env
N = 100
env.assertOk(r.execute_command(
"ft.create", "test", 'ON', 'HASH',
"SCHEMA", "tags", "TAG", "waypoint", "GEO"))
env.assertOk(r.execute_command(
"ft.add", "test", "1", "1", "FIELDS", "tags", "alberta", "waypoint", "-113.524,53.5244"))
env.assertOk(r.execute_command(
"ft.add", "test", "2", "1", "FIELDS", "tags", "ontario", "waypoint", "-79.395,43.661667"))
r.cmd('ft.search', 'test', '@tags:{ontario}')
res = r.execute_command(
'ft.search', 'test', "@waypoint:[-113.52 53.52 20 mi]|@tags:{ontario}", 'nocontent')
env.assertEqual(res, [2, '2', '1'])
def testAttributes(env):
env.assertOk(env.cmd('ft.create', 'idx','ON', 'HASH',
'schema', 'title', 'text', 'body', 'text'))
env.assertOk(env.cmd('ft.add', 'idx', 'doc1', 1.0, 'fields',
'title', 't1 t2', 'body', 't3 t4 t5'))
env.assertOk(env.cmd('ft.add', 'idx', 'doc2', 1.0, 'fields',
'body', 't1 t2', 'title', 't3 t5'))
res = env.cmd(
'ft.search', 'idx', '(@title:(t1 t2) => {$weight: 0.2}) |(@body:(t1 t2) => {$weight: 0.5})', 'nocontent')
env.assertListEqual([2L, 'doc2', 'doc1'], res)
res = env.cmd(
'ft.search', 'idx', '(@title:(t1 t2) => {$weight: 2.5}) |(@body:(t1 t2) => {$weight: 0.5})', 'nocontent')
env.assertListEqual([2L, 'doc1', 'doc2'], res)
res = env.cmd(
'ft.search', 'idx', '(t3 t5) => {$slop: 4}', 'nocontent')
env.assertListEqual([2L, 'doc2', 'doc1'], res)
res = env.cmd(
'ft.search', 'idx', '(t5 t3) => {$slop: 0}', 'nocontent')
env.assertListEqual([1L, 'doc2'], res)
res = env.cmd(
'ft.search', 'idx', '(t5 t3) => {$slop: 0; $inorder:true}', 'nocontent')
env.assertListEqual([0], res)
def testUnion(env):
N = 100
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx','ON', 'HASH', 'schema', 'f', 'text'))
for i in range(N):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'f', 'hello world' if i % 2 == 0 else 'hallo werld'))
for _ in r.retry_with_rdb_reload():
waitForIndex(r, 'idx')
res = r.execute_command(
'ft.search', 'idx', 'hello|hallo', 'nocontent', 'limit', '0', '100')
env.assertEqual(N + 1, len(res))
env.assertEqual(N, res[0])
res = r.execute_command(
'ft.search', 'idx', 'hello|world', 'nocontent', 'limit', '0', '100')
env.assertEqual(51, len(res))
env.assertEqual(50, res[0])
res = r.execute_command('ft.search', 'idx', '(hello|hello)(world|world)',
'nocontent', 'verbatim', 'limit', '0', '100')
env.assertEqual(51, len(res))
env.assertEqual(50, res[0])
res = r.execute_command(
'ft.search', 'idx', '(hello|hallo)(werld|world)', 'nocontent', 'verbatim', 'limit', '0', '100')
env.assertEqual(101, len(res))
env.assertEqual(100, res[0])
res = r.execute_command(
'ft.search', 'idx', '(hallo|hello)(world|werld)', 'nocontent', 'verbatim', 'limit', '0', '100')
env.assertEqual(101, len(res))
env.assertEqual(100, res[0])
res = r.execute_command(
'ft.search', 'idx', '(hello|werld)(hallo|world)', 'nocontent', 'verbatim', 'limit', '0', '100')
env.assertEqual(101, len(res))
env.assertEqual(100, res[0])
res = r.execute_command(
'ft.search', 'idx', '(hello|hallo) world', 'nocontent', 'verbatim', 'limit', '0', '100')
env.assertEqual(51, len(res))
env.assertEqual(50, res[0])
res = r.execute_command(
'ft.search', 'idx', '(hello world)|((hello world)|(hallo world|werld) | hello world werld)',
'nocontent', 'verbatim', 'limit', '0', '100')
env.assertEqual(101, len(res))
env.assertEqual(100, res[0])
def testSearch(env):
r = env
r.expect('ft.create', 'idx', 'ON', 'HASH',
'schema', 'title', 'text', 'weight', 10.0, 'body', 'text').ok()
r.expect('ft.add', 'idx', 'doc1', 0.5,
'fields','title', 'hello world', 'body', 'lorem ist ipsum').ok()
r.expect('ft.add', 'idx', 'doc2', 1.0,
'fields', 'title', 'hello another world', 'body', 'lorem ist ipsum lorem lorem').ok()
# order of documents might change after reload
for _ in r.retry_with_rdb_reload():
waitForIndex(env, 'idx')
res = r.execute_command('ft.search', 'idx', 'hello')
expected = [2L, 'doc2', ['title', 'hello another world', 'body', 'lorem ist ipsum lorem lorem'],
'doc1', ['title', 'hello world', 'body', 'lorem ist ipsum']]
env.assertEqual(toSortedFlatList(res), toSortedFlatList(expected))
# Test empty query
res = r.execute_command('ft.search', 'idx', '')
env.assertListEqual([0], res)
# Test searching with no content
res = r.execute_command(
'ft.search', 'idx', 'hello', 'nocontent')
env.assertTrue(len(res) == 3)
expected = ['doc2', 'doc1']
env.assertEqual(res[0], 2L)
for item in expected:
env.assertIn(item, res)
# Test searching WITHSCORES
res = r.execute_command('ft.search', 'idx', 'hello', 'WITHSCORES')
env.assertEqual(len(res), 7)
env.assertEqual(res[0], 2L)
for item in expected:
env.assertIn(item, res)
env.assertTrue(float(res[2]) > 0)
env.assertTrue(float(res[5]) > 0)
# Test searching WITHSCORES NOCONTENT
res = r.execute_command('ft.search', 'idx', 'hello', 'WITHSCORES', 'NOCONTENT')
env.assertEqual(len(res), 5)
env.assertEqual(res[0], 2L)
for item in expected:
env.assertIn(item, res)
env.assertTrue(float(res[2]) > 0)
env.assertTrue(float(res[4]) > 0)
def testGet(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'ON', 'HASH',
'schema', 'foo', 'text', 'bar', 'text'))
env.expect('ft.get').error().contains("wrong number of arguments")
env.expect('ft.get', 'idx').error().contains("wrong number of arguments")
env.expect('ft.get', 'idx', 'foo', 'bar').error().contains("wrong number of arguments")
env.expect('ft.mget').error().contains("wrong number of arguments")
env.expect('ft.mget', 'idx').error().contains("wrong number of arguments")
env.expect('ft.mget', 'fake_idx').error().contains("wrong number of arguments")
env.expect('ft.get fake_idx foo').error().contains("Unknown Index name")
env.expect('ft.mget fake_idx foo').error().contains("Unknown Index name")
for i in range(100):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'foo', 'hello world', 'bar', 'wat wat'))
for i in range(100):
res = r.execute_command('ft.get', 'idx', 'doc%d' % i)
env.assertIsNotNone(res)
env.assertEqual(set(['foo', 'hello world', 'bar', 'wat wat']), set(res))
env.assertIsNone(r.execute_command(
'ft.get', 'idx', 'doc%dsdfsd' % i))
env.expect('ft.get', 'no_idx', 'doc0').error().contains("Unknown Index name")
rr = r.execute_command(
'ft.mget', 'idx', *('doc%d' % i for i in range(100)))
env.assertEqual(len(rr), 100)
for res in rr:
env.assertIsNotNone(res)
env.assertEqual(set(['foo', 'hello world', 'bar', 'wat wat']), set(res))
rr = r.execute_command(
'ft.mget', 'idx', *('doc-%d' % i for i in range(100)))
env.assertEqual(len(rr), 100)
for res in rr:
env.assertIsNone(res)
# Verify that when a document is deleted, GET returns NULL
r.cmd('ft.del', 'idx', 'doc10') # But we still keep the document
r.cmd('ft.del', 'idx', 'doc11')
assert r.cmd('ft.del', 'idx', 'coverage') == 0
res = r.cmd('ft.get', 'idx', 'doc10')
r.assertEqual(None, res)
res = r.cmd('ft.mget', 'idx', 'doc10')
r.assertEqual([None], res)
res = r.cmd('ft.mget', 'idx', 'doc10', 'doc11', 'doc12')
r.assertIsNone(res[0])
r.assertIsNone(res[1])
r.assertTrue(not not res[2])
env.expect('ft.add idx doc 0.1 language arabic payload redislabs fields foo foo').ok()
env.expect('ft.get idx doc').equal(['foo', 'foo'])
res = env.cmd('hgetall doc')
env.assertEqual(set(res), set(['foo', 'foo', '__score', '0.1', '__language', 'arabic', '__payload', 'redislabs']))
def testDelete(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'ON', 'HASH', 'schema', 'f', 'text'))
for i in range(100):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'f', 'hello world'))
env.expect('ft.del', 'fake_idx', 'doc1').error()
for i in range(100):
# the doc hash should exist now
r.expect('ft.get', 'idx', 'doc%d' % i).notRaiseError()
# Delete the actual docs only half of the time
env.assertEqual(1, r.execute_command(
'ft.del', 'idx', 'doc%d' % i, 'DD' if i % 2 == 0 else ''))
# second delete should return 0
env.assertEqual(0, r.execute_command(
'ft.del', 'idx', 'doc%d' % i))
# second delete should return 0
# TODO: return 0 if doc wasn't found
#env.assertEqual(0, r.execute_command(
# 'ft.del', 'idx', 'doc%d' % i))
# After del with DD the doc hash should not exist
if i % 2 == 0:
env.assertFalse(r.exists('doc%d' % i))
else:
r.expect('ft.get', 'idx', 'doc%d' % i).notRaiseError()
res = r.execute_command(
'ft.search', 'idx', 'hello', 'nocontent', 'limit', 0, 100)
env.assertNotIn('doc%d' % i, res)
env.assertEqual(res[0], 100 - i - 1)
env.assertEqual(len(res), 100 - i)
# test reinsertion
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'f', 'hello world'))
res = r.execute_command(
'ft.search', 'idx', 'hello', 'nocontent', 'limit', 0, 100)
env.assertIn('doc%d' % i, res)
env.assertEqual(1, r.execute_command(
'ft.del', 'idx', 'doc%d' % i))
for _ in r.retry_with_rdb_reload():
waitForIndex(env, 'idx')
did = 'rrrr'
env.assertOk(r.execute_command('ft.add', 'idx', did, 1, 'fields',
'f', 'hello world'))
env.assertEqual(1, r.execute_command('ft.del', 'idx', did))
env.assertEqual(0, r.execute_command('ft.del', 'idx', did))
env.assertOk(r.execute_command('ft.add', 'idx', did, 1, 'fields',
'f', 'hello world'))
env.assertEqual(1, r.execute_command('ft.del', 'idx', did))
env.assertEqual(0, r.execute_command('ft.del', 'idx', did))
def testReplace(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'ON', 'HASH', 'schema', 'f', 'text'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 1.0, 'fields',
'f', 'hello world'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', 1.0, 'fields',
'f', 'hello world'))
res = r.execute_command(
'ft.search', 'idx', 'hello world')
env.assertEqual(2, res[0])
with env.assertResponseError():
# make sure we can't insert a doc twice
res = r.execute_command('ft.add', 'idx', 'doc1', 1.0, 'fields',
'f', 'hello world')
# now replace doc1 with a different content
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 1.0, 'replace', 'fields',
'f', 'goodbye universe'))
for _ in r.retry_with_rdb_reload():
waitForIndex(env, 'idx')
# make sure the query for hello world does not return the replaced
# document
res = r.execute_command(
'ft.search', 'idx', 'hello world', 'nocontent')
env.assertEqual(1, res[0])
env.assertEqual('doc2', res[1])
# search for the doc's new content
res = r.execute_command(
'ft.search', 'idx', 'goodbye universe', 'nocontent')
env.assertEqual(1, res[0])
env.assertEqual('doc1', res[1])
def testDrop(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'ON', 'HASH', 'schema', 'f', 'text', 'n', 'numeric', 't', 'tag', 'g', 'geo'))
for i in range(100):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'f', 'hello world', 'n', 666, 't', 'foo bar',
'g', '19.04,47.497'))
keys = r.keys('*')
env.assertGreaterEqual(len(keys), 100)
env.assertOk(r.execute_command('ft.drop', 'idx'))
keys = r.keys('*')
env.assertEqual(0, len(keys))
env.flush()
# Now do the same with KEEPDOCS
env.assertOk(r.execute_command(
'ft.create', 'idx', 'ON', 'HASH',
'schema', 'f', 'text', 'n', 'numeric', 't', 'tag', 'g', 'geo'))
for i in range(100):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'f', 'hello world', 'n', 666, 't', 'foo bar',
'g', '19.04,47.497'))
keys = r.keys('*')
env.assertGreaterEqual(len(keys), 100)
if not env.is_cluster():
env.assertOk(r.execute_command('ft.drop', 'idx', 'KEEPDOCS'))
keys = r.keys('*')
env.assertListEqual(['doc0', 'doc1', 'doc10', 'doc11', 'doc12', 'doc13', 'doc14', 'doc15', 'doc16', 'doc17', 'doc18', 'doc19', 'doc2', 'doc20', 'doc21', 'doc22', 'doc23', 'doc24', 'doc25', 'doc26', 'doc27', 'doc28', 'doc29', 'doc3', 'doc30', 'doc31', 'doc32', 'doc33', 'doc34', 'doc35', 'doc36', 'doc37', 'doc38', 'doc39', 'doc4', 'doc40', 'doc41', 'doc42', 'doc43', 'doc44', 'doc45', 'doc46', 'doc47', 'doc48', 'doc49', 'doc5', 'doc50', 'doc51', 'doc52', 'doc53',
'doc54', 'doc55', 'doc56', 'doc57', 'doc58', 'doc59', 'doc6', 'doc60', 'doc61', 'doc62', 'doc63', 'doc64', 'doc65', 'doc66', 'doc67', 'doc68', 'doc69', 'doc7', 'doc70', 'doc71', 'doc72', 'doc73', 'doc74', 'doc75', 'doc76', 'doc77', 'doc78', 'doc79', 'doc8', 'doc80', 'doc81', 'doc82', 'doc83', 'doc84', 'doc85', 'doc86', 'doc87', 'doc88', 'doc89', 'doc9', 'doc90', 'doc91', 'doc92', 'doc93', 'doc94', 'doc95', 'doc96', 'doc97', 'doc98', 'doc99'], sorted(keys))
env.expect('FT.DROP', 'idx', 'KEEPDOCS', '666').error().contains("wrong number of arguments")
def testDropIndex(env):
r = env
r.expect('ft.create', 'idx', 'ON', 'HASH', 'schema', 'f', 'text', 'n', 'numeric', 't', 'tag', 'g', 'geo').ok()
for i in range(100):
r.expect('ft.add', 'idx', 'doc%d' % i, 1.0,
'fields', 'f', 'hello world', 'n', 666, 't', 'foo bar',
'g', '19.04,47.497').ok()
keys = r.keys('*')
env.assertGreaterEqual(len(keys), 100)
r.expect('FT.DROPINDEX', 'idx', 'dd').ok()
keys = r.keys('*')
env.assertEqual(0, len(keys))
env.flush()
    # Now do the same without 'dd', so the docs are kept
env.expect('ft.create', 'idx', 'ON', 'HASH',
'schema', 'f', 'text', 'n', 'numeric', 't', 'tag', 'g', 'geo').ok()
for i in range(100):
r.expect('ft.add', 'idx', 'doc%d' % i, 1.0,
'fields', 'f', 'hello world', 'n', 666, 't', 'foo bar',
'g', '19.04,47.497').ok()
keys = r.keys('*')
env.assertGreaterEqual(len(keys), 100)
if not env.is_cluster():
r.expect('FT.DROPINDEX', 'idx').ok()
keys = r.keys('*')
env.assertListEqual(sorted("doc%d" %k for k in range(100)), sorted(keys))
env.expect('FT.DROPINDEX', 'idx', 'dd', '666').error().contains("wrong number of arguments")
def testCustomStopwords(env):
r = env
# Index with default stopwords
env.assertOk(r.execute_command(
'ft.create', 'idx', 'ON', 'HASH', 'schema', 'foo', 'text'))
# Index with custom stopwords
env.assertOk(r.execute_command('ft.create', 'idx2', 'ON', 'HASH', 'stopwords', 2, 'hello', 'world',
'schema', 'foo', 'text'))
    if not env.isCluster():
res = env.cmd('ft.info', 'idx2')
env.assertEqual(res[39], ['hello', 'world'])
# Index with NO stopwords
env.assertOk(r.execute_command('ft.create', 'idx3', 'ON', 'HASH', 'stopwords', 0,
'schema', 'foo', 'text'))
    if not env.isCluster():
res = env.cmd('ft.info', 'idx3')
env.assertEqual(res[39], [])
#for idx in ('idx', 'idx2', 'idx3'):
env.assertOk(r.execute_command(
'ft.add', 'idx', 'doc1', 1.0, 'fields', 'foo', 'hello world'))
env.assertOk(r.execute_command(
'ft.add', 'idx', 'doc2', 1.0, 'fields', 'foo', 'to be or not to be'))
for _ in r.retry_with_rdb_reload():
waitForIndex(r, 'idx')
# Normal index should return results just for 'hello world'
env.assertEqual([1, 'doc1'], r.execute_command(
'ft.search', 'idx', 'hello world', 'nocontent'))
env.assertEqual([0], r.execute_command(
'ft.search', 'idx', 'to be or not', 'nocontent'))
# Custom SW index should return results just for 'to be or not'
env.assertEqual([0], r.execute_command(
'ft.search', 'idx2', 'hello world', 'nocontent'))
env.assertEqual([1, 'doc2'], r.execute_command(
'ft.search', 'idx2', 'to be or not', 'nocontent'))
# No SW index should return results for both
env.assertEqual([1, 'doc1'], r.execute_command(
'ft.search', 'idx3', 'hello world', 'nocontent'))
env.assertEqual([1, 'doc2'], r.execute_command(
'ft.search', 'idx3', 'to be or not', 'nocontent'))
def testStopwords(env):
# This test was taken from Python's tests, and failed due to some changes
# made earlier
env.cmd('ft.create', 'idx', 'ON', 'HASH', 'stopwords', 3, 'foo',
'bar', 'baz', 'schema', 'txt', 'text')
env.cmd('ft.add', 'idx', 'doc1', 1.0, 'fields', 'txt', 'foo bar')
env.cmd('ft.add', 'idx', 'doc2', 1.0, 'fields', 'txt', 'hello world')
r1 = env.cmd('ft.search', 'idx', 'foo bar', 'nocontent')
r2 = env.cmd('ft.search', 'idx', 'foo bar hello world', 'nocontent')
env.assertEqual(0, r1[0])
env.assertEqual(1, r2[0])
def testNoStopwords(env):
# This test taken from Java's test suite
env.cmd('ft.create', 'idx', 'ON', 'HASH', 'schema', 'title', 'text')
for i in range(100):
env.cmd('ft.add', 'idx', 'doc{}'.format(i), 1.0, 'fields',
'title', 'hello world' if i % 2 == 0 else 'hello worlds')
res = env.cmd('ft.search', 'idx', 'hello a world', 'NOCONTENT')
env.assertEqual(100, res[0])
res = env.cmd('ft.search', 'idx', 'hello a world',
'VERBATIM', 'NOCONTENT')
env.assertEqual(50, res[0])
res = env.cmd('ft.search', 'idx', 'hello a world', 'NOSTOPWORDS')
env.assertEqual(0, res[0])
def testOptional(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'ON', 'HASH', 'schema', 'foo', 'text'))
env.assertOk(r.execute_command('ft.add', 'idx',
'doc1', 1.0, 'fields', 'foo', 'hello wat woot'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc2',
1.0, 'fields', 'foo', 'hello world woot'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc3',
1.0, 'fields', 'foo', 'hello world werld'))
res = r.execute_command('ft.search', 'idx', 'hello', 'nocontent')
env.assertEqual([3L, 'doc3', 'doc2', 'doc1'], res)
res = r.execute_command(
'ft.search', 'idx', 'hello world', 'nocontent', 'scorer', 'DISMAX')
env.assertEqual([2L, 'doc3', 'doc2'], res)
res = r.execute_command(
'ft.search', 'idx', 'hello ~world', 'nocontent', 'scorer', 'DISMAX')
env.assertEqual([3L, 'doc3', 'doc2', 'doc1'], res)
res = r.execute_command(
'ft.search', 'idx', 'hello ~world ~werld', 'nocontent', 'scorer', 'DISMAX')
env.assertEqual([3L, 'doc3', 'doc2', 'doc1'], res)
res = r.execute_command(
'ft.search', 'idx', '~world ~werld hello', 'nocontent', 'scorer', 'DISMAX')
env.assertEqual([3L, 'doc3', 'doc2', 'doc1'], res)
def testExplain(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'ON', 'HASH',
'schema', 'foo', 'text', 'bar', 'numeric', 'sortable'))
q = '(hello world) "what what" hello|world @bar:[10 100]|@bar:[200 300]'
res = r.execute_command('ft.explain', 'idx', q)
# print res.replace('\n', '\\n')
# expected = """INTERSECT {\n UNION {\n hello\n +hello(expanded)\n }\n UNION {\n world\n +world(expanded)\n }\n EXACT {\n what\n what\n }\n UNION {\n UNION {\n hello\n +hello(expanded)\n }\n UNION {\n world\n +world(expanded)\n }\n }\n UNION {\n NUMERIC {10.000000 <= @bar <= 100.000000}\n NUMERIC {200.000000 <= @bar <= 300.000000}\n }\n}\n"""
# expected = """INTERSECT {\n UNION {\n hello\n <HL(expanded)\n +hello(expanded)\n }\n UNION {\n world\n <ARLT(expanded)\n +world(expanded)\n }\n EXACT {\n what\n what\n }\n UNION {\n UNION {\n hello\n <HL(expanded)\n +hello(expanded)\n }\n UNION {\n world\n <ARLT(expanded)\n +world(expanded)\n }\n }\n UNION {\n NUMERIC {10.000000 <= @bar <= 100.000000}\n NUMERIC {200.000000 <= @bar <= 300.000000}\n }\n}\n"""
expected = """INTERSECT {\n UNION {\n hello\n +hello(expanded)\n }\n UNION {\n world\n +world(expanded)\n }\n EXACT {\n what\n what\n }\n UNION {\n UNION {\n hello\n +hello(expanded)\n }\n UNION {\n world\n +world(expanded)\n }\n }\n UNION {\n NUMERIC {10.000000 <= @bar <= 100.000000}\n NUMERIC {200.000000 <= @bar <= 300.000000}\n }\n}\n"""
env.assertEqual(res, expected)
# expected = ['INTERSECT {', ' UNION {', ' hello', ' <HL(expanded)', ' +hello(expanded)', ' }', ' UNION {', ' world', ' <ARLT(expanded)', ' +world(expanded)', ' }', ' EXACT {', ' what', ' what', ' }', ' UNION {', ' UNION {', ' hello', ' <HL(expanded)', ' +hello(expanded)', ' }', ' UNION {', ' world', ' <ARLT(expanded)', ' +world(expanded)', ' }', ' }', ' UNION {', ' NUMERIC {10.000000 <= @bar <= 100.000000}', ' NUMERIC {200.000000 <= @bar <= 300.000000}', ' }', '}', '']
if env.is_cluster():
raise unittest.SkipTest()
res = env.cmd('ft.explainCli', 'idx', q)
expected = ['INTERSECT {', ' UNION {', ' hello', ' +hello(expanded)', ' }', ' UNION {', ' world', ' +world(expanded)', ' }', ' EXACT {', ' what', ' what', ' }', ' UNION {', ' UNION {', ' hello', ' +hello(expanded)', ' }', ' UNION {', ' world', ' +world(expanded)', ' }', ' }', ' UNION {', ' NUMERIC {10.000000 <= @bar <= 100.000000}', ' NUMERIC {200.000000 <= @bar <= 300.000000}', ' }', '}', '']
env.assertEqual(expected, res)
def testNoIndex(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'ON', 'HASH', 'schema',
'foo', 'text',
'num', 'numeric', 'sortable', 'noindex',
'extra', 'text', 'noindex', 'sortable'))
if not env.isCluster():
        # too specific to check on a cluster; todo: change it to be generic enough
res = env.cmd('ft.info', 'idx')
env.assertEqual(res[7][1][4], 'NOINDEX')
env.assertEqual(res[7][2][6], 'NOINDEX')
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', '0.1', 'fields',
'foo', 'hello world', 'num', 1, 'extra', 'hello lorem ipsum'))
res = r.execute_command(
'ft.search', 'idx', 'hello world', 'nocontent')
env.assertListEqual([1, 'doc1'], res)
res = r.execute_command(
'ft.search', 'idx', 'lorem ipsum', 'nocontent')
env.assertListEqual([0], res)
res = r.execute_command(
'ft.search', 'idx', '@extra:hello', 'nocontent')
env.assertListEqual([0], res)
res = r.execute_command(
'ft.search', 'idx', '@num:[1 1]', 'nocontent')
env.assertListEqual([0], res)
def testPartial(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'ON', 'HASH', 'SCORE_FIELD', '__score',
'schema',
'foo', 'text',
'num', 'numeric', 'sortable', 'noindex',
'extra', 'text', 'noindex'))
# print r.execute_command('ft.info', 'idx')
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', '0.1', 'fields',
'foo', 'hello world', 'num', 1, 'extra', 'lorem ipsum'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', '0.1', 'fields',
'foo', 'hello world', 'num', 2, 'extra', 'abba'))
res = r.execute_command('ft.search', 'idx', 'hello world',
'sortby', 'num', 'asc', 'nocontent', 'withsortkeys')
env.assertListEqual([2L, 'doc1', '#1', 'doc2', '#2'], res)
res = r.execute_command('ft.search', 'idx', 'hello world',
'sortby', 'num', 'desc', 'nocontent', 'withsortkeys')
env.assertListEqual([2L, 'doc2', '#2', 'doc1', '#1'], res)
# Updating non indexed fields doesn't affect search results
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', '0.1', 'replace', 'partial',
'fields', 'num', 3, 'extra', 'jorem gipsum'))
env.expect('ft.add', 'idx', 'doc12', '0.1', 'replace', 'partial',
'fields', 'num1', 'redis').equal('OK')
res = r.execute_command(
'ft.search', 'idx', 'hello world', 'sortby', 'num', 'desc',)
assertResultsEqual(env, [2L, 'doc1', ['foo', 'hello world', 'num', '3','extra', 'jorem gipsum'],
'doc2', ['foo', 'hello world', 'num', '2', 'extra', 'abba']], res)
res = r.execute_command(
'ft.search', 'idx', 'hello', 'nocontent', 'withscores')
# Updating only indexed field affects search results
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', '0.1', 'replace', 'partial',
'fields', 'foo', 'wat wet'))
res = r.execute_command(
'ft.search', 'idx', 'hello world', 'nocontent')
env.assertListEqual([1L, 'doc2'], res)
res = r.execute_command('ft.search', 'idx', 'wat', 'nocontent')
env.assertListEqual([1L, 'doc1'], res)
# Test updating of score and no fields
res = r.execute_command(
'ft.search', 'idx', 'wat', 'nocontent', 'withscores')
env.assertLess(float(res[2]), 1)
# env.assertListEqual([1L, 'doc1'], res)
env.assertOk(r.execute_command('ft.add', 'idx',
'doc1', '1.0', 'replace', 'partial', 'fields'))
res = r.execute_command(
'ft.search', 'idx', 'wat', 'nocontent', 'withscores')
    # We reindex even though no new fields were given; only the score is updated, which affects the result score
env.assertEqual(float(res[2]), 1)
# Test updating payloads
res = r.execute_command(
'ft.search', 'idx', 'wat', 'nocontent', 'withpayloads')
env.assertIsNone(res[2])
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', '1.0',
'replace', 'partial', 'payload', 'foobar', 'fields'))
res = r.execute_command(
'ft.search', 'idx', 'wat', 'nocontent', 'withpayloads')
env.assertEqual('foobar', res[2])
def testPaging(env):
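    # Page through all matches in random-sized LIMIT chunks, checking the
    # total count stays N and ids arrive in descending 'bar' order.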
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'ON', 'HASH', 'schema', 'foo', 'text', 'bar', 'numeric', 'sortable'))
N = 100
for i in range(N):
env.assertOk(r.execute_command('ft.add', 'idx', '%d' % i, 1, 'fields',
'foo', 'hello', 'bar', i))
chunk = 7
offset = 0
while True:
res = r.execute_command(
'ft.search', 'idx', 'hello', 'nocontent', 'sortby', 'bar', 'desc', 'limit', offset, chunk)
env.assertEqual(res[0], N)
if offset + chunk > N:
env.assertTrue(len(res) - 1 <= chunk)
break
env.assertEqual(len(res), chunk + 1)
for n, id in enumerate(res[1:]):
env.assertEqual(int(id), N - 1 - (offset + n))
offset += chunk
chunk = random.randrange(1, 10)
res = r.execute_command(
'ft.search', 'idx', 'hello', 'nocontent', 'sortby', 'bar', 'asc', 'limit', N, 10)
env.assertEqual(res[0], N)
env.assertEqual(len(res), 1)
with env.assertResponseError():
r.execute_command(
'ft.search', 'idx', 'hello', 'nocontent', 'limit', 0, -1)
with env.assertResponseError():
r.execute_command(
'ft.search', 'idx', 'hello', 'nocontent', 'limit', -1, 10)
with env.assertResponseError():
r.execute_command(
'ft.search', 'idx', 'hello', 'nocontent', 'limit', 0, 2000000)
def testPrefix(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'ON', 'HASH', 'schema', 'foo', 'text'))
N = 100
for i in range(N):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'foo', 'constant term%d' % (random.randrange(0, 5))))
for _ in r.retry_with_rdb_reload():
waitForIndex(r, 'idx')
res = r.execute_command(
'ft.search', 'idx', 'constant term', 'nocontent')
env.assertEqual([0], res)
res = r.execute_command(
'ft.search', 'idx', 'constant term*', 'nocontent')
env.assertEqual(N, res[0])
res = r.execute_command(
'ft.search', 'idx', 'const* term*', 'nocontent')
env.assertEqual(N, res[0])
res = r.execute_command(
'ft.search', 'idx', 'constant term1*', 'nocontent')
env.assertGreater(res[0], 2)
res = r.execute_command(
'ft.search', 'idx', 'const* -term*', 'nocontent')
env.assertEqual([0], res)
res = r.execute_command(
'ft.search', 'idx', 'constant term9*', 'nocontent')
env.assertEqual([0], res)
def testSortBy(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'ON', 'HASH', 'schema', 'foo', 'text', 'sortable', 'bar', 'numeric', 'sortable'))
N = 100
for i in range(N):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'foo', 'hello%03d world' % i, 'bar', 100 - i))
for _ in r.retry_with_rdb_reload():
waitForIndex(r, 'idx')
res = r.execute_command(
'ft.search', 'idx', 'world', 'nocontent', 'sortby', 'foo')
env.assertEqual([100L, 'doc0', 'doc1', 'doc2', 'doc3',
'doc4', 'doc5', 'doc6', 'doc7', 'doc8', 'doc9'], res)
res = r.execute_command(
'ft.search', 'idx', 'world', 'nocontent', 'sortby', 'foo', 'desc')
env.assertEqual([100L, 'doc99', 'doc98', 'doc97', 'doc96',
'doc95', 'doc94', 'doc93', 'doc92', 'doc91', 'doc90'], res)
res = r.execute_command(
'ft.search', 'idx', 'world', 'nocontent', 'sortby', 'bar', 'desc')
env.assertEqual([100L, 'doc0', 'doc1', 'doc2', 'doc3',
'doc4', 'doc5', 'doc6', 'doc7', 'doc8', 'doc9'], res)
res = r.execute_command(
'ft.search', 'idx', 'world', 'nocontent', 'sortby', 'bar', 'asc')
env.assertEqual([100L, 'doc99', 'doc98', 'doc97', 'doc96',
'doc95', 'doc94', 'doc93', 'doc92', 'doc91', 'doc90'], res)
res = r.execute_command('ft.search', 'idx', 'world', 'nocontent',
'sortby', 'bar', 'desc', 'withscores', 'limit', '2', '5')
env.assertEqual(
[100L, 'doc2', '0', 'doc3', '0', 'doc4', '0', 'doc5', '0', 'doc6', '0'], res)
res = r.execute_command('ft.search', 'idx', 'world', 'nocontent',
'sortby', 'bar', 'desc', 'withsortkeys', 'limit', 0, 5)
env.assertListEqual(
[100L, 'doc0', '#100', 'doc1', '#99', 'doc2', '#98', 'doc3', '#97', 'doc4', '#96'], res)
res = r.execute_command('ft.search', 'idx', 'world', 'nocontent',
'sortby', 'foo', 'desc', 'withsortkeys', 'limit', 0, 5)
env.assertListEqual([100L, 'doc99', '$hello099 world', 'doc98', '$hello098 world', 'doc97', '$hello097 world', 'doc96',
'$hello096 world', 'doc95', '$hello095 world'], res)
def testNot(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'ON', 'HASH', 'schema', 'foo', 'text'))
N = 10
for i in range(N):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'foo', 'constant term%d' % (random.randrange(0, 5))))
for i in range(5):
inclusive = r.execute_command(
'ft.search', 'idx', 'constant term%d' % i, 'nocontent', 'limit', 0, N)
exclusive = r.execute_command(
'ft.search', 'idx', 'constant -term%d' % i, 'nocontent', 'limit', 0, N)
exclusive2 = r.execute_command(
'ft.search', 'idx', '-(term%d)' % i, 'nocontent', 'limit', 0, N)
exclusive3 = r.execute_command(
'ft.search', 'idx', '(-term%d) (constant)' % i, 'nocontent', 'limit', 0, N)
env.assertNotEqual(inclusive[0], N)
env.assertEqual(inclusive[0] + exclusive[0], N)
env.assertEqual(exclusive3[0], exclusive2[0])
env.assertEqual(exclusive3[0], exclusive[0])
s1, s2, s3, s4 = set(inclusive[1:]), set(
exclusive[1:]), set(exclusive2[1:]), set(exclusive3[1:])
env.assertTrue(s1.difference(s2) == s1)
env.assertTrue(s1.difference(s3) == s1)
env.assertTrue(s1.difference(s4) == s1)
env.assertTrue(s2 == s3)
env.assertTrue(s2 == s4)
env.assertTrue(s2.intersection(s1) == set())
env.assertTrue(s3.intersection(s1) == set())
env.assertTrue(s4.intersection(s1) == set())
# NOT on a non existing term
env.assertEqual(r.execute_command(
'ft.search', 'idx', 'constant -dasdfasdf', 'nocontent')[0], N)
    # NOT on the term itself (negating a term that every matching doc contains)
env.assertEqual(r.execute_command(
'ft.search', 'idx', 'constant -constant', 'nocontent'), [0])
env.assertEqual(r.execute_command(
'ft.search', 'idx', 'constant -(term0|term1|term2|term3|term4|nothing)', 'nocontent'), [0])
# env.assertEqual(r.execute_command('ft.search', 'idx', 'constant -(term1 term2)', 'nocontent')[0], N)
def testNestedIntersection(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'ON', 'HASH',
'schema', 'a', 'text', 'b', 'text', 'c', 'text', 'd', 'text'))
for i in range(20):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'a', 'foo', 'b', 'bar', 'c', 'baz', 'd', 'gaz'))
res = [
r.execute_command('ft.search', 'idx',
'foo bar baz gaz', 'nocontent'),
r.execute_command('ft.search', 'idx',
'@a:foo @b:bar @c:baz @d:gaz', 'nocontent'),
r.execute_command('ft.search', 'idx',
'@b:bar @a:foo @c:baz @d:gaz', 'nocontent'),
r.execute_command('ft.search', 'idx',
'@c:baz @b:bar @a:foo @d:gaz', 'nocontent'),
r.execute_command('ft.search', 'idx',
'@d:gaz @c:baz @b:bar @a:foo', 'nocontent'),
r.execute_command(
'ft.search', 'idx', '@a:foo (@b:bar (@c:baz @d:gaz))', 'nocontent'),
r.execute_command(
'ft.search', 'idx', '@c:baz (@a:foo (@b:bar (@c:baz @d:gaz)))', 'nocontent'),
r.execute_command(
'ft.search', 'idx', '@b:bar (@a:foo (@c:baz @d:gaz))', 'nocontent'),
r.execute_command(
'ft.search', 'idx', '@d:gaz (@a:foo (@c:baz @b:bar))', 'nocontent'),
r.execute_command('ft.search', 'idx',
'foo (bar baz gaz)', 'nocontent'),
r.execute_command('ft.search', 'idx',
'foo (bar (baz gaz))', 'nocontent'),
r.execute_command('ft.search', 'idx',
'foo (bar (foo bar) (foo bar))', 'nocontent'),
r.execute_command('ft.search', 'idx',
'foo (foo (bar baz (gaz)))', 'nocontent'),
r.execute_command('ft.search', 'idx', 'foo (foo (bar (baz (gaz (foo bar (gaz))))))', 'nocontent')]
for i, r in enumerate(res):
# print i, res[0], r
env.assertListEqual(res[0], r)
def testInKeys(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'ON', 'HASH', 'schema', 'foo', 'text'))
for i in range(200):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'foo', 'hello world'))
for _ in r.retry_with_rdb_reload():
waitForIndex(env, 'idx')
for keys in (
['doc%d' % i for i in range(10)], ['doc%d' % i for i in range(0, 30, 2)], [
'doc%d' % i for i in range(99, 0, -5)]
):
res = r.execute_command(
'ft.search', 'idx', 'hello world', 'NOCONTENT', 'LIMIT', 0, 100, 'INKEYS', len(keys), *keys)
env.assertEqual(len(keys), res[0])
env.assertTrue(all((k in res for k in keys)))
env.assertEqual(0, r.execute_command(
'ft.search', 'idx', 'hello world', 'NOCONTENT', 'LIMIT', 0, 100, 'INKEYS', 3, 'foo', 'bar', 'baz')[0])
with env.assertResponseError():
env.cmd('ft.search', 'idx', 'hello', 'INKEYS', 99)
with env.assertResponseError():
env.cmd('ft.search', 'idx', 'hello', 'INKEYS', -1)
with env.assertResponseError():
env.cmd('ft.search', 'idx', 'hello', 'inkeys', 4, 'foo')
def testSlopInOrder(env):
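    # SLOP n allows up to n non-query terms between matched terms;
    # INORDER additionally requires the terms to appear in query order.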
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'ON', 'HASH', 'schema', 'title', 'text'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 1, 'fields',
'title', 't1 t2'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', 1, 'fields',
'title', 't1 t3 t2'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc3', 1, 'fields',
'title', 't1 t3 t4 t2'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc4', 1, 'fields',
'title', 't1 t3 t4 t5 t2'))
res = r.execute_command(
'ft.search', 'idx', 't1|t4 t3|t2', 'slop', '0', 'inorder', 'nocontent')
env.assertEqual({'doc3', 'doc4', 'doc2', 'doc1'}, set(res[1:]))
res = r.execute_command(
'ft.search', 'idx', 't2 t1', 'slop', '0', 'nocontent')
env.assertEqual(1, res[0])
env.assertEqual('doc1', res[1])
env.assertEqual(0, r.execute_command(
'ft.search', 'idx', 't2 t1', 'slop', '0', 'inorder')[0])
env.assertEqual(1, r.execute_command(
'ft.search', 'idx', 't1 t2', 'slop', '0', 'inorder')[0])
env.assertEqual(2, r.execute_command(
'ft.search', 'idx', 't1 t2', 'slop', '1', 'inorder')[0])
env.assertEqual(3, r.execute_command(
'ft.search', 'idx', 't1 t2', 'slop', '2', 'inorder')[0])
env.assertEqual(4, r.execute_command(
'ft.search', 'idx', 't1 t2', 'slop', '3', 'inorder')[0])
env.assertEqual(4, r.execute_command(
'ft.search', 'idx', 't1 t2', 'inorder')[0])
env.assertEqual(0, r.execute_command(
'ft.search', 'idx', 't t1', 'inorder')[0])
env.assertEqual(2, r.execute_command(
'ft.search', 'idx', 't1 t2 t3 t4')[0])
env.assertEqual(0, r.execute_command(
'ft.search', 'idx', 't1 t2 t3 t4', 'inorder')[0])
def testExact(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'ON', 'HASH',
'schema', 'title', 'text', 'weight', 10.0, 'body', 'text'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 0.5, 'fields',
'title', 'hello world',
'body', 'lorem ist ipsum'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', 1.0, 'fields',
'title', 'hello another world',
'body', 'lorem ist ipsum lorem lorem'))
res = r.execute_command(
'ft.search', 'idx', '"hello world"', 'verbatim')
env.assertEqual(3, len(res))
env.assertEqual(1, res[0])
env.assertEqual("doc1", res[1])
res = r.execute_command(
'ft.search', 'idx', "hello \"another world\"", 'verbatim')
env.assertEqual(3, len(res))
env.assertEqual(1, res[0])
env.assertEqual("doc2", res[1])
def testGeoErrors(env):
env.expect('flushall')
env.expect('ft.create idx ON HASH schema name text location geo').equal('OK')
env.expect('ft.add idx hotel 1.0 fields name hill location -0.1757,51.5156').equal('OK')
env.expect('ft.search idx hilton geofilter location -0.1757 51.5156 1 km').equal([0L])
    # Insert error - the add now works fine with the out-of-keyspace implementation
# env.expect('ft.add', 'idx', 'hotel1', 1, 'fields', 'name', '_hotel1', 'location', '1, 1').error() \
# .contains('Could not index geo value')
# Query errors
env.expect('ft.search idx hilton geofilter location lon 51.5156 1 km').error() \
.contains('Bad arguments for <lon>: Could not convert argument to expected type')
env.expect('ft.search idx hilton geofilter location 51.5156 lat 1 km').error() \
.contains('Bad arguments for <lat>: Could not convert argument to expected type')
env.expect('ft.search idx hilton geofilter location -0.1757 51.5156 radius km').error() \
.contains('Bad arguments for <radius>: Could not convert argument to expected type')
env.expect('ft.search idx hilton geofilter location -0.1757 51.5156 1 fake').error() \
.contains('Unknown distance unit fake')
env.expect('ft.search idx hilton geofilter location -0.1757 51.5156 1').error() \
.contains('GEOFILTER requires 5 arguments')
def testGeo(env):
r = env
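    # Two equivalent query helpers: gsearch uses the GEOFILTER argument,
    # gsearch_inline embeds the same filter as @location:[lon lat dist unit].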
gsearch = lambda query, lon, lat, dist, unit='km': r.execute_command(
'ft.search', 'idx', query, 'geofilter', 'location', lon, lat, dist, unit, 'LIMIT', 0, 20)
gsearch_inline = lambda query, lon, lat, dist, unit='km': r.execute_command(
'ft.search', 'idx', '{} @location:[{} {} {} {}]'.format(query, lon, lat, dist, unit), 'LIMIT', 0, 20)
env.assertOk(r.execute_command('ft.create', 'idx', 'ON', 'HASH',
'schema', 'name', 'text', 'location', 'geo'))
for i, hotel in enumerate(hotels):
env.assertOk(r.execute_command('ft.add', 'idx', 'hotel{}'.format(i), 1.0, 'fields', 'name',
hotel[0], 'location', '{},{}'.format(hotel[2], hotel[1])))
for _ in r.retry_with_rdb_reload():
waitForIndex(env, 'idx')
res = r.execute_command('ft.search', 'idx', 'hilton')
env.assertEqual(len(hotels), res[0])
res = gsearch('hilton', "-0.1757", "51.5156", '1')
env.assertEqual(3, res[0])
env.assertIn('hotel2', res)
env.assertIn('hotel21', res)
env.assertIn('hotel79', res)
res2 = gsearch_inline('hilton', "-0.1757", "51.5156", '1')
env.assertListEqual(res, res2)
res = gsearch('hilton', "-0.1757", "51.5156", '10')
env.assertEqual(14, res[0])
res2 = gsearch('hilton', "-0.1757", "51.5156", '10000', 'm')
env.assertListEqual(sorted(res), sorted(res2))
res2 = gsearch_inline('hilton', "-0.1757", "51.5156", '10')
env.assertListEqual(sorted(res), sorted(res2))
res = gsearch('heathrow', -0.44155, 51.45865, '10', 'm')
env.assertEqual(1, res[0])
env.assertEqual('hotel94', res[1])
res2 = gsearch_inline(
'heathrow', -0.44155, 51.45865, '10', 'm')
env.assertListEqual(res, res2)
res = gsearch('heathrow', -0.44155, 51.45865, '10', 'km')
env.assertEqual(5, res[0])
env.assertIn('hotel94', res)
res2 = gsearch_inline(
'heathrow', -0.44155, 51.45865, '10', 'km')
env.assertEqual(5, res2[0])
env.assertListEqual(sorted(res), sorted(res2))
res = gsearch('heathrow', -0.44155, 51.45865, '5', 'km')
env.assertEqual(3, res[0])
env.assertIn('hotel94', res)
res2 = gsearch_inline(
'heathrow', -0.44155, 51.45865, '5', 'km')
env.assertListEqual(sorted(res), sorted(res2))
def testTagErrors(env):
env.expect("ft.create", "test", 'ON', 'HASH',
"SCHEMA", "tags", "TAG").equal('OK')
env.expect("ft.add", "test", "1", "1", "FIELDS", "tags", "alberta").equal('OK')
env.expect("ft.add", "test", "2", "1", "FIELDS", "tags", "ontario. alberta").equal('OK')
def testGeoDeletion(env):
    if env.is_cluster():
        # Can't properly test if deleted on cluster
        raise unittest.SkipTest()
env.expect('ft.config', 'set', 'FORK_GC_CLEAN_THRESHOLD', 0).ok()
env.cmd('ft.create', 'idx', 'ON', 'HASH', 'schema',
'g1', 'geo', 'g2', 'geo', 't1', 'text')
env.cmd('ft.add', 'idx', 'doc1', 1.0, 'fields',
'g1', "-0.1757,51.5156",
'g2', "-0.1757,51.5156",
't1', "hello")
env.cmd('ft.add', 'idx', 'doc2', 1.0, 'fields',
'g1', "-0.1757,51.5156",
'g2', "-0.1757,51.5156",
't1', "hello")
env.cmd('ft.add', 'idx', 'doc3', 1.0, 'fields',
'g1', "-0.1757,51.5156",
't1', "hello")
# keys are: "geo:idx/g1" and "geo:idx/g2"
env.assertEqual(3, len(env.cmd('FT.DEBUG DUMP_NUMIDX idx g1')[0]))
env.assertEqual(2, len(env.cmd('FT.DEBUG DUMP_NUMIDX idx g2')[0]))
# Remove the first doc
env.cmd('ft.del', 'idx', 'doc1')
for _ in range(100):
env.cmd('ft.debug', 'gc_forceinvoke', 'idx')
env.assertEqual(2, len(env.cmd('FT.DEBUG DUMP_NUMIDX idx g1')[0]))
env.assertEqual(1, len(env.cmd('FT.DEBUG DUMP_NUMIDX idx g2')[0]))
# Replace the other one:
env.cmd('ft.add', 'idx', 'doc2', 1.0,
'replace', 'fields',
't1', 'just text here')
for _ in range(100):
env.cmd('ft.debug', 'gc_forceinvoke', 'idx')
env.assertEqual(1, len(env.cmd('FT.DEBUG DUMP_NUMIDX idx g1')[0]))
env.assertEqual(0, len(env.cmd('FT.DEBUG DUMP_NUMIDX idx g2')[0]))
def testInfields(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'ON', 'HASH',
'schema', 'title', 'text', 'weight', 10.0, 'body', 'text', 'weight', 1.0))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 0.5, 'fields',
'title', 'hello world',
'body', 'lorem ipsum'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', 1.0, 'fields',
'title', 'hello world lorem ipsum',
'body', 'hello world'))
res = r.execute_command(
'ft.search', 'idx', 'hello world', 'verbatim', "infields", 1, "title", "nocontent")
env.assertEqual(3, len(res))
env.assertEqual(2, res[0])
env.assertEqual("doc2", res[1])
env.assertEqual("doc1", res[2])
res = r.execute_command(
'ft.search', 'idx', 'hello world', 'verbatim', "infields", 1, "body", "nocontent")
env.assertEqual(2, len(res))
env.assertEqual(1, res[0])
env.assertEqual("doc2", res[1])
res = r.execute_command(
'ft.search', 'idx', 'hello', 'verbatim', "infields", 1, "body", "nocontent")
env.assertEqual(2, len(res))
env.assertEqual(1, res[0])
env.assertEqual("doc2", res[1])
res = r.execute_command(
'ft.search', 'idx', '\"hello world\"', 'verbatim', "infields", 1, "body", "nocontent")
env.assertEqual(2, len(res))
env.assertEqual(1, res[0])
env.assertEqual("doc2", res[1])
res = r.execute_command(
'ft.search', 'idx', '\"lorem ipsum\"', 'verbatim', "infields", 1, "body", "nocontent")
env.assertEqual(2, len(res))
env.assertEqual(1, res[0])
env.assertEqual("doc1", res[1])
res = r.execute_command(
'ft.search', 'idx', 'lorem ipsum', "infields", 2, "body", "title", "nocontent")
env.assertEqual(3, len(res))
env.assertEqual(2, res[0])
env.assertEqual("doc2", res[1])
env.assertEqual("doc1", res[2])
def testScorerSelection(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'ON', 'HASH', 'schema', 'title', 'text', 'body', 'text'))
# this is the default scorer
res = r.execute_command(
'ft.search', 'idx', 'foo', 'scorer', 'TFIDF')
env.assertEqual(res, [0])
with env.assertResponseError():
res = r.execute_command(
'ft.search', 'idx', 'foo', 'scorer', 'NOSUCHSCORER')
def testFieldSelectors(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'ON', 'HASH', 'PREFIX', 1, 'doc',
'schema', 'TiTle', 'text', 'BoDy', 'text', "יוניקוד", 'text', 'field.with,punct', 'text'))
    # TODO: document as a breaking change - ft.add field names are not case insensitive
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 1, 'fields',
'TiTle', 'hello world', 'BoDy', 'foo bar', 'יוניקוד', 'unicode', 'field.with,punct', 'punt'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', 0.5, 'fields',
'BoDy', 'hello world', 'TiTle', 'foo bar', 'יוניקוד', 'unicode', 'field.with,punct', 'punt'))
res = r.execute_command(
'ft.search', 'idx', '@TiTle:hello world', 'nocontent')
env.assertEqual(res, [1, 'doc1'])
res = r.execute_command(
'ft.search', 'idx', '@BoDy:hello world', 'nocontent')
env.assertEqual(res, [1, 'doc2'])
res = r.execute_command(
'ft.search', 'idx', '@BoDy:hello @TiTle:world', 'nocontent')
env.assertEqual(res, [0])
res = r.execute_command(
'ft.search', 'idx', '@BoDy:hello world @TiTle:world', 'nocontent')
env.assertEqual(res, [0])
res = r.execute_command(
'ft.search', 'idx', '@BoDy:(hello|foo) @TiTle:(world|bar)', 'nocontent')
env.assertEqual(sorted(res), sorted([2, 'doc1', 'doc2']))
res = r.execute_command(
'ft.search', 'idx', '@BoDy:(hello|foo world|bar)', 'nocontent')
env.assertEqual(sorted(res), sorted([2, 'doc1', 'doc2']))
res = r.execute_command(
'ft.search', 'idx', '@BoDy|TiTle:(hello world)', 'nocontent')
env.assertEqual(sorted(res), sorted([2, 'doc1', 'doc2']))
res = r.execute_command(
'ft.search', 'idx', '@יוניקוד:(unicode)', 'nocontent')
env.assertEqual(sorted(res), sorted([2, 'doc1', 'doc2']))
res = r.execute_command(
'ft.search', 'idx', '@field\\.with\\,punct:(punt)', 'nocontent')
env.assertEqual(sorted(res), sorted([2, 'doc1', 'doc2']))
def testStemming(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'ON', 'HASH', 'schema', 'title', 'text'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 0.5, 'fields',
'title', 'hello kitty'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', 1.0, 'fields',
'title', 'hello kitties'))
res = r.execute_command(
'ft.search', 'idx', 'hello kitty', "nocontent")
env.assertEqual(3, len(res))
env.assertEqual(2, res[0])
res = r.execute_command(
'ft.search', 'idx', 'hello kitty', "nocontent", "verbatim")
env.assertEqual(2, len(res))
env.assertEqual(1, res[0])
# test for unknown language
with env.assertResponseError():
res = r.execute_command(
'ft.search', 'idx', 'hello kitty', "nocontent", "language", "foofoofian")
def testExpander(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'ON', 'HASH', 'schema', 'title', 'text'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 0.5, 'fields',
'title', 'hello kitty'))
res = r.execute_command(
'ft.search', 'idx', 'kitties',
"nocontent",
"expander", "SBSTEM"
)
env.assertEqual(2, len(res))
env.assertEqual(1, res[0])
res = r.execute_command(
'ft.search', 'idx', 'kitties', "nocontent", "expander", "noexpander")
env.assertEqual(1, len(res))
env.assertEqual(0, res[0])
res = r.execute_command(
'ft.search', 'idx', 'kitti', "nocontent")
env.assertEqual(2, len(res))
env.assertEqual(1, res[0])
res = r.execute_command(
'ft.search', 'idx', 'kitti', "nocontent", 'verbatim')
env.assertEqual(1, len(res))
env.assertEqual(0, res[0])
    # Calling a stem directly works even with VERBATIM;
    # the stem must be prefixed with an escaped '+'.
res = r.execute_command(
'ft.search', 'idx', '\\+kitti', "nocontent", 'verbatim')
env.assertEqual(2, len(res))
env.assertEqual(1, res[0])
def testNumericRange(env):
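    # FILTER <field> <min> <max> and the inline @field:[min max] syntax are
    # expected to return the same results; a '(' prefix makes a bound exclusive.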
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'ON', 'HASH', 'schema', 'title', 'text', 'score', 'numeric', 'price', 'numeric'))
env.expect('ft.search', 'idx', 'hello kitty', 'filter', 'score', 5).error().contains("FILTER requires 3 arguments")
env.expect('ft.search', 'idx', 'hello kitty', 'filter', 'score', 5, 'inf').error().contains("Bad upper range: inf")
env.expect('ft.search', 'idx', 'hello kitty', 'filter', 'score', 'inf', 5).error().contains("Bad lower range: inf")
for i in xrange(100):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1, 'fields',
'title', 'hello kitty', 'score', i, 'price', 100 + 10 * i))
for _ in r.retry_with_rdb_reload():
waitForIndex(env, 'idx')
res = r.execute_command('ft.search', 'idx', 'hello kitty', "nocontent",
"filter", "score", 0, 100)
env.assertEqual(11, len(res))
env.assertEqual(100, res[0])
res = r.execute_command('ft.search', 'idx', 'hello kitty', "nocontent",
"filter", "score", 0, 50)
env.assertEqual(51, res[0])
res = r.execute_command('ft.search', 'idx', 'hello kitty', 'verbatim', "nocontent", "limit", 0, 100,
"filter", "score", "(0", "(50")
env.assertEqual(49, res[0])
res = r.execute_command('ft.search', 'idx', 'hello kitty', "nocontent",
"filter", "score", "-inf", "+inf")
env.assertEqual(100, res[0])
# test multi filters
scrange = (19, 90)
prrange = (290, 385)
res = r.execute_command('ft.search', 'idx', 'hello kitty',
"filter", "score", scrange[
0], scrange[1],
"filter", "price", prrange[0], prrange[1])
# print res
for doc in res[2::2]:
sc = int(doc[doc.index('score') + 1])
pr = int(doc[doc.index('price') + 1])
env.assertTrue(sc >= scrange[0] and sc <= scrange[1])
env.assertGreaterEqual(pr, prrange[0])
env.assertLessEqual(pr, prrange[1])
env.assertEqual(10, res[0])
res = r.execute_command('ft.search', 'idx', 'hello kitty',
"filter", "score", "19", "90",
"filter", "price", "90", "185")
env.assertEqual(0, res[0])
# Test numeric ranges as part of query syntax
res = r.execute_command(
'ft.search', 'idx', 'hello kitty @score:[0 100]', "nocontent")
env.assertEqual(11, len(res))
env.assertEqual(100, res[0])
res = r.execute_command(
'ft.search', 'idx', 'hello kitty @score:[0 50]', "nocontent")
env.assertEqual(51, res[0])
res = r.execute_command(
'ft.search', 'idx', 'hello kitty @score:[(0 (50]', 'verbatim', "nocontent")
env.assertEqual(49, res[0])
res = r.execute_command(
'ft.search', 'idx', '@score:[(0 (50]', 'verbatim', "nocontent")
env.assertEqual(49, res[0])
res = r.execute_command(
'ft.search', 'idx', 'hello kitty -@score:[(0 (50]', 'verbatim', "nocontent")
env.assertEqual(51, res[0])
res = r.execute_command(
'ft.search', 'idx', 'hello kitty @score:[-inf +inf]', "nocontent")
env.assertEqual(100, res[0])
def testSuggestions(env):
r = env
r.expect('ft.SUGADD', 'ac', 'hello world', 1).equal(1)
r.expect('ft.SUGADD', 'ac', 'hello world', 1, 'INCR').equal(1)
res = r.execute_command("FT.SUGGET", "ac", "hello")
env.assertEqual(1, len(res))
env.assertEqual("hello world", res[0])
terms = ["hello werld", "hallo world",
"yellow world", "wazzup", "herp", "derp"]
sz = 2
for term in terms:
r.expect('ft.SUGADD', 'ac', term, sz - 1).equal(sz)
sz += 1
for _ in r.retry_with_rdb_reload():
r.expect('ft.SUGLEN', 'ac').equal(7)
# search not fuzzy
r.expect("ft.SUGGET", "ac", "hello").equal(["hello world", "hello werld"])
# print r.execute_command("ft.SUGGET", "ac", "hello", "FUZZY", "MAX", "1", "WITHSCORES")
    # fuzzy search - should yield more results
r.expect("ft.SUGGET", "ac", "hello", "FUZZY")\
.equal(['hello world', 'hello werld', 'yellow world', 'hallo world'])
# search fuzzy with limit of 1
r.expect("ft.SUGGET", "ac", "hello", "FUZZY", "MAX", "1").equal(['hello world'])
    # scores should be returned with WITHSCORES
res = r.execute_command("ft.SUGGET", "ac", "hello", "WITHSCORES")
env.assertEqual(4, len(res))
env.assertTrue(float(res[1]) > 0)
env.assertTrue(float(res[3]) > 0)
r.expect("ft.SUGDEL", "ac", "hello world").equal(1L)
r.expect("ft.SUGDEL", "ac", "world").equal(0L)
r.expect("ft.SUGGET", "ac", "hello").equal(['hello werld'])
def testSuggestErrors(env):
env.expect('ft.SUGADD ac olah 1').equal(1)
env.expect('ft.SUGADD ac olah 1 INCR').equal(1)
env.expect('ft.SUGADD ac missing').error().contains("wrong number of arguments")
env.expect('ft.SUGADD ac olah not_a_number').error().contains("invalid score")
env.expect('ft.SUGADD ac olah 1 PAYLOAD').error().contains('Invalid payload: Expected an argument, but none provided')
env.expect('ft.SUGADD ac olah 1 REDIS PAYLOAD payload').error().contains('Unknown argument `REDIS`')
env.expect('ft.SUGGET ac olah FUZZ').error().contains("Unrecognized argument: FUZZ")
query = 'verylongquery'
for _ in range(3):
query += query
env.expect('ft.SUGGET ac', query).error().contains("Invalid query")
env.expect('ft.SUGGET ac', query + query).error().contains("Invalid query length")
def testSuggestPayload(env):
r = env
env.assertEqual(1, r.execute_command(
'ft.SUGADD', 'ac', 'hello world', 1, 'PAYLOAD', 'foo'))
env.assertEqual(2, r.execute_command(
'ft.SUGADD', 'ac', 'hello werld', 1, 'PAYLOAD', 'bar'))
env.assertEqual(3, r.execute_command(
'ft.SUGADD', 'ac', 'hello nopayload', 1, 'PAYLOAD', ''))
env.assertEqual(4, r.execute_command(
'ft.SUGADD', 'ac', 'hello nopayload2', 1))
res = r.execute_command("FT.SUGGET", "ac", "hello", 'WITHPAYLOADS')
env.assertListEqual(['hello world', 'foo', 'hello werld', 'bar', 'hello nopayload', None, 'hello nopayload2', None],
res)
res = r.execute_command("FT.SUGGET", "ac", "hello")
env.assertListEqual(['hello world', 'hello werld', 'hello nopayload', 'hello nopayload2'],
res)
res = r.execute_command(
"FT.SUGGET", "ac", "hello", 'WITHPAYLOADS', 'WITHSCORES')
    # we don't compare the scores because they may change
env.assertEqual(12, len(res))
def testPayload(env):
r = env
env.expect('ft.create', 'idx', 'ON', 'HASH', 'PAYLOAD_FIELD', '__payload', 'schema', 'f', 'text').ok()
for i in range(10):
r.expect('ft.add', 'idx', '%d' % i, 1.0,
'payload', 'payload %d' % i,
'fields', 'f', 'hello world').ok()
for x in r.retry_with_rdb_reload():
waitForIndex(env, 'idx')
res = r.execute_command('ft.search', 'idx', 'hello world')
r.assertEqual(21, len(res))
res = r.execute_command('ft.search', 'idx', 'hello world', 'withpayloads')
r.assertEqual(31, len(res))
r.assertEqual(10, res[0])
for i in range(1, 30, 3):
r.assertEqual(res[i + 1], 'payload %s' % res[i])
def testGarbageCollector(env):
env.skipOnCluster()
if env.moduleArgs is not None and 'GC_POLICY FORK' in env.moduleArgs:
        # this test is not relevant for fork GC because it does not clean the last block
raise unittest.SkipTest()
N = 100
r = env
r.expect('ft.create', 'idx', 'ON', 'HASH', 'schema', 'foo', 'text').ok()
waitForIndex(r, 'idx')
for i in range(N):
r.expect('ft.add', 'idx', 'doc%d' % i, 1.0,
'fields', 'foo', ' '.join(('term%d' % random.randrange(0, 10) for i in range(10)))).ok()
def get_stats(r):
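        # Flatten the FT.INFO reply into a dict, parsing the nested
        # gc_stats array into a {name: float} mapping.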
res = r.execute_command('ft.info', 'idx')
d = {res[i]: res[i + 1] for i in range(0, len(res), 2)}
gc_stats = {d['gc_stats'][x]: float(
d['gc_stats'][x + 1]) for x in range(0, len(d['gc_stats']), 2)}
d['gc_stats'] = gc_stats
return d
stats = get_stats(r)
if 'current_hz' in stats['gc_stats']:
env.assertGreater(stats['gc_stats']['current_hz'], 8)
env.assertEqual(0, stats['gc_stats']['bytes_collected'])
env.assertGreater(int(stats['num_records']), 0)
initialIndexSize = float(stats['inverted_sz_mb']) * 1024 * 1024
for i in range(N):
r.expect('ft.del', 'idx', 'doc%d' % i).equal(1)
for _ in range(100):
        # GC is random, so invoke it enough times for it to take effect
env.cmd('ft.debug', 'GC_FORCEINVOKE', 'idx')
stats = get_stats(r)
env.assertEqual(0, int(stats['num_docs']))
env.assertEqual(0, int(stats['num_records']))
if not env.is_cluster():
env.assertEqual(100, int(stats['max_doc_id']))
if 'current_hz' in stats['gc_stats']:
env.assertGreater(stats['gc_stats']['current_hz'], 30)
currentIndexSize = float(stats['inverted_sz_mb']) * 1024 * 1024
# print initialIndexSize, currentIndexSize,
# stats['gc_stats']['bytes_collected']
env.assertGreater(initialIndexSize, currentIndexSize)
env.assertGreater(stats['gc_stats'][
'bytes_collected'], currentIndexSize)
for i in range(10):
res = r.execute_command('ft.search', 'idx', 'term%d' % i)
env.assertEqual([0], res)
def testReturning(env):
env.assertCmdOk('ft.create', 'idx', 'ON', 'HASH', 'schema',
'f1', 'text',
'f2', 'text',
'n1', 'numeric', 'sortable',
'f3', 'text')
for i in range(10):
env.assertCmdOk('ft.add', 'idx', 'DOC_{0}'.format(i), 1.0, 'fields',
'f2', 'val2', 'f1', 'val1', 'f3', 'val3',
'n1', i)
# RETURN 0. Simplest case
for x in env.retry_with_reload():
waitForIndex(env, 'idx')
res = env.cmd('ft.search', 'idx', 'val*', 'return', '0')
env.assertEqual(11, len(res))
env.assertEqual(10, res[0])
for r in res[1:]:
env.assertTrue(r.startswith('DOC_'))
for field in ('f1', 'f2', 'f3', 'n1'):
res = env.cmd('ft.search', 'idx', 'val*', 'return', 1, field)
env.assertEqual(21, len(res))
env.assertEqual(10, res[0])
for pair in grouper(res[1:], 2):
docname, fields = pair
env.assertEqual(2, len(fields))
env.assertEqual(field, fields[0])
env.assertTrue(docname.startswith('DOC_'))
# Test that we don't return SORTBY fields if they weren't specified
# also in RETURN
res = env.cmd('ft.search', 'idx', 'val*', 'return', 1, 'f1',
'sortby', 'n1', 'ASC')
row = res[2]
# get the first result
env.assertEqual(['f1', 'val1'], row)
# Test when field is not found
res = env.cmd('ft.search', 'idx', 'val*', 'return', 1, 'nonexist')
env.assertEqual(21, len(res))
env.assertEqual(10, res[0])
    # Test that we don't crash if we're given the wrong number of fields
with env.assertResponseError():
res = env.cmd('ft.search', 'idx', 'val*', 'return', 700, 'nonexist')
def _test_create_options_real(env, *options):
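    # Recreate 'idx' with the given creation options and check how
    # NOOFFSETS / NOFIELDS / NOFREQS change indexing and query behaviour.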
options = [x for x in options if x]
has_offsets = 'NOOFFSETS' not in options
has_fields = 'NOFIELDS' not in options
has_freqs = 'NOFREQS' not in options
try:
env.cmd('ft.drop', 'idx')
# RS 2.0 ft.drop does not remove documents
env.flush()
except Exception as e:
pass
options = ['idx'] + options + ['ON', 'HASH', 'schema', 'f1', 'text', 'f2', 'text']
env.assertCmdOk('ft.create', *options)
for i in range(10):
env.assertCmdOk('ft.add', 'idx', 'doc{}'.format(
i), 0.5, 'fields', 'f1', 'value for {}'.format(i))
# Query
# res = env.cmd('ft.search', 'idx', "value for 3")
# if not has_offsets:
# env.assertIsNone(res)
# else:
# env.assertIsNotNone(res)
# Frequencies:
env.assertCmdOk('ft.add', 'idx', 'doc100',
1.0, 'fields', 'f1', 'foo bar')
env.assertCmdOk('ft.add', 'idx', 'doc200', 1.0,
'fields', 'f1', ('foo ' * 10) + ' bar')
res = env.cmd('ft.search', 'idx', 'foo')
env.assertEqual(2, res[0])
if has_offsets:
docname = res[1]
if has_freqs:
env.assertEqual('doc200', docname)
else:
env.assertEqual('doc100', docname)
env.assertCmdOk('ft.add', 'idx', 'doc300',
1.0, 'fields', 'f1', 'Hello')
res = env.cmd('ft.search', 'idx', '@f2:Hello')
if has_fields:
env.assertEqual(1, len(res))
else:
env.assertEqual(3, len(res))
def testCreationOptions(env):
from itertools import combinations
for x in range(1, 5):
for combo in combinations(('NOOFFSETS', 'NOFREQS', 'NOFIELDS', ''), x):
_test_create_options_real(env, *combo)
env.expect('ft.create', 'idx').error()
def testInfoCommand(env):
from itertools import combinations
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'ON', 'HASH', 'NOFIELDS', 'schema', 'title', 'text'))
N = 50
for i in xrange(N):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1, 'replace', 'fields',
'title', 'hello term%d' % i))
for _ in r.retry_with_rdb_reload():
waitForIndex(env, 'idx')
res = r.execute_command('ft.info', 'idx')
d = {res[i]: res[i + 1] for i in range(0, len(res), 2)}
env.assertEqual(d['index_name'], 'idx')
env.assertEqual(d['index_options'], ['NOFIELDS'])
env.assertListEqual(
d['fields'], [['title', 'type', 'TEXT', 'WEIGHT', '1']])
if not env.is_cluster():
env.assertEquals(int(d['num_docs']), N)
env.assertEquals(int(d['num_terms']), N + 1)
env.assertEquals(int(d['max_doc_id']), N)
env.assertEquals(int(d['records_per_doc_avg']), 2)
env.assertEquals(int(d['num_records']), N * 2)
env.assertGreater(float(d['offset_vectors_sz_mb']), 0)
env.assertGreater(float(d['key_table_size_mb']), 0)
env.assertGreater(float(d['inverted_sz_mb']), 0)
env.assertGreater(float(d['bytes_per_record_avg']), 0)
env.assertGreater(float(d['doc_table_size_mb']), 0)
for x in range(1, 5):
for combo in combinations(('NOOFFSETS', 'NOFREQS', 'NOFIELDS', ''), x):
combo = list(filter(None, combo))
options = combo + ['schema', 'f1', 'text']
try:
env.cmd('ft.drop', 'idx')
except:
pass
env.assertCmdOk('ft.create', 'idx', 'ON', 'HASH', *options)
info = env.cmd('ft.info', 'idx')
ix = info.index('index_options')
env.assertFalse(ix == -1)
opts = info[ix + 1]
# make sure that an empty opts string returns no options in
# info
if not combo:
env.assertListEqual([], opts)
for option in filter(None, combo):
env.assertTrue(option in opts)
def testNoStem(env):
env.cmd('ft.create', 'idx', 'ON', 'HASH',
'schema', 'body', 'text', 'name', 'text', 'nostem')
if not env.isCluster():
        # TODO: make this check generic enough to pass on cluster
res = env.cmd('ft.info', 'idx')
env.assertEqual(res[7][1][5], 'NOSTEM')
for _ in env.retry_with_reload():
waitForIndex(env, 'idx')
try:
env.cmd('ft.del', 'idx', 'doc')
except redis.ResponseError:
pass
# Insert a document
env.assertCmdOk('ft.add', 'idx', 'doc', 1.0, 'fields',
'body', "located",
'name', "located")
# Now search for the fields
res_body = env.cmd('ft.search', 'idx', '@body:location')
res_name = env.cmd('ft.search', 'idx', '@name:location')
env.assertEqual(0, res_name[0])
env.assertEqual(1, res_body[0])
def testSortbyMissingField(env):
# GH Issue 131
env.cmd('ft.create', 'ix', 'ON', 'HASH', 'schema', 'txt',
'text', 'num', 'numeric', 'sortable')
env.cmd('ft.add', 'ix', 'doc1', 1.0, 'fields', 'txt', 'foo')
env.cmd('ft.search', 'ix', 'foo', 'sortby', 'num')
def testParallelIndexing(env):
# GH Issue 207
env.cmd('ft.create', 'idx', 'ON', 'HASH', 'schema', 'txt', 'text')
from threading import Thread
env.getConnection()
ndocs = 100
def runner(tid):
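        # Each thread indexes `ndocs` documents over its own connection.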
cli = env.getConnection()
for num in range(ndocs):
cli.execute_command('ft.add', 'idx', 'doc{}_{}'.format(tid, num), 1.0,
'fields', 'txt', 'hello world' * 20)
ths = []
for tid in range(10):
ths.append(Thread(target=runner, args=(tid,)))
[th.start() for th in ths]
[th.join() for th in ths]
res = env.cmd('ft.info', 'idx')
d = {res[i]: res[i + 1] for i in range(0, len(res), 2)}
env.assertEqual(1000, int(d['num_docs']))
def testDoubleAdd(env):
# Tests issue #210
env.cmd('ft.create', 'idx', 'ON', 'HASH', 'schema', 'txt', 'text')
env.cmd('ft.add', 'idx', 'doc1', 1.0, 'fields', 'txt', 'hello world')
with env.assertResponseError():
env.cmd('ft.add', 'idx', 'doc1', 1.0,
'fields', 'txt', 'goodbye world')
env.assertEqual('hello world', env.cmd('ft.get', 'idx', 'doc1')[1])
env.assertEqual(0, env.cmd('ft.search', 'idx', 'goodbye')[0])
env.assertEqual(1, env.cmd('ft.search', 'idx', 'hello')[0])
# Now with replace
env.cmd('ft.add', 'idx', 'doc1', 1.0, 'replace',
'fields', 'txt', 'goodbye world')
env.assertEqual(1, env.cmd('ft.search', 'idx', 'goodbye')[0])
env.assertEqual(0, env.cmd('ft.search', 'idx', 'hello')[0])
env.assertEqual('goodbye world', env.cmd('ft.get', 'idx', 'doc1')[1])
def testConcurrentErrors(env):
from multiprocessing import Process
import random
env.cmd('ft.create', 'idx', 'ON', 'HASH', 'schema', 'txt', 'text')
docs_per_thread = 100
num_threads = 50
docIds = ['doc{}'.format(x) for x in range(docs_per_thread)]
def thrfn():
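        # Each process pipelines ft.add for the same doc ids in random order;
        # errors from concurrent duplicate adds are swallowed.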
myIds = docIds[::]
random.shuffle(myIds)
cli = env.getConnection()
with cli.pipeline(transaction=False) as pl:
for x in myIds:
pl.execute_command('ft.add', 'idx', x, 1.0,
'fields', 'txt', ' hello world ' * 50)
try:
pl.execute()
except Exception as e:
pass
# print e
thrs = [Process(target=thrfn) for x in range(num_threads)]
[th.start() for th in thrs]
[th.join() for th in thrs]
res = env.cmd('ft.info', 'idx')
d = {res[i]: res[i + 1] for i in range(0, len(res), 2)}
env.assertEqual(100, int(d['num_docs']))
def testBinaryKeys(env):
env.cmd('ft.create', 'idx', 'ON', 'HASH', 'schema', 'txt', 'text')
# Insert a document
env.cmd('ft.add', 'idx', 'Hello', 1.0, 'fields', 'txt', 'NoBin match')
env.cmd('ft.add', 'idx', 'Hello\x00World', 1.0, 'fields', 'txt', 'Bin match')
for _ in env.reloading_iterator():
waitForIndex(env, 'idx')
exp = [2L, 'Hello\x00World', ['txt', 'Bin match'], 'Hello', ['txt', 'NoBin match']]
res = env.cmd('ft.search', 'idx', 'match')
for r in res:
env.assertIn(r, exp)
def testNonDefaultDb(env):
if env.is_cluster():
raise unittest.SkipTest()
# Should be ok
env.cmd('FT.CREATE', 'idx1', 'ON', 'HASH', 'schema', 'txt', 'text')
try:
env.cmd('SELECT 1')
except redis.ResponseError:
return
# Should fail
with env.assertResponseError():
env.cmd('FT.CREATE', 'idx2', 'ON', 'HASH', 'schema', 'txt', 'text')
def testDuplicateNonspecFields(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'schema', 'txt', 'text').ok()
env.expect('FT.ADD', 'idx', 'doc', 1.0, 'fields',
'txt', 'foo', 'f1', 'f1val', 'f1', 'f1val2', 'F1', 'f1Val3').ok()
res = env.cmd('ft.get', 'idx', 'doc')
res = {res[i]: res[i + 1] for i in range(0, len(res), 2)}
env.assertTrue(res['f1'] in ('f1val', 'f1val2'))
env.assertEqual('f1Val3', res['F1'])
def testDuplicateFields(env):
    # As of RS 2.0 this is allowed; only the latest field value is saved and indexed
env.cmd('FT.CREATE', 'idx', 'ON', 'HASH',
'SCHEMA', 'txt', 'TEXT', 'num', 'NUMERIC', 'SORTABLE')
env.expect('FT.ADD', 'idx', 'doc', 1.0, 'FIELDS',
'txt', 'foo', 'txt', 'bar', 'txt', 'baz').ok()
env.expect('FT.SEARCH idx *').equal([1L, 'doc', ['txt', 'baz']])
def testDuplicateSpec(env):
with env.assertResponseError():
env.cmd('FT.CREATE', 'idx', 'ON', 'HASH',
'SCHEMA', 'f1', 'text', 'n1', 'numeric', 'f1', 'text')
def testSortbyMissingFieldSparse(env):
# Note, the document needs to have one present sortable field in
# order for the indexer to give it a sort vector
env.cmd('ft.create', 'idx', 'ON', 'HASH',
'SCHEMA', 'lastName', 'text', 'SORTABLE', 'firstName', 'text', 'SORTABLE')
env.cmd('ft.add', 'idx', 'doc1', 1.0, 'fields', 'lastName', 'mark')
res = env.cmd('ft.search', 'idx', 'mark', 'WITHSORTKEYS', "SORTBY",
"firstName", "ASC", "limit", 0, 100)
# commented because we don't filter out exclusive sortby fields
# env.assertEqual([1L, 'doc1', None, ['lastName', 'mark']], res)
def testLuaAndMulti(env):
env.skip() # addhash isn't supported
if env.is_cluster():
raise unittest.SkipTest()
# Ensure we can work in Lua and Multi environments without crashing
env.cmd('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'f1', 'text', 'n1', 'numeric')
env.cmd('HMSET', 'hashDoc', 'f1', 'v1', 'n1', 4)
env.cmd('HMSET', 'hashDoc2', 'f1', 'v1', 'n1', 5)
r = env.getConnection()
r.eval("return redis.call('ft.add', 'idx', 'doc1', 1.0, 'fields', 'f1', 'bar')", "0")
r.eval("return redis.call('ft.addhash', 'idx', 'hashDoc', 1.0)", 0)
# Try in a pipeline:
with r.pipeline(transaction=True) as pl:
pl.execute_command('ft.add', 'idx', 'doc2',
1.0, 'fields', 'f1', 'v3')
pl.execute_command('ft.add', 'idx', 'doc3',
1.0, 'fields', 'f1', 'v4')
pl.execute_command('ft.addhash', 'idx', 'hashdoc2', 1.0)
pl.execute()
def testLanguageField(env):
env.cmd('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'language', 'TEXT')
env.cmd('FT.ADD', 'idx', 'doc1', 1.0,
'FIELDS', 'language', 'gibberish')
res = env.cmd('FT.SEARCH', 'idx', 'gibberish')
env.assertEqual([1L, 'doc1', ['language', 'gibberish']], res)
# The only way I can verify that LANGUAGE is parsed twice is ensuring we
# provide a wrong language. This is much easier to test than trying to
# figure out how a given word is stemmed
with env.assertResponseError():
env.cmd('FT.ADD', 'idx', 'doc1', 1.0, 'LANGUAGE',
'blah', 'FIELDS', 'language', 'gibber')
def testUninitSortvector(env):
# This would previously crash
env.cmd('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'f1', 'TEXT')
for x in range(2000):
env.cmd('FT.ADD', 'idx', 'doc{}'.format(
x), 1.0, 'FIELDS', 'f1', 'HELLO')
env.broadcast('SAVE')
for x in range(10):
env.broadcast('DEBUG RELOAD')
def normalize_row(row):
return to_dict(row)
def assertAggrowsEqual(env, exp, got):
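    # Compare aggregate replies ignoring row order: rows after the count are
    # converted to dicts and sorted before comparison.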
env.assertEqual(exp[0], got[0])
env.assertEqual(len(exp), len(got))
# and now, it's just free form:
exp = sorted(to_dict(x) for x in exp[1:])
got = sorted(to_dict(x) for x in got[1:])
env.assertEqual(exp, got)
def assertResultsEqual(env, exp, got, inorder=True):
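    # Order-sensitive comparison of search replies: (doc id, fields) pairs are
    # compared positionally, with field lists compared as dicts.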
from pprint import pprint
# pprint(exp)
# pprint(got)
env.assertEqual(exp[0], got[0])
env.assertEqual(len(exp), len(got))
exp = list(grouper(exp[1:], 2))
got = list(grouper(got[1:], 2))
for x in range(len(exp)):
exp_did, exp_fields = exp[x]
got_did, got_fields = got[x]
env.assertEqual(exp_did, got_did, message="at position {}".format(x))
got_fields = to_dict(got_fields)
exp_fields = to_dict(exp_fields)
env.assertEqual(exp_fields, got_fields, message="at position {}".format(x))
def testAlterIndex(env):
env.cmd('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'f1', 'TEXT')
env.cmd('FT.ADD', 'idx', 'doc1', 1.0, 'FIELDS', 'f1', 'hello', 'f2', 'world')
env.cmd('FT.ALTER', 'idx', 'SCHEMA', 'ADD', 'f2', 'TEXT')
waitForIndex(env, 'idx')
env.cmd('FT.ADD', 'idx', 'doc2', 1.0, 'FIELDS', 'f1', 'hello', 'f2', 'world')
# RS 2.0 reindex and after reload both documents are found
# for _ in env.retry_with_reload():
res = env.cmd('FT.SEARCH', 'idx', 'world')
env.assertEqual(toSortedFlatList(res), toSortedFlatList([2L, 'doc2', ['f1', 'hello', 'f2', 'world'], 'doc1', ['f1', 'hello', 'f2', 'world']]))
# env.assertEqual([1, 'doc2', ['f1', 'hello', 'f2', 'world']], ret)
env.cmd('FT.ALTER', 'idx', 'SCHEMA', 'ADD', 'f3', 'TEXT', 'SORTABLE')
for x in range(10):
env.cmd('FT.ADD', 'idx', 'doc{}'.format(x + 3), 1.0,
'FIELDS', 'f1', 'hello', 'f3', 'val{}'.format(x))
for _ in env.retry_with_reload():
waitForIndex(env, 'idx')
# Test that sortable works
res = env.cmd('FT.SEARCH', 'idx', 'hello', 'SORTBY', 'f3', 'DESC')
exp = [12, 'doc12', ['f1', 'hello', 'f3', 'val9'], 'doc11', ['f1', 'hello', 'f3', 'val8'],
'doc10', ['f1', 'hello', 'f3', 'val7'], 'doc9', ['f1', 'hello', 'f3', 'val6'],
'doc8', ['f1', 'hello', 'f3', 'val5'], 'doc7', ['f1', 'hello', 'f3', 'val4'],
'doc6', ['f1', 'hello', 'f3', 'val3'], 'doc5', ['f1', 'hello', 'f3', 'val2'],
'doc4', ['f1', 'hello', 'f3', 'val1'], 'doc3', ['f1', 'hello', 'f3', 'val0']]
assertResultsEqual(env, exp, res)
# Test that we can add a numeric field
env.cmd('FT.ALTER', 'idx', 'SCHEMA', 'ADD', 'n1', 'NUMERIC')
env.cmd('FT.ADD', 'idx', 'docN1', 1.0, 'FIELDS', 'n1', 50)
env.cmd('FT.ADD', 'idx', 'docN2', 1.0, 'FIELDS', 'n1', 250)
for _ in env.retry_with_reload():
waitForIndex(env, 'idx')
res = env.cmd('FT.SEARCH', 'idx', '@n1:[0 100]')
env.assertEqual([1, 'docN1', ['n1', '50']], res)
env.expect('FT.ALTER', 'idx', 'SCHEMA', 'NOT_ADD', 'f2', 'TEXT').error()
env.expect('FT.ALTER', 'idx', 'SCHEMA', 'ADD').error()
env.expect('FT.ALTER', 'idx', 'SCHEMA', 'ADD', 'f2').error()
def testAlterValidation(env):
    # Test the constraints of the ALTER command
env.cmd('FT.CREATE', 'idx1', 'ON', 'HASH', 'SCHEMA', 'f0', 'TEXT')
for x in range(1, 32):
env.cmd('FT.ALTER', 'idx1', 'SCHEMA', 'ADD', 'f{}'.format(x), 'TEXT')
# OK for now.
    # Adding another text field should exceed the limit
env.assertRaises(redis.ResponseError, env.cmd, 'FT.ALTER',
'idx1', 'SCHEMA', 'ADD', 'tooBig', 'TEXT')
env.cmd('FT.CREATE', 'idx2', 'MAXTEXTFIELDS', 'ON', 'HASH', 'SCHEMA', 'f0', 'TEXT')
# print env.cmd('FT.INFO', 'idx2')
for x in range(1, 50):
env.cmd('FT.ALTER', 'idx2', 'SCHEMA', 'ADD', 'f{}'.format(x + 1), 'TEXT')
env.cmd('FT.ADD', 'idx2', 'doc1', 1.0, 'FIELDS', 'f50', 'hello')
for _ in env.retry_with_reload():
waitForIndex(env, 'idx2')
ret = env.cmd('FT.SEARCH', 'idx2', '@f50:hello')
env.assertEqual([1, 'doc1', ['f50', 'hello']], ret)
env.cmd('FT.CREATE', 'idx3', 'ON', 'HASH', 'SCHEMA', 'f0', 'text')
# Try to alter the index with garbage
env.assertRaises(redis.ResponseError, env.cmd, 'FT.ALTER', 'idx3',
'SCHEMA', 'ADD', 'f1', 'TEXT', 'f2', 'garbage')
ret = to_dict(env.cmd('ft.info', 'idx3'))
env.assertEqual(1, len(ret['fields']))
env.assertRaises(redis.ResponseError, env.cmd, 'FT.ALTER',
'nonExist', 'SCHEMA', 'ADD', 'f1', 'TEXT')
# test with no fields!
env.assertRaises(redis.ResponseError, env.cmd, 'FT.ALTER', 'idx2', 'SCHEMA', 'ADD')
# test with no fields!
env.assertRaises(redis.ResponseError, env.cmd, 'FT.ALTER', 'idx2', 'SCHEMA', 'ADD')
def testIssue366_2(env):
# FT.CREATE atest SCHEMA textfield TEXT numfield NUMERIC
# FT.ADD atest anId 1 PAYLOAD '{"hello":"world"}' FIELDS textfield sometext numfield 1234
# FT.ADD atest anId 1 PAYLOAD '{"hello":"world2"}' REPLACE PARTIAL FIELDS numfield 1111
# shutdown
env.cmd('FT.CREATE', 'idx1', 'ON', 'HASH',
'SCHEMA', 'textfield', 'TEXT', 'numfield', 'NUMERIC')
env.cmd('FT.ADD', 'idx1', 'doc1', 1, 'PAYLOAD', '{"hello":"world"}',
'FIELDS', 'textfield', 'sometext', 'numfield', 1234)
env.cmd('ft.add', 'idx1', 'doc1', 1,
'PAYLOAD', '{"hello":"world2"}',
'REPLACE', 'PARTIAL',
'FIELDS', 'textfield', 'sometext', 'numfield', 1111)
for _ in env.retry_with_reload():
        pass
def testIssue654(env):
# Crashes during FILTER
env.cmd('ft.create', 'idx', 'ON', 'HASH', 'schema', 'id', 'numeric')
env.cmd('ft.add', 'idx', 1, 1, 'fields', 'id', 1)
env.cmd('ft.add', 'idx', 2, 1, 'fields', 'id', 2)
res = env.cmd('ft.search', 'idx', '*', 'filter', '@version', 0, 2)
def testReplaceReload(env):
env.cmd('FT.CREATE', 'idx2', 'ON', 'HASH',
'SCHEMA', 'textfield', 'TEXT', 'numfield', 'NUMERIC')
# Create a document and then replace it.
env.cmd('FT.ADD', 'idx2', 'doc2', 1.0, 'FIELDS', 'textfield', 's1', 'numfield', 99)
env.cmd('FT.ADD', 'idx2', 'doc2', 1.0, 'REPLACE', 'PARTIAL',
'FIELDS', 'textfield', 's100', 'numfield', 990)
env.dump_and_reload()
# RDB Should still be fine
env.cmd('FT.ADD', 'idx2', 'doc2', 1.0, 'REPLACE', 'PARTIAL',
'FIELDS', 'textfield', 's200', 'numfield', 1090)
doc = to_dict(env.cmd('FT.GET', 'idx2', 'doc2'))
env.assertEqual('s200', doc['textfield'])
env.assertEqual('1090', doc['numfield'])
# command = 'FT.CREATE idx SCHEMA '
# for i in range(255):
# command += 't%d NUMERIC SORTABLE ' % i
# command = command[:-1]
# r.execute_command(command)
# r.execute_command('save')
# // reload from ...
# r.execute_command('FT.ADD idx doc1 1.0 FIELDS t0 1')
def testIssue417(env):
command = ['ft.create', 'idx', 'ON', 'HASH', 'schema']
for x in range(255):
command += ['t{}'.format(x), 'numeric', 'sortable']
command = command[:-1]
env.cmd(*command)
for _ in env.reloading_iterator():
waitForIndex(env, 'idx')
try:
env.execute_command('FT.ADD', 'idx', 'doc1', '1.0', 'FIELDS', 't0', '1')
except redis.ResponseError as e:
env.assertTrue('already' in e.message.lower())
# >FT.CREATE myIdx SCHEMA title TEXT WEIGHT 5.0 body TEXT url TEXT
# >FT.ADD myIdx doc1 1.0 FIELDS title "hello world" body "lorem ipsum" url "www.google.com"
# >FT.SEARCH myIdx "no-as"
# Could not connect to Redis at 127.0.0.1:6379: Connection refused
# >FT.SEARCH myIdx "no-as"
# (error) Unknown Index name
def testIssue422(env):
env.cmd('ft.create', 'myIdx', 'ON', 'HASH', 'schema',
'title', 'TEXT', 'WEIGHT', '5.0',
'body', 'TEXT',
'url', 'TEXT')
env.cmd('ft.add', 'myIdx', 'doc1', '1.0', 'FIELDS', 'title', 'hello world', 'bod', 'lorem ipsum', 'url', 'www.google.com')
rv = env.cmd('ft.search', 'myIdx', 'no-as')
env.assertEqual([0], rv)
def testIssue446(env):
env.cmd('ft.create', 'myIdx', 'ON', 'HASH', 'schema',
'title', 'TEXT', 'SORTABLE')
env.cmd('ft.add', 'myIdx', 'doc1', '1.0', 'fields', 'title', 'hello world', 'body', 'lorem ipsum', 'url', '"www.google.com')
rv = env.cmd('ft.search', 'myIdx', 'hello', 'limit', '0', '0')
env.assertEqual([1], rv)
# Related - issue 635
env.cmd('ft.add', 'myIdx', 'doc2', '1.0', 'fields', 'title', 'hello')
rv = env.cmd('ft.search', 'myIdx', 'hello', 'limit', '0', '0')
env.assertEqual([2], rv)
def testTimeoutSettings(env):
env.cmd('ft.create', 'idx', 'ON', 'HASH', 'schema', 't1', 'text')
env.expect('ft.search', 'idx', '*', 'ON_TIMEOUT', 'BLAHBLAH').raiseError()
env.expect('ft.search', 'idx', '*', 'ON_TIMEOUT', 'RETURN').notRaiseError()
env.expect('ft.search', 'idx', '*', 'ON_TIMEOUT', 'FAIL').notRaiseError()
def testAlias(env):
conn = getConnectionByEnv(env)
env.cmd('ft.create', 'idx', 'ON', 'HASH', 'PREFIX', 1, 'doc1', 'schema', 't1', 'text')
env.cmd('ft.create', 'idx2', 'ON', 'HASH', 'PREFIX', 1, 'doc2', 'schema', 't1', 'text')
env.expect('ft.aliasAdd', 'myIndex').raiseError()
env.expect('ft.aliasupdate', 'fake_alias', 'imaginary_alias', 'Too_many_args').raiseError()
env.cmd('ft.aliasAdd', 'myIndex', 'idx')
env.cmd('ft.add', 'myIndex', 'doc1', 1.0, 'fields', 't1', 'hello')
r = env.cmd('ft.search', 'idx', 'hello')
env.assertEqual([1, 'doc1', ['t1', 'hello']], r)
r2 = env.cmd('ft.search', 'myIndex', 'hello')
env.assertEqual(r, r2)
# try to add the same alias again; should be an error
env.expect('ft.aliasAdd', 'myIndex', 'idx2').raiseError()
env.expect('ft.aliasAdd', 'alias2', 'idx').notRaiseError()
# now delete the index
env.cmd('ft.drop', 'myIndex')
    # RS 2.0 does not delete documents on ft.drop
conn.execute_command('DEL', 'doc1')
    # the alias mapping should be cleared now; test this by pointing
    # the old alias at a different index
env.cmd('ft.aliasAdd', 'myIndex', 'idx2')
env.cmd('ft.aliasAdd', 'alias2', 'idx2')
env.cmd('ft.add', 'myIndex', 'doc2', 1.0, 'fields', 't1', 'hello')
r = env.cmd('ft.search', 'alias2', 'hello')
env.assertEqual([1L, 'doc2', ['t1', 'hello']], r)
# check that aliasing one alias to another returns an error. This will
# end up being confusing
env.expect('ft.aliasAdd', 'alias3', 'myIndex').raiseError()
# check that deleting the alias works as expected
env.expect('ft.aliasDel', 'myIndex').notRaiseError()
env.expect('ft.search', 'myIndex', 'foo').raiseError()
# create a new index and see if we can use the old name
env.cmd('ft.create', 'idx3', 'ON', 'HASH', 'PREFIX', 1, 'doc3', 'schema', 't1', 'text')
env.cmd('ft.add', 'idx3', 'doc3', 1.0, 'fields', 't1', 'foo')
env.cmd('ft.aliasAdd', 'myIndex', 'idx3')
# also, check that this works in rdb save
for _ in env.retry_with_rdb_reload():
waitForIndex(env, 'myIndex')
r = env.cmd('ft.search', 'myIndex', 'foo')
env.assertEqual([1L, 'doc3', ['t1', 'foo']], r)
# Check that we can move an alias from one index to another
env.cmd('ft.aliasUpdate', 'myIndex', 'idx2')
r = env.cmd('ft.search', 'myIndex', "hello")
env.assertEqual([1L, 'doc2', ['t1', 'hello']], r)
# Test that things like ft.get, ft.aggregate, etc. work
r = env.cmd('ft.get', 'myIndex', 'doc2')
env.assertEqual(['t1', 'hello'], r)
r = env.cmd('ft.aggregate', 'myIndex', 'hello', 'LOAD', '1', '@t1')
env.assertEqual([1, ['t1', 'hello']], r)
# Test update
env.expect('ft.aliasAdd', 'updateIndex', 'idx3')
env.expect('ft.aliasUpdate', 'updateIndex', 'fake_idx')
r = env.cmd('ft.del', 'idx2', 'doc2')
env.assertEqual(1, r)
env.expect('ft.aliasdel').raiseError()
env.expect('ft.aliasdel', 'myIndex', 'yourIndex').raiseError()
env.expect('ft.aliasdel', 'non_existing_alias').raiseError()
def testNoCreate(env):
env.cmd('ft.create', 'idx', 'ON', 'HASH', 'schema', 'f1', 'text')
env.expect('ft.add', 'idx', 'schema', 'f1').raiseError()
env.expect('ft.add', 'idx', 'doc1', 1, 'nocreate', 'fields', 'f1', 'hello').raiseError()
env.expect('ft.add', 'idx', 'doc1', 1, 'replace', 'nocreate', 'fields', 'f1', 'hello').raiseError()
env.expect('ft.add', 'idx', 'doc1', 1, 'replace', 'fields', 'f1', 'hello').notRaiseError()
env.expect('ft.add', 'idx', 'doc1', 1, 'replace', 'nocreate', 'fields', 'f1', 'world').notRaiseError()
def testSpellCheck(env):
env.cmd('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'report', 'TEXT')
env.cmd('FT.ADD', 'idx', 'doc1', 1.0, 'FIELDS', 'report', 'report content')
rv = env.cmd('FT.SPELLCHECK', 'idx', '111111')
env.assertEqual([['TERM', '111111', []]], rv)
if not env.isCluster():
rv = env.cmd('FT.SPELLCHECK', 'idx', '111111', 'FULLSCOREINFO')
env.assertEqual([1L, ['TERM', '111111', []]], rv)
# Standalone functionality
def testIssue484(env):
# Issue with split
# 127.0.0.1:6379> ft.drop productSearch1
# OK
# 127.0.0.1:6379> "FT.CREATE" "productSearch1" "NOSCOREIDX" "SCHEMA" "productid" "TEXT" "categoryid" "TEXT" "color" "TEXT" "timestamp" "NUMERIC"
# OK
# 127.0.0.1:6379> "FT.ADD" "productSearch1" "GUID1" "1.0" "REPLACE" "FIELDS" "productid" "1" "categoryid" "cars" "color" "blue" "categoryType" 0
# OK
# 127.0.0.1:6379> "FT.ADD" "productSearch1" "GUID2" "1.0" "REPLACE" "FIELDS" "productid" "1" "categoryid" "small cars" "color" "white" "categoryType" 0
# OK
# 127.0.0.1:6379> "FT.ADD" "productSearch1" "GUID3" "1.0" "REPLACE" "FIELDS" "productid" "2" "categoryid" "Big cars" "color" "white" "categoryType" 0
# OK
# 127.0.0.1:6379> "FT.ADD" "productSearch1" "GUID4" "1.0" "REPLACE" "FIELDS" "productid" "2" "categoryid" "Big cars" "color" "green" "categoryType" 0
# OK
# 127.0.0.1:6379> "FT.ADD" "productSearch1" "GUID5" "1.0" "REPLACE" "FIELDS" "productid" "3" "categoryid" "cars" "color" "blue" "categoryType" 0
# OK
# 127.0.0.1:6379> FT.AGGREGATE productSearch1 * load 2 @color @categoryid APPLY "split(format(\"%s-%s\",@color,@categoryid),\"-\")" as value GROUPBY 1 @value REDUCE COUNT 0 as value_count
env.cmd('ft.create', 'productSearch1', 'noscoreidx', 'ON', 'HASH', 'schema', 'productid',
'text', 'categoryid', 'text', 'color', 'text', 'timestamp', 'numeric')
env.cmd('ft.add', 'productSearch1', 'GUID1', '1.0', 'REPLACE', 'FIELDS', 'productid', '1', 'categoryid', 'cars', 'color', 'blue', 'categoryType', 0)
env.cmd('ft.add', 'productSearch1', 'GUID2', '1.0', 'REPLACE', 'FIELDS', 'productid', '1', 'categoryid', 'small cars', 'color', 'white', 'categoryType', 0)
env.cmd('ft.add', 'productSearch1', 'GUID3', '1.0', 'REPLACE', 'FIELDS', 'productid', '2', 'categoryid', 'Big cars', 'color', 'white', 'categoryType', 0)
env.cmd('ft.add', 'productSearch1', 'GUID4', '1.0', 'REPLACE', 'FIELDS', 'productid', '2', 'categoryid', 'Big cars', 'color', 'green', 'categoryType', 0)
env.cmd('ft.add', 'productSearch1', 'GUID5', '1.0', 'REPLACE', 'FIELDS', 'productid', '3', 'categoryid', 'cars', 'color', 'blue', 'categoryType', 0)
res = env.cmd('FT.AGGREGATE', 'productSearch1', '*',
'load', '2', '@color', '@categoryid',
'APPLY', 'split(format("%s-%s",@color,@categoryid),"-")', 'as', 'value',
'GROUPBY', '1', '@value',
'REDUCE', 'COUNT', '0', 'as', 'value_count',
'SORTBY', '4', '@value_count', 'DESC', '@value', 'ASC')
expected = [6, ['value', 'white', 'value_count', '2'], ['value', 'cars', 'value_count', '2'], ['value', 'small cars', 'value_count', '1'], ['value', 'blue', 'value_count', '2'], ['value', 'Big cars', 'value_count', '2'], ['value', 'green', 'value_count', '1']]
assertAggrowsEqual(env, expected, res)
for var in expected:
env.assertIn(var, res)
def testIssue501(env):
env.cmd('FT.CREATE', 'incidents', 'ON', 'HASH', 'SCHEMA', 'report', 'TEXT')
env.cmd('FT.ADD', 'incidents', 'doc1', 1.0, 'FIELDS', 'report', 'report content')
env.cmd('FT.DICTADD', 'slang', 'timmies', 'toque', 'toonie', 'serviette', 'kerfuffle', 'chesterfield')
rv = env.cmd('FT.SPELLCHECK', 'incidents', 'qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq',
'TERMS', 'INCLUDE', 'slang', 'TERMS', 'EXCLUDE', 'slang')
env.assertEqual("qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq", rv[0][1])
env.assertEqual([], rv[0][2])
env.expect('FT.SPELLCHECK', 'incidents', 'qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq',
'TERMS', 'FAKE_COMMAND', 'slang').error()
def testIssue589(env):
env.cmd('FT.CREATE', 'incidents', 'ON', 'HASH', 'SCHEMA', 'report', 'TEXT')
env.cmd('FT.ADD', 'incidents', 'doc1', 1.0, 'FIELDS', 'report', 'report content')
env.expect('FT.SPELLCHECK', 'incidents', 'report :').error().contains("Syntax error at offset")
def testIssue621(env):
env.expect('ft.create', 'test', 'ON', 'HASH', 'SCHEMA', 'uuid', 'TAG', 'title', 'TEXT').equal('OK')
env.expect('ft.add', 'test', 'a', '1', 'REPLACE', 'PARTIAL', 'FIELDS', 'uuid', 'foo', 'title', 'bar').equal('OK')
env.expect('ft.add', 'test', 'a', '1', 'REPLACE', 'PARTIAL', 'FIELDS', 'title', 'bar').equal('OK')
res = env.cmd('ft.search', 'test', '@uuid:{foo}')
env.assertEqual(toSortedFlatList(res), toSortedFlatList([1L, 'a', ['uuid', 'foo', 'title', 'bar']]))
# Server crash on doc names that conflict with index keys #666
# again, this test is no longer relevant because the index is kept out of the keyspace
# def testIssue666(env):
# # We cannot reliably determine that any error will occur in cluster mode
# # because of the key name
# env.skipOnCluster()
# env.cmd('ft.create', 'foo', 'schema', 'bar', 'text')
# env.cmd('ft.add', 'foo', 'mydoc', 1, 'fields', 'bar', 'one two three')
# # crashes here
# with env.assertResponseError():
# env.cmd('ft.add', 'foo', 'ft:foo/two', '1', 'fields', 'bar', 'four five six')
# # try with replace:
# with env.assertResponseError():
# env.cmd('ft.add', 'foo', 'ft:foo/two', '1', 'REPLACE',
# 'FIELDS', 'bar', 'four five six')
# with env.assertResponseError():
# env.cmd('ft.add', 'foo', 'idx:foo', '1', 'REPLACE',
# 'FIELDS', 'bar', 'four five six')
# env.cmd('ft.add', 'foo', 'mydoc1', 1, 'fields', 'bar', 'four five six')
# 127.0.0.1:6379> flushdb
# OK
# 127.0.0.1:6379> ft.create foo SCHEMA bar text
# OK
# 127.0.0.1:6379> ft.add foo mydoc 1 FIELDS bar "one two three"
# OK
# 127.0.0.1:6379> keys *
# 1) "mydoc"
# 2) "ft:foo/one"
# 3) "idx:foo"
# 4) "ft:foo/two"
# 5) "ft:foo/three"
# 127.0.0.1:6379> ft.add foo "ft:foo/two" 1 FIELDS bar "four five six"
# Could not connect to Redis at 127.0.0.1:6379: Connection refused
def testPrefixDeletedExpansions(env):
env.skipOnCluster()
env.cmd('ft.create', 'idx', 'ON', 'HASH', 'schema', 'txt1', 'text', 'tag1', 'tag')
# get the number of maximum expansions
maxexpansions = int(env.cmd('ft.config', 'get', 'MAXEXPANSIONS')[0][1])
for x in range(maxexpansions):
env.cmd('ft.add', 'idx', 'doc{}'.format(x), 1, 'fields',
'txt1', 'term{}'.format(x), 'tag1', 'tag{}'.format(x))
for x in range(maxexpansions):
env.cmd('ft.del', 'idx', 'doc{}'.format(x))
env.cmd('ft.add', 'idx', 'doc_XXX', 1, 'fields', 'txt1', 'termZZZ', 'tag1', 'tagZZZ')
# r = env.cmd('ft.search', 'idx', 'term*')
# print(r)
# r = env.cmd('ft.search', 'idx', '@tag1:{tag*}')
# print(r)
tmax = time.time() + 0.5 # 500ms max
iters = 0
while time.time() < tmax:
iters += 1
env.cmd('ft.debug', 'gc_forceinvoke', 'idx')
r = env.cmd('ft.search', 'idx', '@txt1:term* @tag1:{tag*}')
if r[0]:
break
# print 'did {} iterations'.format(iters)
r = env.cmd('ft.search', 'idx', '@txt1:term* @tag1:{tag*}')
env.assertEqual(toSortedFlatList([1, 'doc_XXX', ['txt1', 'termZZZ', 'tag1', 'tagZZZ']]), toSortedFlatList(r))
def testOptionalFilter(env):
env.cmd('ft.create', 'idx', 'ON', 'HASH', 'schema', 't1', 'text')
for x in range(100):
env.cmd('ft.add', 'idx', 'doc_{}'.format(x), 1, 'fields', 't1', 'hello world word{}'.format(x))
env.cmd('ft.explain', 'idx', '(~@t1:word20)')
# print(r)
r = env.cmd('ft.search', 'idx', '~(word20 => {$weight: 2.0})')
def testIssue736(env):
# for new RS 2.0, ft.add does not return certain errors
env.skip()
# 1. create the schema, we need a tag field
env.cmd('ft.create', 'idx', 'ON', 'HASH',
'schema', 't1', 'text', 'n2', 'numeric', 't2', 'tag')
# 2. create a single document to initialize at least one RSAddDocumentCtx
env.cmd('ft.add', 'idx', 'doc1', 1, 'fields', 't1', 'hello', 't2', 'foo, bar')
# 3. create a second document with many filler fields to force a realloc:
extra_fields = []
for x in range(20):
extra_fields += ['nidx_fld{}'.format(x), 'val{}'.format(x)]
extra_fields += ['n2', 'not-a-number', 't2', 'random, junk']
with env.assertResponseError():
env.cmd('ft.add', 'idx', 'doc2', 1, 'fields', *extra_fields)
def testCriteriaTesterDeactivated():
env = Env(moduleArgs='_MAX_RESULTS_TO_UNSORTED_MODE 1')
env.cmd('ft.create', 'idx', 'ON', 'HASH', 'schema', 't1', 'text')
env.cmd('ft.add', 'idx', 'doc1', 1, 'fields', 't1', 'hello1 hey hello2')
env.cmd('ft.add', 'idx', 'doc2', 1, 'fields', 't1', 'hello2 hey')
env.cmd('ft.add', 'idx', 'doc3', 1, 'fields', 't1', 'hey')
env.expect('ft.search', 'idx', '(hey hello1)|(hello2 hey)').equal([2L, 'doc1', ['t1', 'hello1 hey hello2'], 'doc2', ['t1', 'hello2 hey']])
def testIssue828(env):
env.cmd('ft.create', 'beers', 'ON', 'HASH', 'SCHEMA',
'name', 'TEXT', 'PHONETIC', 'dm:en',
'style', 'TAG', 'SORTABLE',
'abv', 'NUMERIC', 'SORTABLE')
rv = env.cmd("FT.ADD", "beers", "802", "1.0",
"FIELDS", "index", "25", "abv", "0.049",
"name", "Hell or High Watermelon Wheat (2009)",
"style", "Fruit / Vegetable Beer")
env.assertEqual('OK', rv)
def testIssue862(env):
env.cmd('ft.create', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT', 'SORTABLE')
rv = env.cmd("FT.ADD", "idx", "doc1", "1.0", "FIELDS", "test", "foo")
env.assertEqual('OK', rv)
env.cmd("FT.SEARCH", "idx", "foo", 'WITHSORTKEYS')
env.assertTrue(env.isUp())
def testIssue_884(env):
env.expect('FT.create', 'idx', 'ON', 'HASH', 'STOPWORDS', '0', 'SCHEMA', 'title', 'text', 'weight',
'50', 'subtitle', 'text', 'weight', '10', 'author', 'text', 'weight',
'10', 'description', 'text', 'weight', '20').equal('OK')
env.expect('FT.ADD', 'idx', 'doc4', '1.0', 'FIELDS', 'title', 'mohsin conversation the conversation tahir').equal('OK')
env.expect('FT.ADD', 'idx', 'doc3', '1.0', 'FIELDS', 'title', 'Fareham Civilization Church - Sermons and conversations mohsin conversation the').equal('OK')
env.expect('FT.ADD', 'idx', 'doc2', '1.0', 'FIELDS', 'title', 'conversation the conversation - a drama about conversation, the science of conversation.').equal('OK')
env.expect('FT.ADD', 'idx', 'doc1', '1.0', 'FIELDS', 'title', 'mohsin conversation with the mohsin').equal('OK')
expected = [2L, 'doc2', ['title', 'conversation the conversation - a drama about conversation, the science of conversation.'], 'doc4', ['title', 'mohsin conversation the conversation tahir']]
res = env.cmd('FT.SEARCH', 'idx', '@title:(conversation) (@title:(conversation the conversation))=>{$inorder: true;$slop: 0}')
env.assertEquals(len(expected), len(res))
for v in expected:
env.assertContains(v, res)
def testIssue_866(env):
env.expect('ft.sugadd', 'sug', 'test123', '1').equal(1)
env.expect('ft.sugadd', 'sug', 'test456', '1').equal(2)
env.expect('ft.sugdel', 'sug', 'test').equal(0)
env.expect('ft.sugget', 'sug', '').equal(['test123', 'test456'])
def testIssue_848(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test1', 'TEXT', 'SORTABLE').equal('OK')
env.expect('FT.ADD', 'idx', 'doc1', '1.0', 'FIELDS', 'test1', 'foo').equal('OK')
env.expect('FT.ALTER', 'idx', 'SCHEMA', 'ADD', 'test2', 'TEXT', 'SORTABLE').equal('OK')
env.expect('FT.ADD', 'idx', 'doc2', '1.0', 'FIELDS', 'test1', 'foo', 'test2', 'bar').equal('OK')
env.expect('FT.SEARCH', 'idx', 'foo', 'SORTBY', 'test2', 'ASC').equal([2L, 'doc1', ['test1', 'foo'], 'doc2', ['test2', 'bar', 'test1', 'foo']])
def testMod_309(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT', 'SORTABLE').equal('OK')
for i in range(100000):
env.expect('FT.ADD', 'idx', 'doc%d'%i, '1.0', 'FIELDS', 'test', 'foo').equal('OK')
res = env.cmd('FT.AGGREGATE', 'idx', 'foo')
env.assertEqual(len(res), 100001)
def testMod_309_with_cursor(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT', 'SORTABLE').equal('OK')
for i in range(100000):
env.expect('FT.ADD', 'idx', 'doc%d'%i, '1.0', 'FIELDS', 'test', 'foo').equal('OK')
res = env.cmd('FT.AGGREGATE', 'idx', 'foo', 'WITHCURSOR')
l = len(res[0]) - 1 # do not count the number of results (the first element in the results)
cursor = res[1]
while cursor != 0:
r, cursor = env.cmd('FT.CURSOR', 'READ', 'idx', str(cursor))
l += (len(r) - 1)
env.assertEqual(l, 100000)
def testIssue_865(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', '1', 'TEXT', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', '1', 'foo1').equal('OK')
env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', '1', 'foo2').equal('OK')
env.expect('ft.search', 'idx', 'foo*', 'SORTBY', '1', 'ASC').equal([2, 'doc1', ['1', 'foo1'], 'doc2', ['1', 'foo2']])
env.expect('ft.search', 'idx', 'foo*', 'SORTBY', '1', 'DESC').equal([2, 'doc2', ['1', 'foo2'], 'doc1', ['1', 'foo1']])
env.expect('ft.search', 'idx', 'foo*', 'SORTBY', '1', 'bad').error()
env.expect('ft.search', 'idx', 'foo*', 'SORTBY', 'bad', 'bad').error()
env.expect('ft.search', 'idx', 'foo*', 'SORTBY', 'bad').error()
env.expect('ft.search', 'idx', 'foo*', 'SORTBY').error()
def testIssue_779(env):
# FT.ADD should return NOADD and leave the doc unchanged when the IF condition "value < same_value" is false,
# but it returned OK and made the change. Note that "greater than" (">") did not have the same bug.
env.cmd('FT.CREATE idx2 ON HASH SCHEMA ot1 TAG')
env.cmd('FT.ADD idx2 doc2 1.0 FIELDS newf CAT ot1 4001')
res = env.cmd('FT.GET idx2 doc2')
env.assertEqual(toSortedFlatList(res), toSortedFlatList(["newf", "CAT", "ot1", "4001"]))
# NOADD is expected since 4001 is not < 4000, and as a result no update to doc2 is expected
env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if @ot1<4000 FIELDS newf DOG ot1 4000', 'NOADD')
res = env.cmd('FT.GET idx2 doc2')
env.assertEqual(toSortedFlatList(res), toSortedFlatList(["newf", "CAT", "ot1", "4001"]))
# OK is expected since 4001 < 4002 and the doc2 is updated
env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if @ot1<4002 FIELDS newf DOG ot1 4002').equal('OK')
res = env.cmd('FT.GET idx2 doc2')
env.assertEqual(toSortedFlatList(res), toSortedFlatList(["newf", "DOG", "ot1", "4002"]))
# OK is NOT expected since 4002 is not < 4002
# We expect NOADD and no update to doc2; however, we get OK and doc2 updated
# After the fix, @ot1 is implicitly converted to a number, thus we expect NOADD
env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if @ot1<4002 FIELDS newf FISH ot1 4002').equal('NOADD')
env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if to_number(@ot1)<4002 FIELDS newf FISH ot1 4002').equal('NOADD')
env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if @ot1<to_str(4002) FIELDS newf FISH ot1 4002').equal('NOADD')
res = env.cmd('FT.GET idx2 doc2')
env.assertEqual(toSortedFlatList(res), toSortedFlatList(["newf", "DOG", "ot1", "4002"]))
# OK and doc2 update is expected since 4002 < 4003
env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if @ot1<4003 FIELDS newf HORSE ot1 4003').equal('OK')
res = env.cmd('FT.GET idx2 doc2')
env.assertEqual(toSortedFlatList(res), toSortedFlatList(["newf", "HORSE", "ot1", "4003"]))
# Expect NOADD since 4003 is not > 4003
env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if @ot1>4003 FIELDS newf COW ot1 4003').equal('NOADD')
env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if 4003<@ot1 FIELDS newf COW ot1 4003').equal('NOADD')
# Expect OK and doc2 updated since 4003 > 4002
env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if @ot1>4002 FIELDS newf PIG ot1 4002').equal('OK')
res = env.cmd('FT.GET idx2 doc2')
env.assertEqual(toSortedFlatList(res), toSortedFlatList(["newf", "PIG", "ot1", "4002"]))
# Syntax errors
env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if @ot1<4-002 FIELDS newf DOG ot1 4002').contains('Syntax error')
env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if @ot1<to_number(4-002) FIELDS newf DOG ot1 4002').contains('Syntax error')
def testUnknownSymbolErrorOnConditionalAdd(env):
env.expect('FT.CREATE idx ON HASH SCHEMA f1 TAG f2 NUMERIC NOINDEX f3 TAG NOINDEX').ok()
env.expect('ft.add idx doc1 1.0 REPLACE PARTIAL IF @f1<awfwaf FIELDS f1 foo f2 1 f3 boo').ok()
env.expect('ft.add idx doc1 1.0 REPLACE PARTIAL IF @f1<awfwaf FIELDS f1 foo f2 1 f3 boo').error()
def testWrongResultsReturnedBySkipOptimization(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'f1', 'TEXT', 'f2', 'TEXT').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'f1', 'foo', 'f2', 'bar').equal('OK')
env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', 'f1', 'moo', 'f2', 'foo').equal('OK')
env.expect('ft.search', 'idx', 'foo @f2:moo').equal([0L])
def testErrorWithApply(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT', 'SORTABLE').equal('OK')
env.expect('FT.ADD', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo bar').equal('OK')
err = env.cmd('FT.AGGREGATE', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'split()')[1]
env.assertEqual(str(err[0]), 'Invalid number of arguments for split')
def testSummerizeWithAggregateRaiseError(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo1').equal('OK')
env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', 'test', 'foo2').equal('OK')
env.expect('ft.aggregate', 'idx', 'foo2', 'SUMMARIZE', 'FIELDS', '1', 'test',
'GROUPBY', '1', '@test', 'REDUCE', 'COUNT', '0').error()
def testSummerizeHighlightParseError(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo1').equal('OK')
env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', 'test', 'foo2').equal('OK')
env.expect('ft.search', 'idx', 'foo2', 'SUMMARIZE', 'FIELDS', 'WITHSCORES').error()
env.expect('ft.search', 'idx', 'foo2', 'HIGHLIGHT', 'FIELDS', 'WITHSCORES').error()
def testCursorBadArgument(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo1').equal('OK')
env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', 'test', 'foo2').equal('OK')
env.expect('ft.aggregate', 'idx', '*',
'GROUPBY', '1', '@test', 'REDUCE', 'COUNT', '0',
'WITHCURSOR', 'COUNT', 'BAD').error()
def testLimitBadArgument(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo1').equal('OK')
env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', 'test', 'foo2').equal('OK')
env.expect('ft.search', 'idx', '*', 'LIMIT', '1').error()
def testOnTimeoutBadArgument(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo1').equal('OK')
env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', 'test', 'foo2').equal('OK')
env.expect('ft.search', 'idx', '*', 'ON_TIMEOUT', 'bad').error()
def testAggregateSortByWrongArgument(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo1').equal('OK')
env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', 'test', 'foo2').equal('OK')
env.expect('ft.aggregate', 'idx', '*', 'SORTBY', 'bad').error()
def testAggregateSortByMaxNumberOfFields(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA',
'test1', 'TEXT', 'SORTABLE',
'test2', 'TEXT', 'SORTABLE',
'test3', 'TEXT', 'SORTABLE',
'test4', 'TEXT', 'SORTABLE',
'test5', 'TEXT', 'SORTABLE',
'test6', 'TEXT', 'SORTABLE',
'test7', 'TEXT', 'SORTABLE',
'test8', 'TEXT', 'SORTABLE',
'test9', 'TEXT', 'SORTABLE'
).equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo1').equal('OK')
env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', 'test', 'foo2').equal('OK')
env.expect('ft.aggregate', 'idx', '*', 'SORTBY', '9', *['@test%d' % (i + 1) for i in range(9)]).error()
args = ['@test%d' % (i + 1) for i in range(8)] + ['bad']
env.expect('ft.aggregate', 'idx', '*', 'SORTBY', '9', *args).error()
args = ['@test%d' % (i + 1) for i in range(8)] + ['ASC', 'MAX', 'bad']
env.expect('ft.aggregate', 'idx', '*', 'SORTBY', '9', *args).error()
args = ['@test%d' % (i + 1) for i in range(8)] + ['ASC', 'MAX']
env.expect('ft.aggregate', 'idx', '*', 'SORTBY', '9', *args).error()
def testNumericFilterError(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'NUMERIC', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', '1').equal('OK')
env.expect('ft.search', 'idx', '*', 'FILTER', 'test', 'bad', '2').error()
env.expect('ft.search', 'idx', '*', 'FILTER', 'test', '0', 'bad').error()
env.expect('ft.search', 'idx', '*', 'FILTER', 'test', '0').error()
env.expect('ft.search', 'idx', '*', 'FILTER', 'test', 'bad').error()
env.expect('ft.search', 'idx', '*', 'FILTER', 'test', '0', '2', 'FILTER', 'test', '0', 'bla').error()
def testGeoFilterError(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'NUMERIC', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', '1').equal('OK')
env.expect('ft.search', 'idx', '*', 'GEOFILTER', 'test', '1').error()
env.expect('ft.search', 'idx', '*', 'GEOFILTER', 'test', 'bad' , '2', '3', 'km').error()
env.expect('ft.search', 'idx', '*', 'GEOFILTER', 'test', '1' , 'bad', '3', 'km').error()
env.expect('ft.search', 'idx', '*', 'GEOFILTER', 'test', '1' , '2', 'bad', 'km').error()
env.expect('ft.search', 'idx', '*', 'GEOFILTER', 'test', '1' , '2', '3', 'bad').error()
def testReducerError(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'NUMERIC', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', '1').equal('OK')
env.expect('ft.aggregate', 'idx', '*', 'GROUPBY', '1', '@test', 'REDUCE', 'COUNT', 'bad').error()
env.expect('ft.aggregate', 'idx', '*', 'GROUPBY', '1', '@test', 'REDUCE', 'COUNT', '0', 'as').error()
def testGroupbyError(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'NUMERIC', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', '1').equal('OK')
env.expect('ft.aggregate', 'idx', '*', 'GROUPBY', '1', '@test', 'REDUCE').error()
if not env.isCluster(): # todo: remove once fixed on the coordinator
env.expect('ft.aggregate', 'idx', '*', 'GROUPBY', '1', '@test1').error()
env.expect('ft.aggregate', 'idx', '*', 'GROUPBY', '1', '@test', 'REDUCE', 'bad', '0').error()
if not env.isCluster(): # todo: remove once fixed on the coordinator
env.expect('ft.aggregate', 'idx', '*', 'GROUPBY', '1', '@test', 'REDUCE', 'SUM', '1', '@test1').error()
def testGroupbyWithSort(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'NUMERIC', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', '1').equal('OK')
env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', 'test', '1').equal('OK')
env.expect('ft.add', 'idx', 'doc3', '1.0', 'FIELDS', 'test', '2').equal('OK')
env.expect('ft.aggregate', 'idx', '*', 'SORTBY', '2', '@test', 'ASC',
'GROUPBY', '1', '@test', 'REDUCE', 'COUNT', '0', 'as', 'count').equal([2L, ['test', '2', 'count', '1'], ['test', '1', 'count', '2']])
def testApplyError(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
env.expect('ft.aggregate', 'idx', '*', 'APPLY', 'split(@test)', 'as').error()
def testLoadError(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
env.expect('ft.aggregate', 'idx', '*', 'LOAD', 'bad').error()
env.expect('ft.aggregate', 'idx', '*', 'LOAD', 'bad', 'test').error()
env.expect('ft.aggregate', 'idx', '*', 'LOAD', '2', 'test').error()
env.expect('ft.aggregate', 'idx', '*', 'LOAD', '2', '@test').error()
def testMissingArgsError(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
env.expect('ft.aggregate', 'idx').error()
def testUnexistsScorer(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
env.expect('ft.search', 'idx', '*', 'SCORER', 'bad').error()
def testHighlightWithUnknowsProperty(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
env.expect('ft.aggregate', 'idx', '*', 'HIGHLIGHT', 'FIELDS', '1', 'test1').error()
def testBadFilterExpression(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
env.expect('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'FILTER', 'blabla').error()
if not env.isCluster(): # todo: remove once fixed on the coordinator
env.expect('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'FILTER', '@test1 > 1').error()
def testWithSortKeysOnNoneSortableValue(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
env.expect('ft.search', 'idx', '*', 'WITHSORTKEYS', 'SORTBY', 'test').equal([1L, 'doc1', '$foo', ['test', 'foo']])
def testWithWithRawIds(env):
env.skipOnCluster() # todo: remove once fixed on the coordinator
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT').equal('OK')
waitForIndex(env, 'idx')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
env.expect('ft.search', 'idx', '*', 'WITHRAWIDS').equal([1L, 'doc1', 1L, ['test', 'foo']])
def testUnkownIndex(env):
env.skipOnCluster() # todo: remove once fixed on the coordinator
env.expect('ft.aggregate').error()
env.expect('ft.aggregate', 'idx', '*').error()
env.expect('ft.aggregate', 'idx', '*', 'WITHCURSOR').error()
def testExplainError(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT').equal('OK')
env.expect('FT.EXPLAIN', 'idx', '(').error()
def testBadCursor(env):
env.expect('FT.CURSOR', 'READ', 'idx').error()
env.expect('FT.CURSOR', 'READ', 'idx', '1111').error()
env.expect('FT.CURSOR', 'READ', 'idx', 'bad').error()
env.expect('FT.CURSOR', 'DROP', 'idx', '1111').error()
env.expect('FT.CURSOR', 'bad', 'idx', '1111').error()
def testGroupByWithApplyError(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
err = env.cmd('FT.AGGREGATE', 'idx', '*', 'APPLY', 'split()', 'GROUPBY', '1', '@test', 'REDUCE', 'COUNT', '0', 'AS', 'count')[1]
assertEqualIgnoreCluster(env, str(err[0]), 'Invalid number of arguments for split')
def testSubStrErrors(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
err = env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'matched_terms()', 'as', 'a', 'APPLY', 'substr(@a,0,4)')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test2', 'APPLY', 'substr("test",3,-2)', 'as', 'a')
env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test2', 'APPLY', 'substr("test",3,1000)', 'as', 'a')
env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test2', 'APPLY', 'substr("test",-1,2)', 'as', 'a')
env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test2', 'APPLY', 'substr("test")', 'as', 'a')
env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test2', 'APPLY', 'substr(1)', 'as', 'a')
env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test2', 'APPLY', 'substr("test", "test")', 'as', 'a')
env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test2', 'APPLY', 'substr("test", "test", "test")', 'as', 'a')
env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test2', 'APPLY', 'substr("test", "-1", "-1")', 'as', 'a')
env.assertTrue(env.isUp())
def testToUpperLower(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
env.expect('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'lower(@test)', 'as', 'a').equal([1L, ['test', 'foo', 'a', 'foo']])
env.expect('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'lower("FOO")', 'as', 'a').equal([1L, ['test', 'foo', 'a', 'foo']])
env.expect('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'upper(@test)', 'as', 'a').equal([1L, ['test', 'foo', 'a', 'FOO']])
env.expect('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'upper("foo")', 'as', 'a').equal([1L, ['test', 'foo', 'a', 'FOO']])
err = env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'upper()', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'lower()', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
env.expect('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'upper(1)', 'as', 'a').equal([1L, ['test', 'foo', 'a', None]])
env.expect('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'lower(1)', 'as', 'a').equal([1L, ['test', 'foo', 'a', None]])
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'upper(1,2)', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'lower(1,2)', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
def testMatchedTerms(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
env.expect('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'matched_terms()', 'as', 'a').equal([1L, ['test', 'foo', 'a', None]])
env.expect('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'matched_terms()', 'as', 'a').equal([1L, ['test', 'foo', 'a', ['foo']]])
env.expect('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'matched_terms(100)', 'as', 'a').equal([1L, ['test', 'foo', 'a', ['foo']]])
env.expect('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'matched_terms(-100)', 'as', 'a').equal([1L, ['test', 'foo', 'a', ['foo']]])
env.expect('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'matched_terms("test")', 'as', 'a').equal([1L, ['test', 'foo', 'a', ['foo']]])
def testStrFormatError(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
err = env.cmd('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'format()', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'format("%s")', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'format("%", "test")', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'format("%b", "test")', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'format(5)', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
env.expect('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'upper(1)', 'as', 'b', 'APPLY', 'format("%s", @b)', 'as', 'a').equal([1L, ['test', 'foo', 'b', None, 'a', '(null)']])
# working example
env.expect('ft.aggregate', 'idx', 'foo', 'APPLY', 'format("%%s-test", "test")', 'as', 'a').equal([1L, ['a', '%s-test']])
env.expect('ft.aggregate', 'idx', 'foo', 'APPLY', 'format("%s-test", "test")', 'as', 'a').equal([1L, ['a', 'test-test']])
def testTimeFormatError(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'NUMERIC').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', '12234556').equal('OK')
err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'timefmt()', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
if not env.isCluster(): # todo: remove once fixed on the coordinator
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'timefmt(@test1)', 'as', 'a').error()
env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'timefmt(@test)', 'as', 'a')
env.assertTrue(env.isUp())
err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'timefmt(@test, 4)', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'timefmt("awfawf")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'timefmt(235325153152356426246246246254)', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'timefmt(@test, "%s")' % ('d' * 2048), 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'hour("not_number")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'minute("not_number")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'day("not_number")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'month("not_number")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'dayofweek("not_number")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'dayofmonth("not_number")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'dayofyear("not_number")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'year("not_number")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'monthofyear("not_number")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
def testMonthOfYear(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'NUMERIC').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', '12234556').equal('OK')
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'monthofyear(@test)', 'as', 'a').equal([1L, ['test', '12234556', 'a', '4']])
err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'monthofyear(@test, 112)', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'monthofyear()', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'monthofyear("bad")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
def testParseTimeErrors(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'NUMERIC').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', '12234556').equal('OK')
err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'parse_time()', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'parse_time(11)', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'parse_time(11,22)', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'parse_time("%s", "%s")' % ('d' * 2048, 'd' * 2048), 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'parse_time("test", "%s")' % ('d' * 2048), 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
def testMathFunctions(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'NUMERIC').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', '12234556').equal('OK')
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'exp(@test)', 'as', 'a').equal([1L, ['test', '12234556', 'a', 'inf']])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'ceil(@test)', 'as', 'a').equal([1L, ['test', '12234556', 'a', '12234556']])
def testErrorOnOpperation(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'NUMERIC').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', '12234556').equal('OK')
err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', '1 + split()', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'split() + 1', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', '"bad" + "bad"', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'split("bad" + "bad")', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', '!(split("bad" + "bad"))', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'APPLY', '!@test', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
def testSortkeyUnsortable(env):
env.cmd('ft.create', 'idx', 'ON', 'HASH', 'schema', 'test', 'text')
env.cmd('ft.add', 'idx', 'doc1', 1, 'fields', 'test', 'foo')
rv = env.cmd('ft.aggregate', 'idx', 'foo', 'withsortkeys',
'load', '1', '@test',
'sortby', '1', '@test')
env.assertEqual([1, '$foo', ['test', 'foo']], rv)
def testIssue919(env):
# This only works if the missing field has a lower sortable index
# than the present field..
env.cmd('ft.create', 'idx', 'ON', 'HASH',
'schema', 't1', 'text', 'sortable', 'n1', 'numeric', 'sortable')
env.cmd('ft.add', 'idx', 'doc1', 1, 'fields', 'n1', 42)
rv = env.cmd('ft.search', 'idx', '*', 'sortby', 't1', 'desc')
env.assertEqual([1L, 'doc1', ['n1', '42']], rv)
def testIssue1074(env):
# Ensure that sortable fields are returned in their string form from the
# document
env.cmd('ft.create', 'idx', 'ON', 'HASH',
'schema', 't1', 'text', 'n1', 'numeric', 'sortable')
env.cmd('ft.add', 'idx', 'doc1', 1, 'fields', 't1', 'hello', 'n1', 1581011976800)
rv = env.cmd('ft.search', 'idx', '*', 'sortby', 'n1')
env.assertEqual([1L, 'doc1', ['n1', '1581011976800', 't1', 'hello']], rv)
def testIssue1085(env):
env.skipOnCluster()
env.cmd('FT.CREATE issue1085 ON HASH SCHEMA foo TEXT SORTABLE bar NUMERIC SORTABLE')
for i in range(1, 10):
env.cmd('FT.ADD issue1085 document_%d 1 REPLACE FIELDS foo foo%d bar %d' % (i, i, i))
res = env.cmd('FT.SEARCH', 'issue1085', '@bar:[8 8]')
env.assertEqual(toSortedFlatList(res), toSortedFlatList([1L, 'document_8', ['foo', 'foo8', 'bar', '8']]))
for i in range(1, 10):
env.cmd('FT.ADD issue1085 document_8 1 REPLACE FIELDS foo foo8 bar 8')
env.expect('ft.debug GC_FORCEINVOKE issue1085').equal('DONE')
res = env.cmd('FT.SEARCH', 'issue1085', '@bar:[8 8]')
env.assertEqual(toSortedFlatList(res), toSortedFlatList([1, 'document_8', ['foo', 'foo8', 'bar', '8']]))
def grouper(iterable, n, fillvalue=None):
"Collect data into fixed-length chunks or blocks"
from itertools import izip_longest
# grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
args = [iter(iterable)] * n
return izip_longest(fillvalue=fillvalue, *args)
def to_dict(r):
return {r[i]: r[i + 1] for i in range(0, len(r), 2)}
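# A minimal sketch (hypothetical values, not from the test suite) of how these helpers turn
# the flat array replies returned by commands such as FT.INFO into handier Python structures:
#
#   to_dict(['num_docs', '10', 'num_terms', '42'])
#   # -> {'num_docs': '10', 'num_terms': '42'}
#
#   list(grouper([1L, 'doc1', ['f', 'v'], 2L, 'doc2', ['f', 'v']], 3))
#   # -> [(1L, 'doc1', ['f', 'v']), (2L, 'doc2', ['f', 'v'])]
#
# Note that grouper relies on itertools.izip_longest, which only exists on Python 2;
# the Python 3 equivalent is itertools.zip_longest.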
def testOptimize(env):
env.skipOnCluster()
env.cmd('ft.create', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT', 'SORTABLE')
env.cmd('FT.ADD', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo')
env.assertEqual(0, env.cmd('FT.OPTIMIZE', 'idx'))
with env.assertResponseError():
env.assertOk(env.cmd('FT.OPTIMIZE', 'idx', '666'))
env.expect('FT.OPTIMIZE', 'fake_idx').error()
def testInfoError(env):
env.expect('ft.info', 'no_idx').error()
def testSetPayload(env):
env.skipOnCluster()
env.expect('flushall')
env.expect('ft.create idx ON HASH schema name text').equal('OK')
env.expect('ft.add idx hotel 1.0 fields name hilton').equal('OK')
env.expect('FT.SETPAYLOAD idx hotel payload').equal('OK')
env.expect('FT.SETPAYLOAD idx hotel payload').equal('OK')
env.expect('FT.SETPAYLOAD idx fake_hotel').error() \
.contains("wrong number of arguments for 'FT.SETPAYLOAD' command")
env.expect('FT.SETPAYLOAD fake_idx hotel payload').error().contains('Unknown Index name')
env.expect('FT.SETPAYLOAD idx fake_hotel payload').error().contains('Document not in index')
def testIndexNotRemovedFromCursorListAfterRecreated(env):
env.expect('FT.CREATE idx ON HASH SCHEMA f1 TEXT').ok()
env.expect('FT.AGGREGATE idx * WITHCURSOR').equal([[0], 0])
env.expect('FT.CREATE idx ON HASH SCHEMA f1 TEXT').error()
env.expect('FT.AGGREGATE idx * WITHCURSOR').equal([[0], 0])
def testHindiStemmer(env):
env.cmd('FT.CREATE', 'idxTest', 'LANGUAGE_FIELD', '__language', 'SCHEMA', 'body', 'TEXT')
env.cmd('FT.ADD', 'idxTest', 'doc1', 1.0, 'LANGUAGE', 'hindi', 'FIELDS', 'body', u'अँगरेजी अँगरेजों अँगरेज़')
res = env.cmd('FT.SEARCH', 'idxTest', u'अँगरेज़')
res1 = {res[2][i]:res[2][i + 1] for i in range(0, len(res[2]), 2)}
env.assertEqual(u'अँगरेजी अँगरेजों अँगरेज़', unicode(res1['body'], 'utf-8'))
def testMOD507(env):
env.skipOnCluster()
env.expect('ft.create idx ON HASH SCHEMA t1 TEXT').ok()
for i in range(50):
env.expect('ft.add idx doc-%d 1.0 FIELDS t1 foo' % i).ok()
for i in range(50):
env.expect('del doc-%d' % i).equal(1)
res = env.cmd('FT.SEARCH', 'idx', '*', 'WITHSCORES', 'SUMMARIZE', 'FRAGS', '1', 'LEN', '25', 'HIGHLIGHT', 'TAGS', "<span style='background-color:yellow'>", "</span>")
# from RediSearch 2.0 onwards, docs are removed from the index when `DEL` is called
env.assertEqual(len(res), 1)
def testUnseportedSortableTypeErrorOnTags(env):
env.skipOnCluster()
env.expect('FT.CREATE idx ON HASH SCHEMA f1 TEXT SORTABLE f2 NUMERIC SORTABLE NOINDEX f3 TAG SORTABLE NOINDEX f4 TEXT SORTABLE NOINDEX').ok()
env.expect('FT.ADD idx doc1 1.0 FIELDS f1 foo1 f2 1 f3 foo1 f4 foo1').ok()
env.expect('FT.ADD idx doc1 1.0 REPLACE PARTIAL FIELDS f2 2 f3 foo2 f4 foo2').ok()
res = env.cmd('HGETALL doc1')
env.assertEqual(toSortedFlatList(res), toSortedFlatList(['f1', 'foo1', 'f2', '2', 'f3', 'foo2', 'f4', 'foo2', '__score', '1.0']))
res = env.cmd('FT.SEARCH idx *')
env.assertEqual(toSortedFlatList(res), toSortedFlatList([1L, 'doc1', ['f1', 'foo1', 'f2', '2', 'f3', 'foo2', 'f4', 'foo2']]))
def testIssue1158(env):
env.cmd('FT.CREATE idx ON HASH SCHEMA txt1 TEXT txt2 TEXT txt3 TEXT')
env.cmd('FT.ADD idx doc1 1.0 FIELDS txt1 10 txt2 num1')
res = env.cmd('FT.GET idx doc1')
env.assertEqual(toSortedFlatList(res), toSortedFlatList(['txt1', '10', 'txt2', 'num1']))
# only the 1st condition is checked (the 2nd would return an error)
env.expect('FT.ADD idx doc1 1.0 REPLACE PARTIAL if @txt1||to_number(@txt2)<5 FIELDS txt1 5').equal('OK')
env.expect('FT.ADD idx doc1 1.0 REPLACE PARTIAL if @txt3&&to_number(@txt2)<5 FIELDS txt1 5').equal('NOADD')
# both are checked
env.expect('FT.ADD idx doc1 1.0 REPLACE PARTIAL if to_number(@txt1)>11||to_number(@txt1)>42 FIELDS txt2 num2').equal('NOADD')
env.expect('FT.ADD idx doc1 1.0 REPLACE PARTIAL if to_number(@txt1)>11||to_number(@txt1)<42 FIELDS txt2 num2').equal('OK')
env.expect('FT.ADD idx doc1 1.0 REPLACE PARTIAL if to_number(@txt1)>11&&to_number(@txt1)>42 FIELDS txt2 num2').equal('NOADD')
env.expect('FT.ADD idx doc1 1.0 REPLACE PARTIAL if to_number(@txt1)>11&&to_number(@txt1)<42 FIELDS txt2 num2').equal('NOADD')
res = env.cmd('FT.GET idx doc1')
env.assertEqual(toSortedFlatList(res), toSortedFlatList(['txt1', '5', 'txt2', 'num2']))
def testIssue1159(env):
env.cmd('FT.CREATE idx ON HASH SCHEMA f1 TAG')
for i in range(1000):
env.cmd('FT.add idx doc%d 1.0 FIELDS f1 foo' % i)
def testIssue1169(env):
env.cmd('FT.CREATE idx ON HASH SCHEMA txt1 TEXT txt2 TEXT')
env.cmd('FT.ADD idx doc1 1.0 FIELDS txt1 foo')
env.expect('FT.AGGREGATE idx foo GROUPBY 1 @txt1 REDUCE FIRST_VALUE 1 @txt2 as test').equal([1L, ['txt1', 'foo', 'test', None]])
def testIssue1184(env):
env.skipOnCluster()
field_types = ['TEXT', 'NUMERIC', 'TAG']
env.assertOk(env.execute_command('ft.config', 'set', 'FORK_GC_CLEAN_THRESHOLD', 0))
for ft in field_types:
env.assertOk(env.execute_command('FT.CREATE idx ON HASH SCHEMA field ' + ft))
res = env.execute_command('ft.info', 'idx')
d = {res[i]: res[i + 1] for i in range(0, len(res), 2)}
env.assertEqual(d['inverted_sz_mb'], '0')
env.assertEqual(d['num_records'], '0')
value = '42'
env.assertOk(env.execute_command('FT.ADD idx doc0 1 FIELDS field ' + value))
doc = env.cmd('FT.SEARCH idx *')
env.assertEqual(doc, [1L, 'doc0', ['field', value]])
res = env.execute_command('ft.info', 'idx')
d = {res[i]: res[i + 1] for i in range(0, len(res), 2)}
env.assertGreater(d['inverted_sz_mb'], '0')
env.assertEqual(d['num_records'], '1')
env.assertEqual(env.execute_command('FT.DEL idx doc0'), 1)
env.cmd('ft.debug', 'GC_FORCEINVOKE', 'idx')
res = env.execute_command('ft.info', 'idx')
d = {res[i]: res[i + 1] for i in range(0, len(res), 2)}
env.assertEqual(d['inverted_sz_mb'], '0')
env.assertEqual(d['num_records'], '0')
env.cmd('FT.DROP idx')
env.cmd('DEL doc0')
def testIndexListCommand(env):
env.expect('FT.CREATE idx1 ON HASH SCHEMA n NUMERIC').ok()
env.expect('FT.CREATE idx2 ON HASH SCHEMA n NUMERIC').ok()
res = env.cmd('FT._LIST')
env.assertEqual(set(res), set(['idx1', 'idx2']))
env.expect('FT.DROP idx1').ok()
env.expect('FT._LIST').equal(['idx2'])
env.expect('FT.CREATE idx3 ON HASH SCHEMA n NUMERIC').ok()
res = env.cmd('FT._LIST')
env.assertEqual(set(res), set(['idx2', 'idx3']))
def testIssue1208(env):
env.cmd('FT.CREATE idx ON HASH SCHEMA n NUMERIC')
env.cmd('FT.ADD idx doc1 1 FIELDS n 1.0321e5')
env.cmd('FT.ADD idx doc2 1 FIELDS n 101.11')
env.cmd('FT.ADD idx doc3 1 FIELDS n 0.0011')
env.expect('FT.SEARCH', 'idx', '@n:[1.1432E3 inf]').equal([1L, 'doc1', ['n', '1.0321e5']])
env.expect('FT.SEARCH', 'idx', '@n:[-1.12E-3 1.12E-1]').equal([1L, 'doc3', ['n', '0.0011']])
res = [3L, 'doc3', ['n', '0.0011'], 'doc2', ['n', '101.11'], 'doc1', ['n', '1.0321e5']]
env.expect('FT.SEARCH', 'idx', '@n:[-inf inf]').equal(res)
env.expect('FT.ADD idx doc3 1 REPLACE PARTIAL IF @n>42e3 FIELDS n 100').equal('NOADD')
env.expect('FT.ADD idx doc3 1 REPLACE PARTIAL IF @n<42e3 FIELDS n 100').ok()
# print env.cmd('FT.SEARCH', 'idx', '@n:[-inf inf]')
def testFieldsCaseSensetive(env):
# this test will not pass on the coordinator currently: if one shard returns empty results,
# the coordinator will not reflect the errors
env.skipOnCluster()
conn = getConnectionByEnv(env)
env.cmd('FT.CREATE idx ON HASH SCHEMA n NUMERIC f TEXT t TAG g GEO')
# make sure text fields are case sensitive
conn.execute_command('hset', 'doc1', 'F', 'test')
conn.execute_command('hset', 'doc2', 'f', 'test')
env.expect('ft.search idx @f:test').equal([1L, 'doc2', ['f', 'test']])
env.expect('ft.search idx @F:test').equal([0])
# make sure numeric fields are case sensitive
conn.execute_command('hset', 'doc3', 'N', '1.0')
conn.execute_command('hset', 'doc4', 'n', '1.0')
env.expect('ft.search', 'idx', '@n:[0 2]').equal([1L, 'doc4', ['n', '1.0']])
env.expect('ft.search', 'idx', '@N:[0 2]').equal([0])
# make sure tag fields are case sensitive
conn.execute_command('hset', 'doc5', 'T', 'tag')
conn.execute_command('hset', 'doc6', 't', 'tag')
env.expect('ft.search', 'idx', '@t:{tag}').equal([1L, 'doc6', ['t', 'tag']])
env.expect('ft.search', 'idx', '@T:{tag}').equal([0])
# make sure geo fields are case sensitive
conn.execute_command('hset', 'doc8', 'G', '-113.524,53.5244')
conn.execute_command('hset', 'doc9', 'g', '-113.524,53.5244')
env.expect('ft.search', 'idx', '@g:[-113.52 53.52 20 mi]').equal([1L, 'doc9', ['g', '-113.524,53.5244']])
env.expect('ft.search', 'idx', '@G:[-113.52 53.52 20 mi]').equal([0])
# make sure search filters are case sensitive
env.expect('ft.search', 'idx', '@n:[0 2]', 'FILTER', 'n', 0, 2).equal([1L, 'doc4', ['n', '1.0']])
env.expect('ft.search', 'idx', '@n:[0 2]', 'FILTER', 'N', 0, 2).equal([0])
# make sure RETURN is case sensitive
env.expect('ft.search', 'idx', '@n:[0 2]', 'RETURN', '1', 'n').equal([1L, 'doc4', ['n', '1']])
env.expect('ft.search', 'idx', '@n:[0 2]', 'RETURN', '1', 'N').equal([1L, 'doc4', []])
# make sure SORTBY is case sensitive
conn.execute_command('hset', 'doc7', 'n', '1.1')
env.expect('ft.search', 'idx', '@n:[0 2]', 'SORTBY', 'n').equal([2L, 'doc4', ['n', '1.0'], 'doc7', ['n', '1.1']])
env.expect('ft.search', 'idx', '@n:[0 2]', 'SORTBY', 'N').error().contains('not loaded nor in schema')
# make sure aggregation load is case sensitive
env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'LOAD', '1', '@n').equal([1L, ['n', '1'], ['n', '1.1']])
env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'LOAD', '1', '@N').equal([1L, [], []])
# make sure aggregation apply is case sensitive
env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'LOAD', '1', '@n', 'apply', '@n', 'as', 'r').equal([1L, ['n', '1', 'r', '1'], ['n', '1.1', 'r', '1.1']])
env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'LOAD', '1', '@n', 'apply', '@N', 'as', 'r').error().contains('not loaded in pipeline')
# make sure aggregation filter is case sensitive
env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'LOAD', '1', '@n', 'filter', '@n==1.0').equal([1L, ['n', '1']])
env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'LOAD', '1', '@n', 'filter', '@N==1.0').error().contains('not loaded in pipeline')
# make sure aggregation groupby is case sensitive
env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'LOAD', '1', '@n', 'groupby', '1', '@n', 'reduce', 'count', 0, 'as', 'count').equal([2L, ['n', '1', 'count', '1'], ['n', '1.1', 'count', '1']])
env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'LOAD', '1', '@n', 'groupby', '1', '@N', 'reduce', 'count', 0, 'as', 'count').error().contains('No such property')
# make sure aggregation sortby is case sensitive
env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'LOAD', '1', '@n', 'sortby', '1', '@n').equal([2L, ['n', '1'], ['n', '1.1']])
env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'LOAD', '1', '@n', 'sortby', '1', '@N').error().contains('not loaded')
def testSortedFieldsCaseSensetive(env):
# this test will not pass on the coordinator currently: if one shard returns empty results,
# the coordinator will not reflect the errors
env.skipOnCluster()
conn = getConnectionByEnv(env)
env.cmd('FT.CREATE idx ON HASH SCHEMA n NUMERIC SORTABLE f TEXT SORTABLE t TAG SORTABLE g GEO SORTABLE')
# make sure text fields are case sensitive
conn.execute_command('hset', 'doc1', 'F', 'test')
conn.execute_command('hset', 'doc2', 'f', 'test')
env.expect('ft.search idx @f:test').equal([1L, 'doc2', ['f', 'test']])
env.expect('ft.search idx @F:test').equal([0])
# make sure numeric fields are case sensitive
conn.execute_command('hset', 'doc3', 'N', '1.0')
conn.execute_command('hset', 'doc4', 'n', '1.0')
env.expect('ft.search', 'idx', '@n:[0 2]').equal([1L, 'doc4', ['n', '1.0']])
env.expect('ft.search', 'idx', '@N:[0 2]').equal([0])
# make sure tag fields are case sensitive
conn.execute_command('hset', 'doc5', 'T', 'tag')
conn.execute_command('hset', 'doc6', 't', 'tag')
env.expect('ft.search', 'idx', '@t:{tag}').equal([1L, 'doc6', ['t', 'tag']])
env.expect('ft.search', 'idx', '@T:{tag}').equal([0])
# make sure geo fields are case sensitive
conn.execute_command('hset', 'doc8', 'G', '-113.524,53.5244')
conn.execute_command('hset', 'doc9', 'g', '-113.524,53.5244')
env.expect('ft.search', 'idx', '@g:[-113.52 53.52 20 mi]').equal([1L, 'doc9', ['g', '-113.524,53.5244']])
env.expect('ft.search', 'idx', '@G:[-113.52 53.52 20 mi]').equal([0])
# make sure search filters are case sensitive
env.expect('ft.search', 'idx', '@n:[0 2]', 'FILTER', 'n', 0, 2).equal([1L, 'doc4', ['n', '1.0']])
env.expect('ft.search', 'idx', '@n:[0 2]', 'FILTER', 'N', 0, 2).equal([0])
# make sure RETURN is case sensitive
env.expect('ft.search', 'idx', '@n:[0 2]', 'RETURN', '1', 'n').equal([1L, 'doc4', ['n', '1']])
env.expect('ft.search', 'idx', '@n:[0 2]', 'RETURN', '1', 'N').equal([1L, 'doc4', []])
# make sure SORTBY is case sensitive
conn.execute_command('hset', 'doc7', 'n', '1.1')
env.expect('ft.search', 'idx', '@n:[0 2]', 'SORTBY', 'n').equal([2L, 'doc4', ['n', '1.0'], 'doc7', ['n', '1.1']])
env.expect('ft.search', 'idx', '@n:[0 2]', 'SORTBY', 'N').error().contains('not loaded nor in schema')
# make sure aggregation apply is case sensitive
env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'apply', '@n', 'as', 'r').equal([1L, ['n', '1', 'r', '1'], ['n', '1.1', 'r', '1.1']])
env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'apply', '@N', 'as', 'r').error().contains('not loaded in pipeline')
# make sure aggregation filter is case sensitive
env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'filter', '@n==1.0').equal([1L, ['n', '1']])
env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'filter', '@N==1.0').error().contains('not loaded in pipeline')
# make sure aggregation groupby is case sensitive
env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'groupby', '1', '@n', 'reduce', 'count', 0, 'as', 'count').equal([2L, ['n', '1', 'count', '1'], ['n', '1.1', 'count', '1']])
env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'groupby', '1', '@N', 'reduce', 'count', 0, 'as', 'count').error().contains('No such property')
# make sure aggregation sortby is case sensitive
env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'sortby', '1', '@n').equal([2L, ['n', '1'], ['n', '1.1']])
env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'sortby', '1', '@N').error().contains('not loaded')
def testScoreLangPayloadAreReturnedIfCaseNotMatchToSpecialFields(env):
conn = getConnectionByEnv(env)
env.cmd('FT.CREATE idx ON HASH SCHEMA n NUMERIC SORTABLE')
conn.execute_command('hset', 'doc1', 'n', '1.0', '__Language', 'eng', '__Score', '1', '__Payload', '10')
res = env.cmd('ft.search', 'idx', '@n:[0 2]')
env.assertEqual(toSortedFlatList(res), toSortedFlatList([1L, 'doc1', ['n', '1.0', '__Language', 'eng', '__Score', '1', '__Payload', '10']]))
def testReturnSameFieldDifferentCase(env):
conn = getConnectionByEnv(env)
env.cmd('FT.CREATE idx ON HASH SCHEMA n NUMERIC SORTABLE N NUMERIC SORTABLE')
conn.execute_command('hset', 'doc1', 'n', '1.0', 'N', '2.0')
env.expect('ft.search', 'idx', '@n:[0 2]', 'RETURN', '2', 'n', 'N').equal([1L, 'doc1', ['n', '1', 'N', '2']])
def testCreateIfNX(env):
env.expect('FT._CREATEIFNX idx ON HASH SCHEMA n NUMERIC SORTABLE N NUMERIC SORTABLE').ok()
env.expect('FT._CREATEIFNX idx ON HASH SCHEMA n NUMERIC SORTABLE N NUMERIC SORTABLE').ok()
def testDropIfX(env):
env.expect('FT._DROPIFX idx').ok()
def testDeleteIfX(env):
env.expect('FT._DROPINDEXIFX idx').ok()
def testAlterIfNX(env):
env.expect('FT.CREATE idx ON HASH SCHEMA n NUMERIC').ok()
env.expect('FT._ALTERIFNX idx SCHEMA ADD n1 NUMERIC').ok()
env.expect('FT._ALTERIFNX idx SCHEMA ADD n1 NUMERIC').ok()
res = env.cmd('ft.info idx')
res = {res[i]: res[i + 1] for i in range(0, len(res), 2)}['fields']
env.assertEqual(res, [['n', 'type', 'NUMERIC'], ['n1', 'type', 'NUMERIC']])
def testAliasAddIfNX(env):
env.expect('FT.CREATE idx ON HASH SCHEMA n NUMERIC').ok()
env.expect('FT._ALIASADDIFNX a1 idx').ok()
env.expect('FT._ALIASADDIFNX a1 idx').ok()
def testAliasDelIfX(env):
env.expect('FT._ALIASDELIFX a1').ok()
|
test_thread_safe_fixtures.py
|
import asyncio
import threading
import pytest
@pytest.fixture(scope="package")
def event_loop():
loop = asyncio.get_event_loop_policy().new_event_loop()
is_ready = threading.Event()
def run_forever():
is_ready.set()
loop.run_forever()
thread = threading.Thread(target=run_forever, daemon=True)
thread.start()
is_ready.wait()
yield loop
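# The loop thread is a daemon, so it simply dies with the interpreter. If explicit teardown
# were desired, a sketch (not part of the original fixture) could follow the yield:
#   loop.call_soon_threadsafe(loop.stop)
#   thread.join()
#   loop.close()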
@pytest.fixture(scope="package")
async def async_fixture(event_loop):
await asyncio.sleep(0)
yield "fixture"
@pytest.mark.asyncio
def test_event_loop_thread_safe(async_fixture):
"""Make sure that async fixtures still work, even if the event loop
is running in another thread.
"""
assert async_fixture == "fixture"
|
run.py
|
#!/usr/bin/env python
"""
Created on Mon Aug 29 2016
Modified from the similar program by Nick
@author: Adrian Utama
Modified from the authors above so that the program works seamlessly on both Windows and Linux.
@author: Chin Chean Lim
Optical Powermeter (tkinter) Version 1.03 (last modified on Aug 29 2019)
v1.01: Fixed a bug where worker thread 1 ran endlessly right after the program started.
v1.02: Corrected calibration (svn-8 onwards)
v1.03: Updated device selection from serial list to work with Windows.
"""
import tkinter
import os
import glob
import time
import powermeter as pm
import queue
import threading
import json
import serial.tools.list_ports
REFRESH_RATE = 100 #100ms
NUM_OF_AVG = 50
RESISTORS = [1e6, 100e3, 10e3, 1e3, 20] # Sense resistors that are used. The 0th element corresponds to the 1st range. The second resistor should be 100 kOhm (110 kOhm in parallel with 1 MOhm).
CALIBRATION_FILE = 's5106_interpolated.cal' #detector calibration file
def insanity_check(number, min_value, max_value):
'''Clamp the given number to the range [min_value, max_value].'''
if number > max_value:
return max_value
if number < min_value:
return min_value
else:
return number
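# Example of the clamping behaviour (hypothetical values):
#   insanity_check(1200, 340, 1099) -> 1099  (above range, clamped to max)
#   insanity_check(100, 340, 1099)  -> 340   (below range, clamped to min)
#   insanity_check(780, 340, 1099)  -> 780   (inside range, returned unchanged)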
class GuiPart:
def __init__(self, master, queue, endCommand):
self.queue = queue
# Variable to signify the start of the measurement
self.started = 0
self.trigger = 0 # If this is true, the program will flush the previous average values
self.range = 1
# Set the object, initial value, and steps for the modifiable parameter (using buttonModifyPressed).
# Up to now there is only one entry at channel 0, which is the wavelength detection of the laser
self.entry = [0]
self.set_value = [780]
self.rough_step = [30]
self.fine_step = [1]
# Set up the GUI
tkinter.Label(master, text='Select Device', font=("Helvetica", 16)).grid(row=1, padx=5, pady=5, column=1)
portslist = list(serial.tools.list_ports.comports())
self.devicelist = []
self.addresslist = []
if len(portslist)==0:
print("No Serial Devices detected!")
root.destroy()
for port in portslist:
self.devicelist.append(port.device+" "+port.description)
self.addresslist.append(port.device)
self.set_ports = tkinter.StringVar(master)
self.ports_option = tkinter.OptionMenu(master, self.set_ports, *self.devicelist)
self.ports_option.grid(row = 1, padx = 5, pady = 5, column = 2, columnspan = 3)
self.ports_option.configure(font=("Helvetica", 14), width = 12, justify=tkinter.LEFT)
tkinter.Button(master, text=' Start ', font=("Helvetica", 16), command=lambda:self.startDevice()).grid(sticky="w", row=1, column=5, columnspan = 3, padx=5, pady=5)
tkinter.Label(master, text='Select Range', font=("Helvetica", 16)).grid(row=2, padx=5, pady=5, column=1)
self.set_range = tkinter.StringVar(master)
self.set_range.set("1")
self.range_option = tkinter.OptionMenu(master, self.set_range, "1", "2", "3", "4", "5")
self.range_option.grid(row = 2, padx = 5, pady = 5, column = 2, columnspan = 3)
self.range_option.configure(font=("Helvetica", 14), width = 12, justify=tkinter.LEFT)
self.set_range.trace('w', lambda *args: self.changeRange())
self.set_autorange = tkinter.IntVar()
self.set_autorange.set(1)
self.chk_set = tkinter.Checkbutton(root, text='Auto', font=("Helvetica", 16), variable=self.set_autorange)
self.chk_set.grid(row=2, column=5, columnspan = 3, padx=5, pady=5, sticky="w")
tkinter.Label(master, text='Wavelength', font=("Helvetica", 16)).grid(row=3, padx=5, pady=5, column=1)
self.entry[0] = tkinter.Entry(master, width=10, font=("Helvetica", 16), justify=tkinter.CENTER)
self.entry[0].grid(row=3, column=4)
self.entry[0].insert(0, str(self.set_value[0])+ " nm")
tkinter.Button(master, text='<<', font=("Helvetica", 12), command=lambda:self.buttonModifyPressed(0, 1)).grid(row=3, column=2)
tkinter.Button(master, text='<', font=("Helvetica", 12), command=lambda:self.buttonModifyPressed(0, 2)).grid(row=3, column=3)
tkinter.Button(master, text='>', font=("Helvetica", 12), command=lambda:self.buttonModifyPressed(0, 3)).grid(row=3, column=5)
tkinter.Button(master, text='>>',font=("Helvetica", 12), command=lambda:self.buttonModifyPressed(0, 4)).grid(row=3, column=6)
tkinter.Label(master, text='', font=("Helvetica", 16), width = 2).grid(row=3, padx=5, pady=5, column=7)
self.display_opm = tkinter.StringVar(master)
self.display_opm.set("OFF")
self.label_display_opm = tkinter.Label(master, font=("Helvetica", 60), textvariable=self.display_opm, width=10, bg="black", fg="white")
self.label_display_opm.grid(row=4, columnspan=8, padx=5, pady=5)
tkinter.Button(master, text='Shutdown', font=("Helvetica", 16), command=endCommand).grid(row=10, column=1, padx=5, pady=5)
def startDevice(self):
self.started = 1
for idx, device in enumerate(self.devicelist):
if self.set_ports.get() == device:
deviceAddress = self.addresslist[idx]
print("SelectedPort "+deviceAddress)
self.powermeter = pm.pmcommunication(deviceAddress)
self.changeRange()
print(self.set_ports.get(), "ready to go.")
def changeRange(self):
if self.started == 1:
self.started = 0 # Cut off the measurement for a small time while setting the new values
self.range = int(self.set_range.get())
self.powermeter.set_range(self.range)
self.trigger = 1
self.started = 1 # Resume the measurement
else:
print("You have not started connection to any device.")
def buttonModifyPressed(self, channel, button_type):
# This function is to modify the value in the entry box by using <<, <, >, and >>
        # Channel 0 refers to the entry box: wavelength of the laser
if button_type == 1:
self.set_value[channel] -= self.rough_step[channel]
elif button_type == 2:
self.set_value[channel] -= self.fine_step[channel]
elif button_type == 3:
self.set_value[channel] += self.fine_step[channel]
elif button_type == 4:
self.set_value[channel] += self.rough_step[channel]
if channel == 0:
# Wavelength. The min and max value provided by the calibration table
self.set_value[channel] = insanity_check(self.set_value[channel], 340, 1099)
self.entry[channel].delete(0, tkinter.END)
self.entry[0].insert(0, str(self.set_value[0])+ " nm")
else:
pass
def processIncoming(self):
"""Handle all messages currently in the queue, if any."""
while self.queue.qsize( ):
try:
msg = self.queue.get(0)
# Check contents of message and do whatever is needed. As a
# simple test, print it (in real life, you would
# suitably update the GUI's display in a richer fashion).
print(msg)
except queue.Empty:
# just on general principles, although we don't
# expect this branch to be taken in this case
pass
class ThreadedClient:
"""
Launch the main part of the GUI and the worker thread. periodicCall and
endApplication could reside in the GUI part, but putting them here
means that you have all the thread controls in a single place.
"""
def __init__(self, master):
"""
Start the GUI and the asynchronous threads. We are in the main
(original) thread of the application, which will later be used by
the GUI as well. We spawn a new thread for the worker (I/O).
"""
self.master = master
self.running = 1
# Create the queue
self.queue = queue.Queue()
# Set up the GUI part
self.gui = GuiPart(master, self.queue, self.endApplication)
master.protocol("WM_DELETE_WINDOW", self.endApplication) # About the silly exit button
# Start the procedure regarding the initialisation of experimental parameters and objects
self.initialiseParameters()
# Set up the thread to do asynchronous I/O
# More threads can also be created and used, if necessary
self.thread1 = threading.Thread(target=self.workerThread1_OPM)
self.thread1.start( )
# Start the periodic call in the GUI to check if the queue contains
# anything
self.periodicCall( )
def initialiseParameters(self):
# Initialisation of several variables
self.average_opm = 0
self.average_voltage_opm = 0
# Obtain the calibration table
f = open(CALIBRATION_FILE,'r')
data = json.load(f)
f.close()
self.wavelength_table = data[0]
self.responsivity_table = data[1]
def periodicCall(self):
"""
Check every 100 ms if there is something new in the queue.
"""
self.gui.processIncoming( )
# Setting a refresh rate for periodic call
self.master.after(REFRESH_RATE, self.periodicCall)
# Check whether it is in autorange mode
MAX_VOLTAGE = 2.4
MIN_VOLTAGE = 0.02
if self.gui.set_autorange.get() == 1:
if self.average_voltage_opm > MAX_VOLTAGE:
new_range = self.gui.range + 1
new_range = insanity_check(new_range,1,5)
self.gui.set_range.set(str(new_range))
self.gui.changeRange()
if self.average_voltage_opm < MIN_VOLTAGE:
new_range = self.gui.range - 1
new_range = insanity_check(new_range,1,5)
self.gui.set_range.set(str(new_range))
self.gui.changeRange()
# Convert from average_voltage_opm to average_opm
self.average_opm = self.conversion(self.average_voltage_opm)
# Updating the display value of optical powermeter
if self.gui.started == 1:
power_str = self.floatToStringPower(self.average_opm)
self.gui.display_opm.set(power_str)
else:
self.gui.display_opm.set("OFF")
# Shutting down the program
if not self.running:
print("Shutting Down")
import sys
sys.exit()
def floatToStringPower(self,variable):
if variable > 1:
display = variable
if variable >= 1e1:
power_str = '%.1f'%round(display,1) + " " + "W"
else:
power_str = '%.2f'%round(display,2) + " " + "W"
elif variable > 1e-3:
display = variable *1e3
if variable >= 1e-2:
power_str = '%.1f'%round(display,1) + " " + "mW"
else:
power_str = '%.2f'%round(display,2) + " " + "mW"
elif variable > 1e-6:
display = variable *1e6
if variable >= 1e-5:
power_str = '%.1f'%round(display,1) + " " + "uW"
else:
power_str = '%.2f'%round(display,2) + " " + "uW"
else:
display = variable *1e9
power_str = '%.1f'%round(display,1) + " " + "nW"
return power_str
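    # Illustrative outputs of the formatter above:
    #   self.floatToStringPower(25)      -> "25.0 W"
    #   self.floatToStringPower(0.0035)  -> "3.50 mW"
    #   self.floatToStringPower(2e-6)    -> "2.00 uW"
    #   self.floatToStringPower(4.2e-7)  -> "420.0 nW"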
def conversion(self, voltage):
# Function that converts voltage to power
        amperage = voltage/RESISTORS[self.gui.range - 1] # The 1st range refers to the 0th element of the RESISTORS array
index_wavelength = self.wavelength_table.index(int(self.gui.set_value[0])) # self.gui.set_value[0] refers to wavelength
responsivity = self.responsivity_table[index_wavelength]
power = amperage/float(responsivity)
return power
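    # Worked example for conversion() with illustrative numbers (the actual
    # responsivity comes from CALIBRATION_FILE and may differ): in range 3 the
    # sense resistor is RESISTORS[2] = 10 kOhm, so a 1.0 V reading corresponds
    # to 1.0 / 10e3 = 100 uA of photocurrent; with a responsivity of ~0.5 A/W
    # around 780 nm that is roughly 100e-6 / 0.5 = 200 uW of optical power.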
def workerThread1_OPM(self):
"""
This is where we handle the asynchronous I/O. For example, it may be
a 'select( )'. One important thing to remember is that the thread has
to yield control pretty regularly, by select or otherwise.
"""
while self.running:
if self.gui.started == True:
                # Poll the powermeter and fold the new reading into the
                # running average of the measured voltage.
try:
# Optical Powermeter
if self.gui.trigger == 1:
time.sleep(0.02) # Time to wait for the physical changes to the device: 20 ms
now = float(self.gui.powermeter.get_voltage())
self.average_voltage_opm = now # Flush the previous measured values
self.gui.trigger = 0
else:
now = float(self.gui.powermeter.get_voltage())
self.average_voltage_opm = (NUM_OF_AVG-1)*self.average_voltage_opm/NUM_OF_AVG + now/NUM_OF_AVG
except:
pass
else:
                time.sleep(0.1) # Avoid busy-looping while no device has been started
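    # The running-average update in the loop above is an exponential moving
    # average with weight 1/NUM_OF_AVG: with NUM_OF_AVG = 50 each new reading
    # contributes 2% and the previous average 98%, i.e.
    #   avg_new = 0.98 * avg_old + 0.02 * now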
def endApplication(self):
# Kill and wait for the processes to be killed
self.running = 0
time.sleep(0.1)
# Close the connection to the device
if self.gui.started:
self.gui.powermeter.reset()
self.gui.powermeter.close_port()
if __name__ == '__main__':
root = tkinter.Tk( )
root.title("Optical Powermeter Version 1.03")
img = tkinter.PhotoImage(file='icon.png')
root.tk.call('wm', 'iconphoto', root._w, img)
client = ThreadedClient(root)
root.mainloop( )
|
setupModels.py
|
def setupModels(sys,os,utils,config,random,mproc,modelList):
processes = []
from FMModel import FMModel
from SVDModel import SVDModel
for trial in range(0,config.TRIALS):
strTrial = str(trial)
print("Setting up trial " + strTrial)
p = mproc.Process(target=setupTrial,
args=(utils.MODEL_BOOT_PATH,
strTrial,
utils.PROCESSED_DATA_PATH,
utils.PROCESSED_DATA_PATH_TEMP,
utils.bootsplit,
config.BOOTSTRAP_SPLITS[0],
random, utils.TEST_IDS_DUMMY_PATH))
p.start()
processes.append(p)
for p in processes:
p.join()
processes = []
for trial in range(0,config.TRIALS):
strTrial = str(trial)
for configModel in config.models:
print("Setting up model " + configModel[0])
if configModel[1] == 'FM':
model = FMModel(configModel,utils,config,strTrial)
if configModel[1] == 'SVD':
model = SVDModel(configModel,utils,config,strTrial)
p = mproc.Process(target=model.setup)
processes.append(p)
p.start()
modelList.append(model)
for p in processes:
p.join()
def setupTrial(modelBootPath,strTrial,processedDataPath,processedDataPathTemp,bootsplitFunc,split,random,testIdsDummyPath):
import os
### Setup boot strings ###
bootTrain = modelBootPath + \
'train' + '_t' + strTrial
bootCV = modelBootPath + \
'CV' + '_t' + strTrial
bootTest = modelBootPath + \
'test' + '_t' + strTrial
### Split dataset ###
### Setup test datasets separate for parallel ###
bootsplitFunc(processedDataPath,
processedDataPathTemp + '_t' + strTrial,
bootTrain, bootCV, split,
random)
os.system('cp ' + testIdsDummyPath +
' ' + bootTest)
|
PureSDN22.py
|
# Copyright (C) 2016 Huang MaChi at Chongqing University
# of Posts and Telecommunications, Chongqing, China.
# Copyright (C) 2016 Li Cheng at Beijing University of Posts
# and Telecommunications. www.muzixing.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from operator import attrgetter
from threading import Thread
import copy
import time
from ryu import cfg
from ryu.base import app_manager
from ryu.base.app_manager import lookup_service_brick
from ryu.controller import ofp_event
from ryu.controller.handler import MAIN_DISPATCHER, DEAD_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.lib import hub
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import arp
from ryu.lib.packet import ipv4
from ryu.lib.packet import tcp
from ryu.lib.packet import udp
from DemandEstimation import demand_estimation
from GA_test import GaProcessor
from SRrouting import ShortestForwarding
import GA_compute
import network_awareness
import networkx as nx
import setting
CONF = cfg.CONF
class ShortestForwarding(app_manager.RyuApp):
"""
ShortestForwarding is a Ryu app for forwarding packets on shortest path.
    This app does not define the path computation method itself.
    To get the shortest path, it depends on the network awareness and
    network monitor modules.
"""
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
_CONTEXTS = {
"network_awareness": network_awareness.NetworkAwareness,
}
WEIGHT_MODEL = {'hop': 'weight','bw':'bw'}
def __init__(self, *args, **kwargs):
super(ShortestForwarding, self).__init__(*args, **kwargs)
self.name = "shortest_forwarding"
self.awareness = kwargs["network_awareness"]
self.datapaths = {}
self.seletPathIndex=0
self.weight = self.WEIGHT_MODEL[CONF.weight]
self.newComingFlows={}
self.datapaths = {}
self.port_stats = {}
self.port_speed = {}
self.flow_stats = {}
self.pre_GFF_path = {}
self.flow_speed = {}
self.stats = {}
self.flow_index = []
self.select = {}
self.congested = False
self.flows_len = 0
self.flows = {}
self.traffics = {}
self.hostsList = []
self.port_features = {}
self.free_bandwidth = {} # self.free_bandwidth = {dpid:{port_no:free_bw,},} unit:Kbit/s
self.current_free_bandwidth = {}
self.current_dectect_dp = {}
self.awareness = lookup_service_brick('awareness')
# self.shortest_forwarding = lookup_service_brick('shortest_forwarding')
self.graph = None
self.capabilities = None
self.best_paths = None
self.k = 0
self.gp = GaProcessor()
self.paths = {}
        # Start a green thread to monitor traffic and calculate the
        # free bandwidth of links.
self.monitor_thread = hub.spawn(self._monitor)
@set_ev_cls(ofp_event.EventOFPStateChange, [MAIN_DISPATCHER, DEAD_DISPATCHER])
def _state_change_handler(self, ev):
"""
Collect datapath information.
"""
datapath = ev.datapath
if ev.state == MAIN_DISPATCHER:
if not datapath.id in self.datapaths:
self.logger.debug('register datapath: %016x', datapath.id)
self.datapaths[datapath.id] = datapath
elif ev.state == DEAD_DISPATCHER:
if datapath.id in self.datapaths:
self.logger.debug('unregister datapath: %016x', datapath.id)
del self.datapaths[datapath.id]
def _monitor(self):
"""
Main entry method of monitoring traffic.
"""
while CONF.weight == 'bw' or CONF.weight=='hop':
self.stats['flow'] = {}
self.stats['port'] = {}
self.current_dectect_dp=[]
self.statRecord = []
self.flows = {}
print len(self.newComingFlows)
# self.traffics={}
self.congested=False
for dp in self.datapaths.values():
self.port_features.setdefault(dp.id, {})
self._request_stats(dp)
# Refresh data.
self.capabilities = None
self.best_paths = None
self.create_bw_graph(self.free_bandwidth)
hub.sleep(setting.MONITOR_PERIOD)
if self.stats['flow'] or self.stats['port']:
self.show_stat('flow')
self.show_stat('port')
hub.sleep(1)
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def _packet_in_handler(self, ev):
'''
In packet_in handler, we need to learn access_table by ARP and IP packets.
'''
msg = ev.msg
pkt = packet.Packet(msg.data)
arp_pkt = pkt.get_protocol(arp.arp)
ip_pkt = pkt.get_protocol(ipv4.ipv4)
if isinstance(arp_pkt, arp.arp):
self.logger.debug("ARP processing")
self.arp_forwarding(msg, arp_pkt.src_ip, arp_pkt.dst_ip)
if isinstance(ip_pkt, ipv4.ipv4):
self.logger.debug("IPV4 processing")
if len(pkt.get_protocols(ethernet.ethernet)):
eth_type = pkt.get_protocols(ethernet.ethernet)[0].ethertype
self.shortest_forwarding(msg, eth_type, ip_pkt.src, ip_pkt.dst)
@set_ev_cls(ofp_event.EventOFPFlowStatsReply, MAIN_DISPATCHER)
def _flow_stats_reply_handler(self, ev):
"""
Save flow stats reply information into self.flow_stats.
Calculate flow speed and Save it.
(old) self.flow_stats = {dpid:{(in_port, ipv4_dst, out-port):[(packet_count, byte_count, duration_sec, duration_nsec),],},}
(old) self.flow_speed = {dpid:{(in_port, ipv4_dst, out-port):[speed,],},}
(new) self.flow_stats = {dpid:{(priority, ipv4_src, ipv4_dst):[(packet_count, byte_count, duration_sec, duration_nsec),],},}
(new) self.flow_speed = {dpid:{(priority, ipv4_src, ipv4_dst):[speed,],},}
        Because the proactive flow entries don't have 'in_port' and 'out-port' fields.
Note: table-miss, LLDP and ARP flow entries are not what we need, just filter them.
"""
body = ev.msg.body
dpid = ev.msg.datapath.id
self.statRecord.append(dpid)
self.stats['flow'][dpid] = body
self.flow_stats.setdefault(dpid, {})
self.flow_speed.setdefault(dpid, {})
for stat in sorted([flow for flow in body if (
(flow.priority not in [0, 10, 65535]) and (flow.match.get('ipv4_src')) and (flow.match.get('ipv4_dst')))],
key=lambda flow: (flow.priority, flow.match.get('ipv4_src'), flow.match.get('ipv4_dst'))):
src = stat.match['ipv4_src']
dst = stat.match['ipv4_dst']
for f in self.newComingFlows.keys():
if f[0] == src and f[1] == dst:
swPair = self.newComingFlows.get(f)
key = (stat.priority, src, dst)
value = (stat.packet_count, stat.byte_count,
stat.duration_sec, stat.duration_nsec)
self._save_stats(self.flow_stats[dpid], key, value, 5)
# Get flow's speed and Save it.
pre = 0
period = setting.MONITOR_PERIOD
tmp = self.flow_stats[dpid][key]
if len(tmp) > 1:
pre = tmp[-2][1]
period = self._get_period(tmp[-1][2], tmp[-1][3], tmp[-2][2], tmp[-2][3])
speed = self._get_speed(self.flow_stats[dpid][key][-1][1], pre, period)
self._save_stats(self.flow_speed[dpid], key, speed, 5)
                    # Record flows that need to be rescheduled. (hmc)
flowDemand = speed * 8.0 / (setting.MAX_CAPACITY * 1024)
if flowDemand > 0.0:
# if src not in self.hostsList:
# self.hostsList.append(src)
# if dst not in self.hostsList:
# self.hostsList.append(dst)
self.flows[key] = {'src': src, 'dst': dst, 'speed': speed, 'match':stat.match,'priority': stat.priority,
'swPair': swPair}
# if not self.pre_GFF_path.has_key((src, dst)):
# self.pre_GFF_path[(src, dst)] = None
# Estimate flows' demands if all the flow_stat replies are received.
if len(self.statRecord) == 8 and self.flows:
# #clear the new coming flows avoid impacting next round rerouting
flows = sorted([flow for flow in self.flows.values()], key=lambda flow: (flow['src'], flow['dst']))
# hostsList = sorted(self.hostsList)
# self._demandEstimator(flows, hostsList)
# if self.congested==1:
print("it is time to reroute!")
# self._demandEstimator(flows,hostsList)
self._reroute(flows)
self.newComingFlows.clear()
else:
pass
def _demandEstimator(self, flows, hostsList):
estimated_flows = demand_estimation(flows, hostsList)
self._reroute(estimated_flows)
def _reroute(self, flows):
# estimated_flows = demand_estimation(flows, hostsList)
self.traffics = {}
count = 0
j = 0
route_list = []
for flow in flows:
self.paths[flow['swPair']] = self._ip2sw(flow['swPair'])
print self.paths
self.traffics[count] = flow
count = count + 1
currentFlows = self.traffics
flow_len = len(currentFlows)
if flow_len > 1:
start = time.time()
route_list = GA_compute._GA_start(flow_len)
end = time.time()
print("computing time " + str(end - start))
if route_list != []:
for k in route_list:
flow = currentFlows[j]
j = j + 1
core = 1001 + k % 4
Thread(target=self._GlobalFirstFit, args=(flow, core)).start()
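    # Note on the mapping above (an assumption inferred from the dpid scheme
    # used elsewhere in this app): route_list holds one index per flow from the
    # GA, and core = 1001 + k % 4 spreads the flows over four core switches
    # whose datapath ids are assumed to be 1001..1004 (a 4-core fat-tree);
    # _GlobalFirstFit then keeps only the candidate path crossing that core.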
def _ip2sw(self, swPair):
src_dp = swPair[0]
dst_dp = swPair[1]
paths = self.awareness.shortest_paths.get(src_dp).get(dst_dp)
return paths
def swToSegments(self,path):
datapaths=self.datapaths
link_to_port=self.awareness.link_to_port
first_dp = datapaths[path[0]]
portList = [] # it includes all push mpls labels of the path
Pathlen = len(path)
if Pathlen == '':
self.logger.info("Path error!")
return
port_pair = self.get_port_pair_from_link(link_to_port, path[0], path[1])
if port_pair is None:
self.logger.info("Port not found in first hop.")
return
first_output = port_pair[0]
portList.append(first_output)
for i in xrange(1, Pathlen - 1):
port_next = self.get_port_pair_from_link(link_to_port, path[i], path[i + 1])
if port_next:
port = port_next[0]
portList.append(port)
return first_dp,portList
def _GlobalFirstFit(self,flow,core):
'''
Do the Hedera Global First Fit here.
self.awareness.link_to_port = {(src_dpid,dst_dpid):(src_port,dst_port),}
self.free_bandwidth = {dpid:{port_no:free_bw,},} Unit:Kbit/s
'''
swPair=flow['swPair']
paths=self.paths.get(swPair)
if paths==None:
paths=self._ip2sw(swPair)
for path in paths:
if path[int((len(path) - 1) / 2)] == core:
bucket=self.swToSegments(path)
self._install_GFF_path(bucket, flow['match'], flow['priority'])
def _install_GFF_path(self, bucket, match, priority):
'''
Installing the Global First Fit path.
"match": {"dl_type": 2048, "in_port": 3,
"ipv4_src": "10.1.0.1", "ipv4_dst": "10.8.0.2"}
flow_info = (eth_type, src_ip, dst_ip, priority)
'''
flow_info = (match['eth_type'], match['ipv4_src'], match['ipv4_dst'], priority)
self.Segment_forwarding(flow_info,bucket)
# Install flow entries to datapaths along the path.
def add_flow(self, dp, priority, match, actions, idle_timeout=0, hard_timeout=0):
"""
Send a flow entry to datapath.
"""
ofproto = dp.ofproto
parser = dp.ofproto_parser
inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, actions)]
mod = parser.OFPFlowMod(datapath=dp, priority=priority,
idle_timeout=idle_timeout,
hard_timeout=hard_timeout,
match=match, instructions=inst)
dp.send_msg(mod)
@set_ev_cls(ofp_event.EventOFPFlowRemoved, MAIN_DISPATCHER)
def flow_removed_handler(self, ev):
msg = ev.msg
dp = msg.datapath
ofp = dp.ofproto
ip_src = msg.match.get("ipv4_src")
ip_dst = msg.match.get("ipv4_dst")
if self.newComingFlows!={}:
self.newComingFlows.pop((ip_src,ip_dst))
def _build_packet_out(self, datapath, buffer_id, src_port, dst_port, data):
"""
Build packet out object.
"""
actions = []
if dst_port:
actions.append(datapath.ofproto_parser.OFPActionOutput(dst_port))
msg_data = None
if buffer_id == datapath.ofproto.OFP_NO_BUFFER:
if data is None:
return None
msg_data = data
out = datapath.ofproto_parser.OFPPacketOut(
datapath=datapath, buffer_id=buffer_id,
data=msg_data, in_port=src_port, actions=actions)
return out
def get_port_pair_from_link(self, link_to_port, src_dpid, dst_dpid):
"""
Get port pair of link, so that controller can install flow entry.
link_to_port = {(src_dpid,dst_dpid):(src_port,dst_port),}
"""
if (src_dpid, dst_dpid) in link_to_port:
return link_to_port[(src_dpid, dst_dpid)]
else:
self.logger.info("Link from dpid:%s to dpid:%s is not in links" %
(src_dpid, dst_dpid))
return None
@set_ev_cls(ofp_event.EventOFPPortStatsReply, MAIN_DISPATCHER)
def _port_stats_reply_handler(self, ev):
"""
Save port's stats information into self.port_stats.
Calculate port speed and Save it.
self.port_stats = {(dpid, port_no):[(tx_bytes, rx_bytes, rx_errors, duration_sec, duration_nsec),],}
self.port_speed = {(dpid, port_no):[speed,],}
        Note: The transmit and receive performance of a port are independent,
        so we calculate the load of a port using tx_bytes only.
"""
body = ev.msg.body
dpid = ev.msg.datapath.id
self.current_dectect_dp.append(dpid)
self.stats['port'][dpid] = body
self.current_free_bandwidth.setdefault(dpid,{})
self.free_bandwidth.setdefault(dpid, {})
for stat in sorted(body, key=attrgetter('port_no')):
port_no = stat.port_no
print stat
if port_no != ofproto_v1_3.OFPP_LOCAL:
key = (dpid, port_no)
value = (stat.tx_bytes, stat.rx_bytes, stat.rx_errors,
stat.duration_sec, stat.duration_nsec)
self._save_stats(self.port_stats, key, value, 5)
# Get port speed and Save it.
pre = 0
period = setting.MONITOR_PERIOD
tmp = self.port_stats[key]
if len(tmp) > 1:
# Calculate only the tx_bytes, not the rx_bytes. (hmc)
pre = tmp[-2][0]
period = self._get_period(tmp[-1][3], tmp[-1][4], tmp[-2][3], tmp[-2][4])
speed = self._get_speed(self.port_stats[key][-1][0], pre, period)
self._save_stats(self.port_speed, key, speed, 5)
self._save_freebandwidth(dpid, port_no, speed)
def get_sw(self, dpid, in_port, src, dst):
"""
Get pair of source and destination switches.
"""
src_sw = dpid
dst_sw = None
src_location = self.awareness.get_host_location(src) # src_location = (dpid, port)
if in_port in self.awareness.access_ports[dpid]:
if (dpid, in_port) == src_location:
src_sw = src_location[0]
else:
return None
dst_location = self.awareness.get_host_location(dst) # dst_location = (dpid, port)
if dst_location:
dst_sw = dst_location[0]
if src_sw and dst_sw:
return src_sw, dst_sw
else:
return None
@set_ev_cls(ofp_event.EventOFPPortDescStatsReply, MAIN_DISPATCHER)
def port_desc_stats_reply_handler(self, ev):
"""
Save port description info.
"""
msg = ev.msg
dpid = msg.datapath.id
ofproto = msg.datapath.ofproto
config_dict = {ofproto.OFPPC_PORT_DOWN: "Down",
ofproto.OFPPC_NO_RECV: "No Recv",
                       ofproto.OFPPC_NO_FWD: "No Forward",
ofproto.OFPPC_NO_PACKET_IN: "No Packet-in"}
state_dict = {ofproto.OFPPS_LINK_DOWN: "Down",
ofproto.OFPPS_BLOCKED: "Blocked",
ofproto.OFPPS_LIVE: "Live"}
ports = []
for p in ev.msg.body:
ports.append('port_no=%d hw_addr=%s name=%s config=0x%08x '
'state=0x%08x curr=0x%08x advertised=0x%08x '
'supported=0x%08x peer=0x%08x curr_speed=%d '
'max_speed=%d' %
(p.port_no, p.hw_addr,
p.name, p.config,
p.state, p.curr, p.advertised,
p.supported, p.peer, p.curr_speed,
p.max_speed))
if p.config in config_dict:
config = config_dict[p.config]
else:
config = "up"
if p.state in state_dict:
state = state_dict[p.state]
else:
state = "up"
# Recording data.
port_feature = (config, state, p.curr_speed)
self.port_features[dpid][p.port_no] = port_feature
@set_ev_cls(ofp_event.EventOFPPortStatus, MAIN_DISPATCHER)
def _port_status_handler(self, ev):
"""
Handle the port status changed event.
"""
msg = ev.msg
ofproto = msg.datapath.ofproto
reason = msg.reason
dpid = msg.datapath.id
port_no = msg.desc.port_no
reason_dict = {ofproto.OFPPR_ADD: "added",
ofproto.OFPPR_DELETE: "deleted",
ofproto.OFPPR_MODIFY: "modified", }
if reason in reason_dict:
print "switch%d: port %s %s" % (dpid, reason_dict[reason], port_no)
else:
            print "switch%d: Illegal port state %s %s" % (dpid, port_no, reason)
def _request_stats(self, datapath):
"""
Sending request msg to datapath
"""
self.logger.debug('send stats request: %016x', datapath.id)
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
req = parser.OFPPortDescStatsRequest(datapath, 0)
datapath.send_msg(req)
if(str(datapath.id).startswith('3')):
req = parser.OFPFlowStatsRequest(datapath)
datapath.send_msg(req)
req = parser.OFPPortStatsRequest(datapath, 0, ofproto.OFPP_ANY)
datapath.send_msg(req)
def get_min_bw_of_links(self, graph, path, min_bw):
"""
        Get the bandwidth of a path. The minimum bandwidth over its links
        is the path's bandwidth, because that link is the bottleneck of the path.
"""
_len = len(path)
if _len > 1:
minimal_band_width = min_bw
for i in xrange(_len-1):
pre, curr = path[i], path[i+1]
if 'bandwidth' in graph[pre][curr]:
bw = graph[pre][curr]['bandwidth']
minimal_band_width = min(bw, minimal_band_width)
else:
continue
return minimal_band_width
else:
return min_bw
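    # Example: for path [1, 5, 9] with link bandwidths 800 and 300 Kbit/s and a
    # sufficiently large min_bw, the call returns 300, the bottleneck of the path.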
def get_best_path_by_bw(self, graph, paths):
"""
Get best path by comparing paths.
Note: This function is called in EFattree module.
"""
capabilities = {}
best_paths = copy.deepcopy(paths)
for src in paths:
for dst in paths[src]:
if src == dst:
best_paths[src][src] = [src]
capabilities.setdefault(src, {src: setting.MAX_CAPACITY})
capabilities[src][src] = setting.MAX_CAPACITY
else:
max_bw_of_paths = 0
best_path = paths[src][dst][0]
for path in paths[src][dst]:
min_bw = setting.MAX_CAPACITY
min_bw = self.get_min_bw_of_links(graph, path, min_bw)
if min_bw > max_bw_of_paths:
max_bw_of_paths = min_bw
best_path = path
best_paths[src][dst] = best_path
capabilities.setdefault(src, {dst: max_bw_of_paths})
capabilities[src][dst] = max_bw_of_paths
# self.capabilities and self.best_paths have no actual utility in this module.
self.capabilities = capabilities
self.best_paths = best_paths
return capabilities, best_paths
def create_static_bw_graph(self):
"""
Save bandwidth data into networkx graph object.
"""
try:
graph = self.awareness.graph
for link in graph.edges():
node1=link[0]
node2=link[1]
graph[node1][node2]['bandwidth']=setting.MAX_CAPACITY*1024
return graph
except:
self.logger.info("Create bw graph exception")
if self.awareness is None:
self.awareness = lookup_service_brick('awareness')
return self.awareness.graph
def create_bw_graph(self, bw_dict):
"""
Save bandwidth data into networkx graph object.
"""
try:
graph = self.awareness.graph
link_to_port = self.awareness.link_to_port
for link in link_to_port:
(src_dpid, dst_dpid) = link
(src_port, dst_port) = link_to_port[link]
if src_dpid in bw_dict and dst_dpid in bw_dict:
bandwidth = bw_dict[src_dpid][src_port]
# Add key:value pair of bandwidth into graph.
if graph.has_edge(src_dpid, dst_dpid):
# graph[src_dpid][dst_dpid]['bandwidth'] = setting.MAX_CAPACITY
graph[src_dpid][dst_dpid]['bandwidth'] = bandwidth
# else:
# graph.add_edge(src_dpid, dst_dpid)
# graph[src_dpid][dst_dpid]['bandwidth'] = bandwidth
else:
if graph.has_edge(src_dpid, dst_dpid):
graph[src_dpid][dst_dpid]['bandwidth'] = setting.MAX_CAPACITY
# else:
# graph.add_edge(src_dpid, dst_dpid)
# graph[src_dpid][dst_dpid]['bandwidth'] = setting.MAX_CAPACITY
return graph
except:
self.logger.info("Create bw graph exception")
if self.awareness is None:
self.awareness = lookup_service_brick('awareness')
return self.awareness.graph
def _save_freebandwidth(self, dpid, port_no, speed):
"""
Calculate free bandwidth of port and Save it.
port_feature = (config, state, p.curr_speed)
self.port_features[dpid][p.port_no] = port_feature
self.free_bandwidth = {dpid:{port_no:free_bw,},}
"""
port_state = self.port_features.get(dpid).get(port_no)
if port_state:
capacity = setting.MAX_CAPACITY # The true bandwidth of link, instead of 'curr_speed'.
free_bw = self._get_free_bw(capacity, speed)
if free_bw==0:
self.congested=True
self.free_bandwidth[dpid].setdefault(port_no, None)
self.free_bandwidth[dpid][port_no] = free_bw
else:
self.logger.info("Port is Down")
def _save_stats(self, _dict, key, value, length=5):
if key not in _dict:
_dict[key] = []
_dict[key].append(value)
if len(_dict[key]) > length:
_dict[key].pop(0)
def _get_speed(self, now, pre, period):
if period:
return (now - pre) / (period)
else:
return 0
def _get_free_bw(self, capacity, speed):
# freebw: Kbit/s
return max(capacity - speed * 8 / 1000.0, 0)
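    # Unit check for the expression above: 'speed' is a byte-counter rate in
    # bytes/s, so speed * 8 / 1000.0 is in Kbit/s, matching 'capacity'
    # (setting.MAX_CAPACITY, also in Kbit/s). E.g. 250000 bytes/s on a
    # 10000 Kbit/s port leaves 10000 - 2000 = 8000 Kbit/s free.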
def _get_time(self, sec, nsec):
return sec + nsec / 1000000000.0
def _get_period(self, n_sec, n_nsec, p_sec, p_nsec):
return self._get_time(n_sec, n_nsec) - self._get_time(p_sec, p_nsec)
def show_topology(self):
# It means the link_to_port table has changed.
_graph = self.graph
print "\n---------------------Link Port---------------------"
print '%6s' % ('switch'),
for node in sorted([node for node in _graph.nodes()], key=lambda node: node):
print '%6d' % node,
print
for node1 in sorted([node for node in _graph.nodes()], key=lambda node: node):
print '%6d' % node1,
for node2 in sorted([node for node in _graph.nodes()], key=lambda node: node):
if (node1, node2) in self.awareness.link_to_port.keys():
print '%6s' % str(self.awareness.link_to_port[(node1, node2)]),
print('%6s' %str(_graph[node1][node2]['bandwidth']))
else:
print '%6s' % '/',
print
print
def show_stat(self, _type):
'''
Show statistics information according to data type.
_type: 'port' / 'flow'
'''
if setting.TOSHOW is False:
return
bodys = self.stats[_type]
if _type == 'flow':
print('\ndatapath '
'priority ip_src ip_dst '
' packets bytes flow-speed(Kb/s)')
print('-------- '
'-------- ------------ ------------ '
'--------- ----------- ----------------')
for dpid in sorted(bodys.keys()):
for stat in sorted([flow for flow in bodys[dpid] if ((flow.priority not in [0, 65535]) and (flow.match.get('ipv4_src')) and (flow.match.get('ipv4_dst')))],
key=lambda flow: (flow.priority, flow.match.get('ipv4_src'), flow.match.get('ipv4_dst'))):
print('%8d %8s %12s %12s %9d %11d %16.1f' % (
dpid,
stat.priority, stat.match.get('ipv4_src'), stat.match.get('ipv4_dst'),
stat.packet_count, stat.byte_count,
abs(self.flow_speed[dpid][(stat.priority, stat.match.get('ipv4_src'), stat.match.get('ipv4_dst'))][-1])*8/1000.0))
print
if _type == 'port':
print('\ndatapath port '
' rx-pkts rx-bytes '' tx-pkts tx-bytes '
' port-bw(Kb/s) port-speed(b/s) port-freebw(Kb/s) '
' port-state link-state')
print('-------- ---- '
'--------- ----------- ''--------- ----------- '
'------------- --------------- ----------------- '
'---------- ----------')
_format = '%8d %4x %9d %11d %9d %11d %13d %15.1f %17.1f %10s %10s'
for dpid in sorted(bodys.keys()):
for stat in sorted(bodys[dpid], key=attrgetter('port_no')):
if stat.port_no != ofproto_v1_3.OFPP_LOCAL:
print(_format % (
dpid, stat.port_no,
stat.rx_packets, stat.rx_bytes,
stat.tx_packets, stat.tx_bytes,
10000,
abs(self.port_speed[(dpid, stat.port_no)][-1] * 8),
self.free_bandwidth[dpid][stat.port_no],
self.port_features[dpid][stat.port_no][0],
self.port_features[dpid][stat.port_no][1]))
print
def send_packet_out(self, datapath, buffer_id, src_port, dst_port, data):
"""
Send packet out packet to assigned datapath.
"""
out = self._build_packet_out(datapath, buffer_id,
src_port, dst_port, data)
if out:
datapath.send_msg(out)
def get_port(self, dst_ip, access_table):
"""
Get access port of dst host.
access_table = {(sw,port):(ip, mac),}
"""
if access_table:
if isinstance(access_table.values()[0], tuple):
for key in access_table.keys():
if dst_ip == access_table[key][0]: # Use the IP address only, not the MAC address. (hmc)
dst_port = key[1]
return dst_port
return None
def flood(self, msg):
"""
Flood packet to the access ports which have no record of host.
access_ports = {dpid:set(port_num,),}
access_table = {(sw,port):(ip, mac),}
"""
datapath = msg.datapath
ofproto = datapath.ofproto
for dpid in self.awareness.access_ports:
for port in self.awareness.access_ports[dpid]:
if (dpid, port) not in self.awareness.access_table.keys():
datapath = self.datapaths[dpid]
out = self._build_packet_out(
datapath, ofproto.OFP_NO_BUFFER,
ofproto.OFPP_CONTROLLER, port, msg.data)
datapath.send_msg(out)
self.logger.debug("Flooding packet to access port")
def arp_forwarding(self, msg, src_ip, dst_ip):
"""
        Send the ARP packet to the destination host if the dst host record
        exists; otherwise flood it to the unknown access ports.
result = (datapath, port)
"""
datapath = msg.datapath
ofproto = datapath.ofproto
result = self.awareness.get_host_location(dst_ip)
if result:
# Host has been recorded in access table.
datapath_dst, out_port = result[0], result[1]
datapath = self.datapaths[datapath_dst]
out = self._build_packet_out(datapath, ofproto.OFP_NO_BUFFER,
ofproto.OFPP_CONTROLLER,
out_port, msg.data)
datapath.send_msg(out)
            self.logger.debug("Deliver ARP packet to known host")
else:
# Flood is not good.
self.flood(msg)
def _news_Segment_forwarding_(self,flow_info,bucket):
datapath=bucket[0]
segmentStack=bucket[1]
ofproto=datapath.ofproto
parser = datapath.ofproto_parser
eth_mpls=ethernet.ether.ETH_TYPE_MPLS
actions = []
while len(segmentStack)>1:
mpls_label=segmentStack.pop()
f_label = datapath.ofproto_parser.OFPMatchField.make(datapath.ofproto.OXM_OF_MPLS_LABEL, mpls_label)
actions.append(parser.OFPActionPushMpls(eth_mpls))
actions.append(parser.OFPActionSetField(f_label))
actions.append(parser.OFPActionOutput(segmentStack.pop(),0))
inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, actions)]
match = parser.OFPMatch(
eth_type=flow_info[0],ipv4_src=flow_info[1], ipv4_dst=flow_info[2],in_port=flow_info[-1]
)
mod = parser.OFPFlowMod(datapath=datapath, priority=25,
table_id=0,
idle_timeout=3,
hard_timeout=0,
match=match, instructions=inst)
datapath.send_msg(mod)
def Segment_forwarding(self,flow_info,bucket):
datapath=bucket[0]
segmentStack=bucket[1]
ofproto=datapath.ofproto
parser = datapath.ofproto_parser
eth_mpls=ethernet.ether.ETH_TYPE_MPLS
actions = []
while len(segmentStack)>1:
mpls_label=segmentStack.pop()
f_label = datapath.ofproto_parser.OFPMatchField.make(datapath.ofproto.OXM_OF_MPLS_LABEL, mpls_label)
actions.append(parser.OFPActionPushMpls(eth_mpls))
actions.append(parser.OFPActionSetField(f_label))
actions.append(parser.OFPActionOutput(segmentStack.pop(),0))
inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, actions)]
match = parser.OFPMatch(
eth_type=flow_info[0],ipv4_src=flow_info[1], ipv4_dst=flow_info[2]
)
mod = parser.OFPFlowMod(datapath=datapath, priority=flow_info[-1]+1,
table_id=0,
idle_timeout=2,
hard_timeout=0,
match=match, instructions=inst)
datapath.send_msg(mod)
def get_path(self, src, dst, weight):
"""
Get shortest path from network_awareness module.
generator (nx.shortest_simple_paths( )) produces
lists of simple paths, in order from shortest to longest.
"""
shortest_paths = self.awareness.shortest_paths
if self.seletPathIndex==CONF.k_paths:
self.seletPathIndex=0
if weight == self.WEIGHT_MODEL['hop']:
try:
path= shortest_paths.get(src).get(dst)[self.seletPathIndex]
self.seletPathIndex += 1
return path
except:
return shortest_paths.get(src).get(dst)[0]
def get_path2(self, src, dst, weight):
"""
Get shortest path from network_awareness module.
generator (nx.shortest_simple_paths( )) produces
lists of simple paths, in order from shortest to longest.
"""
#shortest_paths = self.awareness.shortest_paths
# Create bandwidth-sensitive datapath graph.
if weight == self.WEIGHT_MODEL['hop']:
graph = self.awareness.graph
return nx.shortest_path(graph,src,dst,method='dijkstra')
elif weight == self.WEIGHT_MODEL['bw']:
graph = self.graph
path = nx.shortest_path(graph, src, dst, weight='bandwidth', method='dijkstra')
return path
else:
pass
def send_flow_mod(self, datapath, flow_info, src_port, dst_port):
"""
Build flow entry, and send it to datapath.
flow_info = (eth_type, src_ip, dst_ip, in_port)
or
flow_info = (eth_type, src_ip, dst_ip, in_port, ip_proto, Flag, L4_port)
"""
parser = datapath.ofproto_parser
actions = []
actions.append(parser.OFPActionOutput(dst_port))
if len(flow_info) == 7:
if flow_info[-3] == 6:
if flow_info[-2] == True:
match = parser.OFPMatch(
in_port=src_port, eth_type=flow_info[0],
ipv4_src=flow_info[1], ipv4_dst=flow_info[2],
ip_proto=6, tcp_src=flow_info[-1][0],tcp_dst=flow_info[-1][1])
else:
pass
elif flow_info[-3] == 17:
if flow_info[-2] == True:
match = parser.OFPMatch(
in_port=src_port, eth_type=flow_info[0],
ipv4_src=flow_info[1], ipv4_dst=flow_info[2],
ip_proto=17, udp_src=flow_info[-1][0],udp_dst=flow_info[-1][1])
else:
pass
elif len(flow_info) == 4:
match = parser.OFPMatch(
in_port=src_port, eth_type=flow_info[0],
ipv4_src=flow_info[1], ipv4_dst=flow_info[2])
else:
pass
self.add_flow(datapath, 30, match, actions,
idle_timeout=1, hard_timeout=0)
def install_flow(self, datapaths ,link_to_port, path, flow_info, buffer_id,ip_src, ip_dst,data=None):
'''
Install flow entries for datapaths.
path=[dpid1, dpid2, ...]
flow_info = (eth_type, src_ip, dst_ip, in_port)
or
flow_info = (eth_type, src_ip, dst_ip, in_port, ip_proto, Flag, L4_port)
'''
Pathlen=len(path)
if Pathlen == 0:
self.logger.info("Path error!")
return
in_port = flow_info[3]
first_dp = datapaths[path[0]]
port_pair = self.get_port_pair_from_link(link_to_port, path[0], path[1])
#install flow entry of the second switch to the last switch
for i in xrange(1, Pathlen-1):
port = self.get_port_pair_from_link(link_to_port, path[i - 1], path[i])
if (i < Pathlen-1):
port_next = self.get_port_pair_from_link(link_to_port, path[i], path[i + 1])
else:
port_next=self.awareness.get_host_location(ip_dst)[1]
if port and port_next:
src_port=port[1]
if(i<Pathlen-1):
dst_port =port_next[0]
else:
dst_port=port_next
datapath = datapaths[path[i]]
self.send_flow_mod(datapath, flow_info, src_port, dst_port)
# if Pathlen>2:
# for i in xrange(1, Pathlen-1):
# port = self.get_port_pair_from_link(link_to_port, path[i-1], path[i])
# port_next = self.get_port_pair_from_link(link_to_port, path[i], path[i+1])
#
# if port and port_next:
# src_port, dst_port = port[1], port_next[0]
# final_output=port_next[1]
# datapath = datapaths[path[i]]
# self.send_flow_mod(datapath, flow_info, src_port, dst_port)
#
#
# last_in_port = final_output
#
# else:
# last_in_port=port_pair[1]
#
# # Install flow entry for the last datapath.
#
#
# if last_in_port is None:
# return
# self.send_flow_mod(last_dp,flow_info,last_in_port,last_out_port)
# Install flow entry for the first datapath.
if port_pair is None:
self.logger.info("Port not found in first hop.")
return
out_port = port_pair[0]
self.send_flow_mod(first_dp, flow_info, in_port, out_port)
# Send packet_out to the first datapath.
self.send_packet_out(first_dp, buffer_id, in_port, out_port, data)
if(len(path)==Pathlen):
self.logger.info("[PATH]%s<-->%s: %s" % (ip_src, ip_dst, path))
def get_L4_info(self, tcp_pkt, udp_pkt):
"""
Get ip_proto and L4 port number.
"""
ip_proto = None
L4_port = None
Flag = None
if tcp_pkt:
ip_proto = 6
if tcp_pkt.src_port and tcp_pkt.dst_port:
L4_port = tcp_pkt.src_port,tcp_pkt.dst_port
Flag = True
else:
Flag=False
elif udp_pkt:
ip_proto = 17
if udp_pkt.src_port and udp_pkt.dst_port:
L4_port = udp_pkt.src_port,udp_pkt.dst_port
Flag = True
else:
Flag=False
else:
pass
return (ip_proto, L4_port, Flag)
def shortest_forwarding(self, msg, eth_type, ip_src, ip_dst):
"""
Calculate shortest forwarding path and Install them into datapaths.
flow_info = (eth_type, ip_src, ip_dst, in_port)
or
flow_info = (eth_type, ip_src, ip_dst, in_port, ip_proto, Flag, L4_port)
"""
datapath = msg.datapath
in_port = msg.match['in_port']
pkt = packet.Packet(msg.data)
tcp_pkt = pkt.get_protocol(tcp.tcp)
udp_pkt = pkt.get_protocol(udp.udp)
ip_proto = None
L4_port = None
Flag = None
        result = self.get_sw(datapath.id, in_port, ip_src, ip_dst)  # result = (src_sw, dst_sw)
        if result is None:
            return
        src_sw, dst_sw = result[0], result[1]
        # Get ip_proto and L4 port number.
if setting.enable_Flow_Entry_L4Port:
ip_proto, L4_port, Flag = self.get_L4_info(tcp_pkt, udp_pkt)
if result:
if dst_sw:
src_sw, dst_sw = result[0], result[1]
if ip_proto and L4_port and Flag:
if ip_proto == 6:
L4_Proto = 'TCP'
elif ip_proto == 17:
L4_Proto = 'UDP'
else:
pass
flow_info = (eth_type, ip_src, ip_dst, in_port, ip_proto, Flag, L4_port)
else:
flow_info = (eth_type, ip_src, ip_dst, in_port)
else:
flow_info = (eth_type, ip_src, ip_dst, in_port)
# dst_host and src_host link one same switch
if src_sw == dst_sw:
dst_port = self.awareness.get_host_location(ip_dst)[1]
self.send_flow_mod(datapath, flow_info, in_port, dst_port)
self.send_packet_out(datapath, msg.buffer_id, in_port, dst_port, msg.data)
else:
path = self.get_path(src_sw, dst_sw, weight=self.weight)
if len(path)==5:
self.newComingFlows[(ip_src,ip_dst)]=(path[0],path[-1])
# Path has already been calculated, just get it.
if path == None:
return
try:
bucket=self.swToSegments(path)
self._news_Segment_forwarding_(flow_info,bucket)
except:
self.flood(msg)
|
counter.py
|
#counter.py
##############NETWORK CONFIG###############
import csv
import os
allfile = os.listdir()
def Save(data):
with open('config_counter.csv','w',newline='') as file:
#fw = 'file writer'
fw = csv.writer(file)
fw.writerows(data)
print('Save Done!')
def Read():
if 'config_counter.csv' not in allfile:
allip = [['kitchen','192.168.0.100',7000]]
Save(allip)
with open('config_counter.csv',newline='') as file:
#fr = 'file reader'
fr = csv.reader(file)
data = list(fr)
return data
#kitchen ip:
readip = Read()
ip_kitchen = readip[0]
kitchenip = ip_kitchen[1] # '192.168.0.133' #myip
kitchenport = int(ip_kitchen[2]) # 7800 #myport
print('IP/PORT: ',kitchenip,kitchenport)
##############NETWORK CONFIG###############
from tkinter import *
from tkinter import ttk
from tkinter import messagebox
import socket
import threading
GUI = Tk()
GUI.geometry('1000x700')
GUI.title('Counter : โปรแกรมหน้าร้าน')
FONT = ('Angsana New',15)
###########config ip############
def SettingIP(event=None):
GUI2 = Toplevel()
GUI2.geometry('500x300')
GUI2.title('กรุณาตั้งค่า ip ก่อนใช้งาน')
readip = Read()
ip_kitchen = readip[0]
L1 = ttk.Label(GUI2,text='Kitchen IP').pack(pady=10)
v_kitchenip = StringVar()
v_kitchenip.set(ip_kitchen[1])
E1 = ttk.Entry(GUI2,textvariable=v_kitchenip,font=FONT)
E1.pack(pady=10)
L2 = ttk.Label(GUI2,text='Kitchen Port').pack(pady=10)
v_kitchenport = StringVar()
v_kitchenport.set(int(ip_kitchen[2]))
E2 = ttk.Entry(GUI2,textvariable=v_kitchenport,font=FONT)
E2.pack(pady=10)
def SaveSetting():
saveip = [['kitchen',v_kitchenip.get(),v_kitchenport.get()]]
Save(saveip)
messagebox.showinfo('บันทึก ip ใหม่','บันทึก ip ใหม่แล้ว!')
GUI2.withdraw()
B1 = ttk.Button(GUI2,text='Save',command=SaveSetting)
B1.pack(ipady=10,ipadx=20)
GUI2.mainloop()
GUI.bind('<F10>',SettingIP)
F1 = Frame(GUI)
F2 = Frame(GUI)
F1.place(x=20,y=20)
F2.place(x=500,y=20)
L11 = ttk.Label(F1,text='เลือกรายการ',font=FONT,foreground='green').pack()
foodlist = {'1001':{'fid':'1001','name':'ไก่ไม่มีกระดูก','price':20},
'1002':{'fid':'1002','name':'ปลาแซลมอนย่างซีอิ้ว','price':50},
'1003':{'fid':'1003','name':'ไก่เผ็ด','price':45},
'1004':{'fid':'1004','name':'ข้าวยำไก่แซ็ป','price':60},
'1005':{'fid':'1005','name':'มันบด','price':15},
'1006':{'fid':'1006','name':'ปลากระพงทอด','price':70},
'1007':{'fid':'1007','name':'ข้าวเปล่า','price':10},
'1008':{'fid':'1008','name':'น้ำดื่ม','price':7},
'1009':{'fid':'1009','name':'น้ำส้ม','price':15},
'1010':{'fid':'1010','name':'น้ำอัดลม','price':25},
}
global buffer_tablefood
buffer_tablefood = {}
global order_state
order_state = False
global order_no
order_no = 1000
def InsertFood(fid):
global buffer_tablefood
global order_state
global order_no
if order_state == False:
order_no += 1
v_orderno.set(order_no)
order_state = True
if fid not in buffer_tablefood:
flist = foodlist[fid]
#print(flist.values())
flist = list(flist.values()) #['1001','ไก่ไม่มีกระดูก',20]
print(flist)
print(type(flist))
print('---')
quan = 1
total = flist[2] * quan
flist.append(quan)
flist.append(total)
buffer_tablefood[fid] = flist
else:
flist = buffer_tablefood[fid] #['1001','ไก่ไม่มีกระดูก',20,1,20]
        flist[-2] = flist[-2] + 1 # increase the quantity by 1
flist[-1] = flist[-3] * flist[-2]
buffer_tablefood[fid] = flist
print('Current Table: ',buffer_tablefood)
table_food.delete(*table_food.get_children()) #clear data in table
for vl in buffer_tablefood.values():
table_food.insert('','end',value=vl)
#total
total = sum([ vl[-1] for vl in buffer_tablefood.values()])
v_total.set(f'{total:,.2f} บาท')
#table_food.insert('','end',value=flist)
Ftable = Frame(F1)
Ftable.pack()
rowcount = 0
bcount = 0
for k,v in foodlist.items():
print('KEY:',k)
print('VALUE:',v)
B1 = ttk.Button(Ftable,text=v['name'],width=15)
B1.configure(command=lambda x=k: InsertFood(x))
if bcount % 3 == 0:
rowcount = rowcount + 1 # rowcount += 1
cl = 0
elif bcount % 3 == 1:
cl = 1
elif bcount % 3 == 2:
cl = 2
else:
pass
B1.grid(row=rowcount,column=cl ,padx=10,pady=10,ipady=10)
bcount = bcount + 1
# B1 = ttk.Button(F1,text=foodlist['1001']['name'])
# B1.configure(command=lambda x='1001': InsertFood(x))
# B1.pack(ipadx=20,ipady=10,padx=10,pady=10)
L21 = ttk.Label(F2,text='รายการอาหาร',font=FONT,foreground='green').pack()
header = ['ID','Food Name','Price','Quantity','Total']
hw = [70,150,70,70,70] # column widths: ID | Food Name | Price | Quantity | Total
table_food = ttk.Treeview(F2,height=15,column=header,show='headings')
table_food.pack()
for hd,w in zip(header,hw):
table_food.heading(hd,text=hd)
table_food.column(hd,width=w)
### Total
v_total = StringVar() # variable that holds the grand total
Ltotal = ttk.Label(GUI,text='Total: ',font=('Angsana New',30),foreground='green').place(x=500,y=450)
total = ttk.Label(GUI,textvariable=v_total)
total.configure(font=('Angsana New',30,'bold'))
total.configure(foreground='green')
total.place(x=600,y=450)
v_orderno = StringVar() # variable that holds the current order number
Lorderno = ttk.Label(GUI,text='Order No. ',font=('Angsana New',30),foreground='green').place(x=500,y=400)
orderno = ttk.Label(GUI,textvariable=v_orderno)
orderno.configure(font=('Angsana New',30,'bold'))
orderno.configure(foreground='green')
orderno.place(x=650,y=400)
########Send Data to Server########
def ConverttoNetwork(data):
text = ''
for d in data.values():
text += '{}={},'.format(d[0],d[-2])
print(text)
text = text[:-1]
#print('k|' + text)
return text
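# Example of the wire format built here (illustrative order contents): with
#   buffer_tablefood = {'1001': ['1001', 'ไก่ไม่มีกระดูก', 20, 2, 40],
#                       '1008': ['1008', 'น้ำดื่ม', 7, 1, 7]}
# ConverttoNetwork() returns '1001=2,1008=1', and SendtoKitchen() prefixes it
# with the order header, producing e.g. 'k|FSX1001|1001=2,1008=1'.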
def SendtoKitchen():
global buffer_tablefood
data = 'k|' + 'FSX' + v_orderno.get() + '|'
#clear order no.
v_orderno.set('-')
v_total.set('0.00 บาท')
#clear state
global order_state
order_state = False
#clear data in table
table_food.delete(*table_food.get_children()) #clear data in treeview
data = data + ConverttoNetwork(buffer_tablefood)
print('DATA:',data)
serverip = kitchenip #'192.168.1.30'
port = kitchenport #7000
server = socket.socket()
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,1)
server.connect((serverip,port))
server.send(data.encode('utf-8'))
data_server = server.recv(1024).decode('utf-8')
print('Data from Server: ', data_server)
server.close()
buffer_tablefood = {}
def ThreadSendtoKitchen():
task = threading.Thread(target=SendtoKitchen)
task.start()
########Button########
FB = Frame(GUI)
FB.place(x=650,y=500)
B1 = ttk.Button(FB,text='ทำรายการสำเร็จ',command=ThreadSendtoKitchen)
B1.grid(row=0,column=0,ipadx=20,ipady=10,padx=10)
B2 = ttk.Button(FB,text='เคลียร์')
B2.grid(row=0,column=1,ipadx=20,ipady=10,padx=10)
GUI.mainloop()
|
views.py
|
from django.http.response import HttpResponseNotFound
from django.shortcuts import render
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
from django.conf import settings
from .joplin import Joplin, ReprJsonEncoder
import logging
import json
from bs4 import BeautifulSoup
from pathlib import Path
import mimetypes
from .utils import JoplinSync, mimetype_to_icon, sync_enable, markdown_public_ressource, md_to_html
import threading
from .edit_session import EditSession
from .lasts_notes import LastsNotes
import glob
def conditional_decorator(dec, condition):
def decorator(func):
if not condition:
# Return the function unchanged, not decorated.
return func
return dec(func)
return decorator
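# Net effect of the helper above: when settings.JOPLIN_LOGIN_REQUIRED is False,
# views decorated with @conditional_decorator(login_required, ...) are served
# without any authentication check; when it is True they behave exactly as if
# decorated with @login_required.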
@conditional_decorator(login_required, settings.JOPLIN_LOGIN_REQUIRED)
def index(request):
return render(request, 'joplinvieweb/index.html', {"sync_enable": sync_enable()})
@conditional_decorator(login_required, settings.JOPLIN_LOGIN_REQUIRED)
def notebooks(request):
joplin = Joplin()
    joplin.parse_notebooks()
data = json.dumps(joplin.rootNotebook.children, default=lambda o: o.__dict__, indent=4)
return HttpResponse(data)
@conditional_decorator(login_required, settings.JOPLIN_LOGIN_REQUIRED)
def notes(request, notebook_id):
joplin = Joplin()
if request.method == "GET": # list the notes of this notebook
notes_metadata = joplin.get_notes_metadata(notebook_id)
return render(request, 'joplinvieweb/notes_list.html', {"notes_metadata": notes_metadata})
if request.method == "POST": # create a notebook
data = json.loads(request.body)
title = data["title"]
parent_id = data["parent_id"]
if parent_id == "0":
parent_id = ""
new_notebook_id = joplin.create_notebook(parent_id, title)
return HttpResponse(new_notebook_id)
@conditional_decorator(login_required, settings.JOPLIN_LOGIN_REQUIRED)
def notebook_delete(request, notebook_id):
if request.method == "POST": # delete the notebook
joplin = Joplin()
if notebook_id:
# first get all the notes of that notebook (recursively, pffff ;-) ) to remove them from last notes:
notes_metadata = joplin.get_notes_metadata_recursive(notebook_id)
LastsNotes.delete_notes([one_note.id for one_note in notes_metadata])
joplin.delete_notebook(notebook_id)
return HttpResponse("")
return HttpResponseNotFound("")
@conditional_decorator(login_required, settings.JOPLIN_LOGIN_REQUIRED)
def notebook_rename(request, notebook_id):
if request.method == "POST": # rename the notebook
data = json.loads(request.body)
title = data["title"]
joplin = Joplin()
joplin.rename_notebook(notebook_id, title)
return HttpResponse("")
return HttpResponseNotFound("")
@conditional_decorator(login_required, settings.JOPLIN_LOGIN_REQUIRED)
def note(request, note_id, format="html"):
return HttpResponse(note_body_name(note_id, format)[0])
@conditional_decorator(login_required, settings.JOPLIN_LOGIN_REQUIRED)
def note_notebook(request, note_id):
return HttpResponse(Joplin().get_note_notebook(note_id))
@conditional_decorator(login_required, settings.JOPLIN_LOGIN_REQUIRED)
def delete_note(request, note_id):
joplin = Joplin()
joplin.delete_note(note_id)
LastsNotes.delete_note(note_id)
return HttpResponse("")
@conditional_decorator(login_required, settings.JOPLIN_LOGIN_REQUIRED)
def render_markdown(request):
try:
if request.method == "POST":
md = json.loads(request.body)
md = md["markdown"]
html = md_to_html(md, True)
return HttpResponse(html)
except:
pass
return HttpResponseNotFound('')
def note_body_name(note_id, format, public=False):
note_body, note_name = Joplin().get_note_body_name(note_id)
if public:
note_body = markdown_public_ressource(note_body)
if format == "md":
return (note_body, note_name)
note_body = '[TOC]\n\n' + note_body
html = md_to_html(note_body, False)
# Finally we set an attachment image to the attachments.
# We search for <a href="/joplin/joplin_ressources"> or <a href=":/">
    soup = BeautifulSoup(html, "html.parser")
for link in soup.findAll('a'):
if "joplin_ressources" in link.get('href') or ":/" == link.get('href')[0:2]:
mime_type_guess = mimetypes.guess_type(link.get_text())
img = soup.new_tag("span", **{'class':mimetype_to_icon(mime_type_guess)})
br = soup.new_tag("br")
link.insert(0, br)
link.insert(0, img)
link['class'] = link.get('class', []) + ['attachment_link']
link['target'] = '_blank'
toc_item = soup.find('div', {"class": "toc"})
if toc_item:
for one_link in toc_item.findAll('a'):
current_href = str(one_link['href'])
new_link = "javascript:scroll_to('" + current_href + "');"
one_link['href'] = new_link
one_link['target'] = ""
html = str(soup)
# Transform [ ] and [x] to checkboxes.
    html = html.replace("<li>[ ] ", '<li><input type="checkbox">')
    html = html.replace("<li>[x] ", '<li><input type="checkbox" checked>')
LastsNotes.set_last(note_id, note_name)
return (html, note_name)
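# Example of the checkbox rewrite above: a task item rendered as
# "<li>[x] buy milk</li>" becomes '<li><input type="checkbox" checked>buy milk</li>',
# which the front-end can toggle and post back through note_checkboxes().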
def public_note(request, note_id):
joplin = Joplin()
tags = joplin.get_note_tags(note_id)
if "public" in [tag.name for tag in tags] :
return render(request, 'joplinvieweb/public_note.html', {"note_id": note_id})
return HttpResponse("not a public note")
def public_note_data(request, note_id):
joplin = Joplin()
tags = joplin.get_note_tags(note_id)
if "public" in [tag.name for tag in tags] :
body, name = note_body_name(note_id, format="html", public=True)
        return HttpResponse(json.dumps({"name": name, "body": body}))
    return HttpResponseNotFound("")
@conditional_decorator(login_required, settings.JOPLIN_LOGIN_REQUIRED)
def note_checkboxes(request, note_id):
cb = json.loads(request.body)
cb = cb["cb"]
Joplin().update_note_checkboxes(note_id, cb)
return HttpResponse("")
@conditional_decorator(login_required, settings.JOPLIN_LOGIN_REQUIRED)
def pin_note(request, note_id):
if request.method == "POST":
LastsNotes.pin_note(note_id, True)
return HttpResponse("")
@conditional_decorator(login_required, settings.JOPLIN_LOGIN_REQUIRED)
def unpin_note(request, note_id):
if request.method == "POST":
LastsNotes.pin_note(note_id, False)
return HttpResponse("")
@conditional_decorator(login_required, settings.JOPLIN_LOGIN_REQUIRED)
def note_tags(request, note_id):
joplin = Joplin()
if request.method == "GET":
note_tags = joplin.get_note_tags(note_id)
return HttpResponse(json.dumps([one_tag.name for one_tag in note_tags]))
# return render(request, 'joplinvieweb/note_tags.html', {"note_tags": note_tags})
if request.method == "POST":
tags = json.loads(request.body)
tags = tags["tags"]
joplin.update_note_tags(note_id, tags)
return HttpResponse("")
@conditional_decorator(login_required, settings.JOPLIN_LOGIN_REQUIRED)
def notebooks_error(request):
return render(request, 'joplinvieweb/notebooks_error.html', {})
@conditional_decorator(login_required, settings.JOPLIN_LOGIN_REQUIRED)
def notebook_error(request, notebook_id):
return render(request, 'joplinvieweb/notebook_error.html', {"notebook_id": notebook_id})
@conditional_decorator(login_required, settings.JOPLIN_LOGIN_REQUIRED)
def tag_notes_error(request, tag_id):
return render(request, 'joplinvieweb/tag_notes_error.html', {"tag_id": tag_id})
@conditional_decorator(login_required, settings.JOPLIN_LOGIN_REQUIRED)
def note_error(request):
return render(request, 'joplinvieweb/note_error.html', {})
@conditional_decorator(login_required, settings.JOPLIN_LOGIN_REQUIRED)
def joplin_ressource(request, ressource_path):
return joplin_public_ressource(request, ressource_path)
def joplin_public_ressource(request, ressource_path):
try:
joplin = Joplin()
name = joplin.get_ressource_name(ressource_path)
ressources_path = settings.JOPLIN_RESSOURCES_PATH
file_path = Path(ressources_path) / Path(ressource_path)
file_path = glob.glob("{}*".format(file_path))[0]
file_path = Path(file_path)
mime_type_guess = mimetypes.guess_type(file_path.name)
ressource_file = open(file_path, 'rb')
if not name:
name = file_path.name
headers = {}
headers["Content-Disposition"] = 'inline; filename="' + name + '"'
        # mimetypes.guess_type() returns a (type, encoding) tuple; only set an explicit
        # content type when the type itself could be guessed.
        if mime_type_guess[0] is not None:
response = HttpResponse(content=ressource_file, content_type=mime_type_guess[0], headers=headers)
else:
response = HttpResponse(content=ressource_file, headers=headers)
    except (IOError, IndexError):
        # Missing file on disk or no resource file matching the requested id.
        response = HttpResponseNotFound("")
return response
@conditional_decorator(login_required, settings.JOPLIN_LOGIN_REQUIRED)
def tags_error(request):
return render(request, 'joplinvieweb/tags_error.html', {})
@conditional_decorator(login_required, settings.JOPLIN_LOGIN_REQUIRED)
def tags(request):
joplin = Joplin()
tags = joplin.get_tags(with_notes=True)
return render(request, 'joplinvieweb/tags_list.html', {"tags": tags})
@conditional_decorator(login_required, settings.JOPLIN_LOGIN_REQUIRED)
def all_tags(request):
joplin = Joplin()
tags = joplin._get_tags()
return HttpResponse(json.dumps(tags))
@conditional_decorator(login_required, settings.JOPLIN_LOGIN_REQUIRED)
def tag_notes(request, tag_id):
joplin = Joplin()
notes_metadata = joplin.get_notes_metadata_from_tag(tag_id)
return render(request, 'joplinvieweb/notes_list.html', {"notes_metadata": notes_metadata})
@conditional_decorator(login_required, settings.JOPLIN_LOGIN_REQUIRED)
def sync_data(request):
sync_info = "N/A"
try:
with open(settings.JOPLIN_SYNC_INFO_FILE, "r") as sync_info_content:
sync_info = sync_info_content.read()
except:
logging.error("cannot read synchro file " + settings.JOPLIN_SYNC_INFO_FILE)
return HttpResponse(json.dumps({"info": sync_info, "output": JoplinSync.get_output(), "error": JoplinSync.get_err()}))
@conditional_decorator(login_required, settings.JOPLIN_LOGIN_REQUIRED)
def do_sync(request):
task = threading.Thread(target=JoplinSync.joplin_sync, args=(settings.JOPLIN_SYNC_INFO_FILE,))
task.daemon = True
task.start()
return HttpResponse("Sync started")
@conditional_decorator(login_required, settings.JOPLIN_LOGIN_REQUIRED)
def upload_note_attachment(request, session_id):
if request.method == 'POST':
for key, value in request.FILES.items():
attachment_id = EditSession.save_file(session_id, value)
return HttpResponse(json.dumps({"data": {"filePath": "/joplin/edit_session_ressource/{}/{}".format(session_id, attachment_id)}}))
@conditional_decorator(login_required, settings.JOPLIN_LOGIN_REQUIRED)
def edit_session(request):
if request.method == 'POST':
session_id = EditSession.create_session()
return HttpResponse(session_id)
@conditional_decorator(login_required, settings.JOPLIN_LOGIN_REQUIRED)
def edit_session_ressource(request, session_id, file):
try:
ressources_path = EditSession.get_path(session_id)
file_path = ressources_path / file
mime_type_guess = mimetypes.guess_type(file_path.name)
ressource_file = open(file_path, 'rb')
        # mimetypes.guess_type() returns a (type, encoding) tuple; check the type itself.
        if mime_type_guess[0] is not None:
response = HttpResponse(
content=ressource_file, content_type=mime_type_guess[0])
else:
response = HttpResponse(content=ressource_file)
except IOError:
response = HttpResponseNotFound("")
return response
@conditional_decorator(login_required, settings.JOPLIN_LOGIN_REQUIRED)
def edit_session_update_note(request, session_id, note_id):
if request.method == 'POST':
note_data = json.loads(request.body)
# md = str(request.body.decode('utf-8'))
md = note_data["markdown"]
title = note_data["title"]
md = EditSession.create_ressources_and_replace_md(session_id, md)
joplin = Joplin()
joplin.update_note(note_id, title, md)
return HttpResponse()
@conditional_decorator(login_required, settings.JOPLIN_LOGIN_REQUIRED)
def edit_session_create_note(request, session_id, notebook_id):
if request.method == 'POST':
note_data = json.loads(request.body)
# md = str(request.body.decode('utf-8'))
md = note_data["markdown"]
title = note_data["title"]
md = EditSession.create_ressources_and_replace_md(session_id, md)
joplin = Joplin()
note_id = joplin.create_note(notebook_id, title, md)
return HttpResponse(note_id)
@conditional_decorator(login_required, settings.JOPLIN_LOGIN_REQUIRED)
def get_lasts_notes(request):
return HttpResponse(LastsNotes.read_lasts_notes())
|
resnet32_network.py
|
"""
Trains a ResNet on the CIFAR10 dataset.
ResNet v1:
[Deep Residual Learning for Image Recognition
](https://arxiv.org/pdf/1512.03385.pdf)
ResNet v2:
[Identity Mappings in Deep Residual Networks
](https://arxiv.org/pdf/1603.05027.pdf)
Model|n|200-epoch accuracy|Original paper accuracy |sec/epoch GTX1080Ti
:------------|--:|-------:|-----------------------:|---:
ResNet20 v1| 3| 92.16 %| 91.25 %|35
ResNet32 v1| 5| 92.46 %| 92.49 %|50
ResNet44 v1| 7| 92.50 %| 92.83 %|70
ResNet56 v1| 9| 92.71 %| 93.03 %|90
ResNet110 v1| 18| 92.65 %| 93.39+-.16 %|165
ResNet164 v1| 27| - %| 94.07 %| -
ResNet1001 v1|N/A| - %| 92.39 %| -
Model|n|200-epoch accuracy|Original paper accuracy |sec/epoch GTX1080Ti
:------------|--:|-------:|-----------------------:|---:
ResNet20 v2| 2| - %| - %|---
ResNet32 v2|N/A| NA %| NA %| NA
ResNet44 v2|N/A| NA %| NA %| NA
ResNet56 v2| 6| 93.01 %| NA %|100
ResNet110 v2| 12| 93.15 %| 93.63 %|180
ResNet164 v2| 18| - %| 94.54 %| -
ResNet1001 v2|111| - %| 95.08+-.14 %| -
"""
from __future__ import print_function
import tensorflow.keras as keras
from tensorflow.keras.layers import Dense, Conv2D, BatchNormalization, Activation
from tensorflow.keras.layers import AveragePooling2D, Input, Flatten
from tensorflow.keras.optimizers import Adam, SGD
from tensorflow.keras.callbacks import ModelCheckpoint, LearningRateScheduler
from tensorflow.keras.callbacks import ReduceLROnPlateau
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.regularizers import l2
from tensorflow.keras import backend as K
from tensorflow.keras.models import Model
from tensorflow.keras.datasets import cifar10
import numpy as np
import os
import multiprocessing
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
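# run_release_gpu: decorator that runs the wrapped function in a separate process so
# that TensorFlow releases all of its GPU memory as soon as that process exits. The
# return value is passed back through a multiprocessing.Manager dict; calling the
# wrapped function with same_process=True skips the extra process (useful when debugging).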
def run_release_gpu(func):
def parallel_wrapper(output_dict, *argv, **kwargs):
ret = func(*argv, **kwargs)
if ret is not None:
output_dict['ret'] = ret
def outer_wrapper(*argv, **kwargs):
same_process = kwargs.pop('same_process', False)
if same_process:
return func(*argv, **kwargs)
with multiprocessing.Manager() as manager:
output = manager.dict()
args = (output, ) + argv
p = multiprocessing.Process(target=parallel_wrapper, args=args, kwargs=kwargs)
p.start()
p.join()
ret_val = output.get('ret', None)
return ret_val
return outer_wrapper
def lr_schedule(epoch):
"""Learning Rate Schedule
Learning rate is scheduled to be reduced after 80, 120, 160, 180 epochs.
Called automatically every epoch as part of callbacks during training.
# Arguments
epoch (int): The number of epochs
# Returns
lr (float32): learning rate
"""
lr = 1e-3
if epoch > 180:
lr *= 0.5e-3
elif epoch > 160:
lr *= 1e-3
elif epoch > 120:
lr *= 1e-2
elif epoch > 80:
lr *= 1e-1
print('Learning rate: ', lr)
return lr
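# Hedged sketch (this helper is not part of the original script and is never called):
# lr_schedule() above is normally attached to training through the LearningRateScheduler
# callback imported at the top of this file; train_resnet() below only uses
# ReduceLROnPlateau, so this shows the intended wiring.
def _example_lr_schedule_callback():
    # Keras invokes lr_schedule(epoch) at the start of every epoch and applies the result.
    return LearningRateScheduler(lr_schedule)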
def resnet_layer(inputs,
num_filters=16,
kernel_size=3,
strides=1,
activation='relu',
batch_normalization=True,
conv_first=True):
"""2D Convolution-Batch Normalization-Activation stack builder
# Arguments
inputs (tensor): input tensor from input image or previous layer
num_filters (int): Conv2D number of filters
kernel_size (int): Conv2D square kernel dimensions
strides (int): Conv2D square stride dimensions
activation (string): activation name
batch_normalization (bool): whether to include batch normalization
conv_first (bool): conv-bn-activation (True) or
bn-activation-conv (False)
# Returns
x (tensor): tensor as input to the next layer
"""
conv = Conv2D(num_filters,
kernel_size=kernel_size,
strides=strides,
padding='same',
kernel_initializer='he_normal',
kernel_regularizer=l2(1e-4))
x = inputs
if conv_first:
x = conv(x)
if batch_normalization:
x = BatchNormalization()(x)
if activation is not None:
x = Activation(activation)(x)
else:
if batch_normalization:
x = BatchNormalization()(x)
if activation is not None:
x = Activation(activation)(x)
x = conv(x)
return x
def resnet_v1(input_shape, depth, num_classes=10):
"""ResNet Version 1 Model builder [a]
Stacks of 2 x (3 x 3) Conv2D-BN-ReLU
Last ReLU is after the shortcut connection.
At the beginning of each stage, the feature map size is halved (downsampled)
by a convolutional layer with strides=2, while the number of filters is
    doubled. Within each stage, the layers have the same number of filters and
    the same feature map sizes.
Features maps sizes:
stage 0: 32x32, 16
stage 1: 16x16, 32
stage 2: 8x8, 64
The Number of parameters is approx the same as Table 6 of [a]:
ResNet20 0.27M
ResNet32 0.46M
ResNet44 0.66M
ResNet56 0.85M
ResNet110 1.7M
# Arguments
input_shape (tensor): shape of input image tensor
depth (int): number of core convolutional layers
num_classes (int): number of classes (CIFAR10 has 10)
# Returns
model (Model): Keras model instance
"""
if (depth - 2) % 6 != 0:
raise ValueError('depth should be 6n+2 (eg 20, 32, 44 in [a])')
# Start model definition.
num_filters = 16
num_res_blocks = int((depth - 2) / 6)
inputs = Input(shape=input_shape)
x = resnet_layer(inputs=inputs)
# Instantiate the stack of residual units
for stack in range(3):
for res_block in range(num_res_blocks):
strides = 1
if stack > 0 and res_block == 0: # first layer but not first stack
strides = 2 # downsample
y = resnet_layer(inputs=x,
num_filters=num_filters,
strides=strides)
y = resnet_layer(inputs=y,
num_filters=num_filters,
activation=None)
if stack > 0 and res_block == 0: # first layer but not first stack
# linear projection residual shortcut connection to match
# changed dims
x = resnet_layer(inputs=x,
num_filters=num_filters,
kernel_size=1,
strides=strides,
activation=None,
batch_normalization=False)
x = keras.layers.add([x, y])
x = Activation('relu')(x)
num_filters *= 2
# Add classifier on top.
# v1 does not use BN after last shortcut connection-ReLU
x = AveragePooling2D(pool_size=8)(x)
y = Flatten()(x)
outputs = Dense(num_classes,
activation='softmax',
kernel_initializer='he_normal')(y)
# Instantiate model.
model = Model(inputs=inputs, outputs=outputs)
return model
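# Hedged sketch (not part of the original script and never called): building the
# ResNet32 v1 model that gives this file its name, using the depth = 6n + 2 rule from
# the docstring with n = 5 and CIFAR10's 32x32 RGB input shape.
def _example_build_resnet32_v1():
    model = resnet_v1(input_shape=(32, 32, 3), depth=6 * 5 + 2)  # depth = 32
    model.summary()
    return model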
def resnet_v2(input_shape, depth, num_classes=10):
"""ResNet Version 2 Model builder [b]
    Stacks of (1 x 1)-(3 x 3)-(1 x 1) BN-ReLU-Conv2D, also known as a
    bottleneck layer.
First shortcut connection per layer is 1 x 1 Conv2D.
Second and onwards shortcut connection is identity.
At the beginning of each stage, the feature map size is halved (downsampled)
by a convolutional layer with strides=2, while the number of filter maps is
    doubled. Within each stage, the layers have the same number of filters and
    the same filter map sizes.
Features maps sizes:
conv1 : 32x32, 16
stage 0: 32x32, 64
stage 1: 16x16, 128
stage 2: 8x8, 256
# Arguments
input_shape (tensor): shape of input image tensor
depth (int): number of core convolutional layers
num_classes (int): number of classes (CIFAR10 has 10)
# Returns
model (Model): Keras model instance
"""
if (depth - 2) % 9 != 0:
raise ValueError('depth should be 9n+2 (eg 56 or 110 in [b])')
# Start model definition.
num_filters_in = 16
num_res_blocks = int((depth - 2) / 9)
inputs = Input(shape=input_shape)
# v2 performs Conv2D with BN-ReLU on input before splitting into 2 paths
x = resnet_layer(inputs=inputs,
num_filters=num_filters_in,
conv_first=True)
# Instantiate the stack of residual units
for stage in range(3):
for res_block in range(num_res_blocks):
activation = 'relu'
batch_normalization = True
strides = 1
if stage == 0:
num_filters_out = num_filters_in * 4
if res_block == 0: # first layer and first stage
activation = None
batch_normalization = False
else:
num_filters_out = num_filters_in * 2
if res_block == 0: # first layer but not first stage
strides = 2 # downsample
# bottleneck residual unit
y = resnet_layer(inputs=x,
num_filters=num_filters_in,
kernel_size=1,
strides=strides,
activation=activation,
batch_normalization=batch_normalization,
conv_first=False)
y = resnet_layer(inputs=y,
num_filters=num_filters_in,
conv_first=False)
y = resnet_layer(inputs=y,
num_filters=num_filters_out,
kernel_size=1,
conv_first=False)
if res_block == 0:
# linear projection residual shortcut connection to match
# changed dims
x = resnet_layer(inputs=x,
num_filters=num_filters_out,
kernel_size=1,
strides=strides,
activation=None,
batch_normalization=False)
x = keras.layers.add([x, y])
num_filters_in = num_filters_out
# Add classifier on top.
# v2 has BN-ReLU before Pooling
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = AveragePooling2D(pool_size=8)(x)
y = Flatten()(x)
outputs = Dense(num_classes,
activation='softmax',
kernel_initializer='he_normal')(y)
# Instantiate model.
model = Model(inputs=inputs, outputs=outputs)
return model
@run_release_gpu
def train_resnet(x_train, y_train, x_test, y_test, epochs=10, learning_rate=0.001, momentum=0, weight_decay=0.001, savepath='./samples.txt'):
    # Training parameters
    batch_size = 128  # orig paper trained all networks with batch_size=128
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 5
# Model version
# Orig paper: version = 1 (ResNet v1), Improved ResNet: version = 2 (ResNet v2)
version = 1
# Computed depth from supplied model parameter n
if version == 1:
depth = n * 6 + 2
elif version == 2:
depth = n * 9 + 2
# Model name, depth and version
model_type = 'ResNet%dv%d' % (depth, version)
# Load the CIFAR10 data.
#(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Input image dimensions.
input_shape = x_train.shape[1:]
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
# print('x_train shape:', x_train.shape)
# print(x_train.shape[0], 'train samples')
# print(x_test.shape[0], 'test samples')
# print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if version == 2:
model = resnet_v2(input_shape=input_shape, depth=depth)
else:
model = resnet_v1(input_shape=input_shape, depth=depth)
model.compile(loss='categorical_crossentropy',
optimizer=SGD(lr=learning_rate, momentum=momentum),
metrics=['accuracy'])
#model.summary()
#print(model_type)
# Prepare model model saving directory.
# save_dir = os.path.join(os.getcwd(), 'saved_models')
# model_name = 'cifar10_%s_model-128.%d.%f.%f.%f.h5' % (model_type, epochs, momentum, learning_rate, weight_decay)
# if not os.path.isdir(save_dir):
# os.makedirs(save_dir)
# filepath = os.path.join(save_dir, model_name)
# # Prepare callbacks for model saving and for learning rate adjustment.
# checkpoint = ModelCheckpoint(filepath=filepath,
# monitor='val_acc',
# verbose=1,
# save_best_only=True)
#lr_scheduler = LearningRateScheduler(lr_schedule)
lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1),
cooldown=0,
patience=5,
min_lr=0.5e-6)
callbacks = [lr_reducer]
# Run training, with or without data augmentation.
if not data_augmentation:
#print('Not using data augmentation.')
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks)
else:
#print('Using real-time data augmentation.')
# This will do preprocessing and realtime data augmentation:
datagen = ImageDataGenerator(
# set input mean to 0 over the dataset
featurewise_center=False,
# set each sample mean to 0
samplewise_center=False,
# divide inputs by std of dataset
featurewise_std_normalization=False,
# divide each input by its std
samplewise_std_normalization=False,
# apply ZCA whitening
zca_whitening=False,
# epsilon for ZCA whitening
zca_epsilon=1e-06,
# randomly rotate images in the range (deg 0 to 180)
rotation_range=0,
# randomly shift images horizontally
width_shift_range=0.1,
# randomly shift images vertically
height_shift_range=0.1,
# set range for random shear
shear_range=0.,
# set range for random zoom
zoom_range=0.,
# set range for random channel shifts
channel_shift_range=0.,
# set mode for filling points outside the input boundaries
fill_mode='nearest',
# value used for fill_mode = "constant"
cval=0.,
# randomly flip images
horizontal_flip=True,
# randomly flip images
vertical_flip=False,
# set rescaling factor (applied before any other transformation)
rescale=None,
# set function that will be applied on each input
preprocessing_function=None,
# image data format, either "channels_first" or "channels_last"
data_format=None,
# fraction of images reserved for validation (strictly between 0 and 1)
validation_split=0.0)
# Compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied).
datagen.fit(x_train)
# Fit the model on the batches generated by datagen.flow().
model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
validation_data=(x_test, y_test),
epochs=epochs, verbose=0, workers=4,
callbacks=callbacks)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
# print('Test loss:', scores[0])
# print('Test accuracy:', scores[1])
# f= open(savepath,"a+")
# f.write("e-%d-wd-%f-lr-%f-m-%f-acc-%f-loss-%f\r\n" % (epochs, weight_decay, learning_rate, momentum, scores[1], scores[0]))
# f.close()
return scores[1]
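# Hedged sketch (not part of the original script and never executed on import): how
# train_resnet() is expected to be driven. cifar10.load_data() mirrors the commented-out
# call inside train_resnet(); thanks to @run_release_gpu the training runs in a child
# process and the GPU is released once the call returns.
def _example_train_resnet_on_cifar10():
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    return train_resnet(x_train, y_train, x_test, y_test, epochs=1, learning_rate=0.01)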
|
gps_stability_test.py
|
#!/usr/bin/env python3
# flake8: noqa
import os
import sys
import time
import random
import threading
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), ".."))
from panda import Panda, PandaSerial # noqa: E402
INIT_GPS_BAUD = 9600
GPS_BAUD = 460800
def connect():
pandas = Panda.list()
print(pandas)
# make sure two pandas are connected
if len(pandas) != 2:
print("Connect white and grey/black panda to run this test!")
assert False
# connect
pandas[0] = Panda(pandas[0])
pandas[1] = Panda(pandas[1])
white_panda = None
gps_panda = None
# find out which one is white (for spamming the CAN buses)
if pandas[0].is_white() and not pandas[1].is_white():
white_panda = pandas[0]
gps_panda = pandas[1]
elif not pandas[0].is_white() and pandas[1].is_white():
white_panda = pandas[1]
gps_panda = pandas[0]
else:
print("Connect white and grey/black panda to run this test!")
assert False
return white_panda, gps_panda
def spam_buses_thread(panda):
try:
panda.set_safety_mode(Panda.SAFETY_ALLOUTPUT)
while True:
at = random.randint(1, 2000)
st = (b"test" + os.urandom(10))[0:8]
bus = random.randint(0, 2)
panda.can_send(at, st, bus)
except Exception as e:
print(e)
def read_can_thread(panda):
try:
while True:
panda.can_recv()
except Exception as e:
print(e)
def init_gps(panda):
def add_nmea_checksum(msg):
d = msg[1:]
cs = 0
for i in d:
cs ^= ord(i)
return msg + "*%02X" % cs
ser = PandaSerial(panda, 1, INIT_GPS_BAUD)
# Power cycle the gps by toggling reset
print("Resetting GPS")
panda.set_esp_power(0)
time.sleep(0.5)
panda.set_esp_power(1)
time.sleep(0.5)
# Upping baud rate
print("Upping GPS baud rate")
msg = str.encode(add_nmea_checksum("$PUBX,41,1,0007,0003,%d,0" % GPS_BAUD) + "\r\n")
ser.write(msg)
time.sleep(1) # needs a wait for it to actually send
# Reconnecting with the correct baud
ser = PandaSerial(panda, 1, GPS_BAUD)
# Sending all config messages boardd sends
print("Sending config")
ser.write(b"\xB5\x62\x06\x00\x14\x00\x03\xFF\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x01\x00\x00\x00\x00\x00\x1E\x7F")
ser.write(b"\xB5\x62\x06\x3E\x00\x00\x44\xD2")
ser.write(b"\xB5\x62\x06\x00\x14\x00\x00\xFF\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x19\x35")
ser.write(b"\xB5\x62\x06\x00\x14\x00\x01\x00\x00\x00\xC0\x08\x00\x00\x00\x08\x07\x00\x01\x00\x01\x00\x00\x00\x00\x00\xF4\x80")
ser.write(b"\xB5\x62\x06\x00\x14\x00\x04\xFF\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1D\x85")
ser.write(b"\xB5\x62\x06\x00\x00\x00\x06\x18")
ser.write(b"\xB5\x62\x06\x00\x01\x00\x01\x08\x22")
ser.write(b"\xB5\x62\x06\x00\x01\x00\x02\x09\x23")
ser.write(b"\xB5\x62\x06\x00\x01\x00\x03\x0A\x24")
ser.write(b"\xB5\x62\x06\x08\x06\x00\x64\x00\x01\x00\x00\x00\x79\x10")
ser.write(b"\xB5\x62\x06\x24\x24\x00\x05\x00\x04\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x5A\x63")
ser.write(b"\xB5\x62\x06\x1E\x14\x00\x00\x00\x00\x00\x01\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3C\x37")
ser.write(b"\xB5\x62\x06\x24\x00\x00\x2A\x84")
ser.write(b"\xB5\x62\x06\x23\x00\x00\x29\x81")
ser.write(b"\xB5\x62\x06\x1E\x00\x00\x24\x72")
ser.write(b"\xB5\x62\x06\x01\x03\x00\x01\x07\x01\x13\x51")
ser.write(b"\xB5\x62\x06\x01\x03\x00\x02\x15\x01\x22\x70")
ser.write(b"\xB5\x62\x06\x01\x03\x00\x02\x13\x01\x20\x6C")
print("Initialized GPS")
received_messages = 0
received_bytes = 0
send_something = False
def gps_read_thread(panda):
global received_messages, received_bytes, send_something
ser = PandaSerial(panda, 1, GPS_BAUD)
while True:
ret = ser.read(1024)
time.sleep(0.001)
if len(ret):
received_messages += 1
received_bytes += len(ret)
if send_something:
ser.write("test")
send_something = False
CHECK_PERIOD = 5
MIN_BYTES = 10000
MAX_BYTES = 50000
min_failures = 0
max_failures = 0
if __name__ == "__main__":
white_panda, gps_panda = connect()
# Start spamming the CAN buses with the white panda. Also read the messages to add load on the GPS panda
threading.Thread(target=spam_buses_thread, args=(white_panda,)).start()
threading.Thread(target=read_can_thread, args=(gps_panda,)).start()
# Start GPS checking
init_gps(gps_panda)
read_thread = threading.Thread(target=gps_read_thread, args=(gps_panda,))
read_thread.start()
while True:
time.sleep(CHECK_PERIOD)
if(received_bytes < MIN_BYTES):
print("Panda is not sending out enough data! Got " + str(received_messages) + " (" + str(received_bytes) + "B) in the last " + str(CHECK_PERIOD) + " seconds")
send_something = True
min_failures += 1
elif(received_bytes > MAX_BYTES):
print("Panda is not sending out too much data! Got " + str(received_messages) + " (" + str(received_bytes) + "B) in the last " + str(CHECK_PERIOD) + " seconds")
print("Probably not on the right baud rate, got reset somehow? Resetting...")
max_failures += 1
init_gps(gps_panda)
else:
print("Got " + str(received_messages) + " (" + str(received_bytes) + "B) messages in the last " + str(CHECK_PERIOD) + " seconds.")
if(min_failures > 0):
print("Total min failures: ", min_failures)
if(max_failures > 0):
print("Total max failures: ", max_failures)
received_messages = 0
received_bytes = 0
|
lenovo_fix.py
|
#!/usr/bin/env python3
from __future__ import print_function
import argparse
import configparser
import dbus
import glob
import gzip
import os
import re
import struct
import subprocess
import sys
from collections import defaultdict
from dbus.mainloop.glib import DBusGMainLoop
from errno import EACCES, EPERM
from gi.repository import GLib
from mmio import MMIO, MMIOError
from multiprocessing import cpu_count
from platform import uname
from threading import Event, Thread
from time import time
DEFAULT_SYSFS_POWER_PATH = '/sys/class/power_supply/AC*/online'
VOLTAGE_PLANES = {'CORE': 0, 'GPU': 1, 'CACHE': 2, 'UNCORE': 3, 'ANALOGIO': 4}
CURRENT_PLANES = {'CORE': 0, 'GPU': 1, 'CACHE': 2}
TRIP_TEMP_RANGE = [40, 97]
UNDERVOLT_KEYS = ('UNDERVOLT', 'UNDERVOLT.AC', 'UNDERVOLT.BATTERY')
ICCMAX_KEYS = ('ICCMAX', 'ICCMAX.AC', 'ICCMAX.BATTERY')
power = {'source': None, 'method': 'polling'}
platform_info_bits = {
'maximum_non_turbo_ratio': [8, 15],
'maximum_efficiency_ratio': [40, 47],
'minimum_operating_ratio': [48, 55],
'feature_ppin_cap': [23, 23],
'feature_programmable_turbo_ratio': [28, 28],
'feature_programmable_tdp_limit': [29, 29],
'number_of_additional_tdp_profiles': [33, 34],
'feature_programmable_temperature_target': [30, 30],
'feature_low_power_mode': [32, 32],
}
thermal_status_bits = {
'thermal_limit_status': [0, 0],
'thermal_limit_log': [1, 1],
'prochot_or_forcepr_status': [2, 2],
'prochot_or_forcepr_log': [3, 3],
'crit_temp_status': [4, 4],
'crit_temp_log': [5, 5],
'thermal_threshold1_status': [6, 6],
'thermal_threshold1_log': [7, 7],
'thermal_threshold2_status': [8, 8],
'thermal_threshold2_log': [9, 9],
'power_limit_status': [10, 10],
'power_limit_log': [11, 11],
'current_limit_status': [12, 12],
'current_limit_log': [13, 13],
'cross_domain_limit_status': [14, 14],
'cross_domain_limit_log': [15, 15],
'cpu_temp': [16, 22],
'temp_resolution': [27, 30],
'reading_valid': [31, 31],
}
supported_cpus = {
'Haswell': (0x3C, 0x3F, 0x45, 0x46),
'Broadwell': (0x3D, 0x47, 0x4F, 0x56),
'Skylake': (0x4E, 0x55),
'Skylake-S': (0x5E,),
'Ice Lake': (0x7E,),
'Kaby Lake (R)': (0x8E, 0x9E),
'Coffee Lake': (0x9E,),
'Cannon Lake': (0x66,),
}
class bcolors:
YELLOW = '\033[93m'
GREEN = '\033[92m'
RED = '\033[91m'
RESET = '\033[0m'
BOLD = '\033[1m'
OK = bcolors.GREEN + bcolors.BOLD + 'OK' + bcolors.RESET
ERR = bcolors.RED + bcolors.BOLD + 'ERR' + bcolors.RESET
LIM = bcolors.YELLOW + bcolors.BOLD + 'LIM' + bcolors.RESET
def fatal(msg, code=1):
print('[E] {:s}'.format(msg), file=sys.stderr)
sys.exit(code)
def warning(msg):
print('[W] {:s}'.format(msg), file=sys.stderr)
def writemsr(msr, val):
msr_list = ['/dev/cpu/{:d}/msr'.format(x) for x in range(cpu_count())]
if not os.path.exists(msr_list[0]):
try:
subprocess.check_call(('modprobe', 'msr'))
except subprocess.CalledProcessError:
fatal('Unable to load the msr module.')
try:
for addr in msr_list:
f = os.open(addr, os.O_WRONLY)
os.lseek(f, msr, os.SEEK_SET)
os.write(f, struct.pack('Q', val))
os.close(f)
except (IOError, OSError) as e:
if e.errno == EPERM or e.errno == EACCES:
fatal(
'Unable to write to MSR. Try to disable Secure Boot '
'and check if your kernel does not restrict access to MSR.'
)
else:
raise e
# returns the value between from_bit and to_bit as unsigned long
def readmsr(msr, from_bit=0, to_bit=63, cpu=None, flatten=False):
assert cpu is None or cpu in range(cpu_count())
if from_bit > to_bit:
fatal('Wrong readmsr bit params')
msr_list = ['/dev/cpu/{:d}/msr'.format(x) for x in range(cpu_count())]
if not os.path.exists(msr_list[0]):
try:
subprocess.check_call(('modprobe', 'msr'))
except subprocess.CalledProcessError:
fatal('Unable to load the msr module.')
try:
output = []
for addr in msr_list:
f = os.open(addr, os.O_RDONLY)
os.lseek(f, msr, os.SEEK_SET)
val = struct.unpack('Q', os.read(f, 8))[0]
os.close(f)
output.append(get_value_for_bits(val, from_bit, to_bit))
if flatten:
return output[0] if len(set(output)) == 1 else output
return output[cpu] if cpu is not None else output
except (IOError, OSError) as e:
if e.errno == EPERM or e.errno == EACCES:
fatal('Unable to read from MSR. Try to disable Secure Boot.')
else:
raise e
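# cpu_usage_pct samples /proc/stat twice, `interval` seconds apart, and returns the
# percentage of non-idle CPU time between the two samples (the 4th field is idle time).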
def cpu_usage_pct(exit_event, interval=1.0):
last_idle = last_total = 0
for i in range(2):
with open('/proc/stat') as f:
fields = [float(column) for column in f.readline().strip().split()[1:]]
idle, total = fields[3], sum(fields)
idle_delta, total_delta = idle - last_idle, total - last_total
last_idle, last_total = idle, total
if i == 0:
exit_event.wait(interval)
return 100.0 * (1.0 - idle_delta / total_delta)
def get_value_for_bits(val, from_bit=0, to_bit=63):
mask = sum(2 ** x for x in range(from_bit, to_bit + 1))
return (val & mask) >> from_bit
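# Hedged sketch (not part of the original script and never called): get_value_for_bits()
# extracts a bit field as an unsigned integer, e.g. bits 8..15 of 0xABCD are 0xAB and
# bits 0..3 are 0xD.
def _example_get_value_for_bits():
    assert get_value_for_bits(0xABCD, 8, 15) == 0xAB
    assert get_value_for_bits(0xABCD, 0, 3) == 0xD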
def is_on_battery(config):
try:
for path in glob.glob(config.get('GENERAL', 'Sysfs_Power_Path', fallback=DEFAULT_SYSFS_POWER_PATH)):
with open(path) as f:
return not bool(int(f.read()))
raise
except:
warning('No valid Sysfs_Power_Path found! Trying upower method #1')
try:
out = subprocess.check_output(('upower', '-i', '/org/freedesktop/UPower/devices/line_power_AC'))
res = re.search(rb'online:\s+(yes|no)', out).group(1).decode().strip()
if res == 'yes':
return False
elif res == 'no':
return True
raise
except:
warning('Trying upower method #2')
try:
out = subprocess.check_output(('upower', '-i', '/org/freedesktop/UPower/devices/battery_BAT0'))
res = re.search(rb'state:\s+(.+)', out).group(1).decode().strip()
if res == 'discharging':
return True
elif res in ('fully-charged', 'charging'):
return False
except:
pass
warning('No valid power detection methods found. Assuming that the system is running on battery power.')
return True
def get_cpu_platform_info():
features_msr_value = readmsr(0xCE, cpu=0)
cpu_platform_info = {}
for key, value in platform_info_bits.items():
cpu_platform_info[key] = int(get_value_for_bits(features_msr_value, value[0], value[1]))
return cpu_platform_info
def get_reset_thermal_status():
# read thermal status
thermal_status_msr_value = readmsr(0x19C)
thermal_status = []
for core in range(cpu_count()):
thermal_status_core = {}
for key, value in thermal_status_bits.items():
thermal_status_core[key] = int(get_value_for_bits(thermal_status_msr_value[core], value[0], value[1]))
thermal_status.append(thermal_status_core)
# reset log bits
writemsr(0x19C, 0)
return thermal_status
def get_time_unit():
# 0.000977 is the time unit of my CPU
# TODO formula might be different for other CPUs
return 1.0 / 2 ** readmsr(0x606, 16, 19, cpu=0)
def get_power_unit():
# 0.125 is the power unit of my CPU
# TODO formula might be different for other CPUs
return 1.0 / 2 ** readmsr(0x606, 0, 3, cpu=0)
def get_critical_temp():
# the critical temperature for my CPU is 100 'C
return readmsr(0x1A2, 16, 23, cpu=0)
def get_cur_pkg_power_limits():
value = readmsr(0x610, 0, 55, flatten=True)
return {
'PL1': get_value_for_bits(value, 0, 14),
'TW1': get_value_for_bits(value, 17, 23),
'PL2': get_value_for_bits(value, 32, 46),
'TW2': get_value_for_bits(value, 49, 55),
}
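# calc_time_window_vars walks the RAPL time-window encoding space and returns the first
# (Y, Z) pair whose encoded window 2**Y * (1 + Z/4) * time_unit covers at least t seconds.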
def calc_time_window_vars(t):
time_unit = get_time_unit()
for Y in range(2 ** 5):
for Z in range(2 ** 2):
if t <= (2 ** Y) * (1.0 + Z / 4.0) * time_unit:
return (Y, Z)
raise ValueError('Unable to find a good combination!')
def calc_undervolt_msr(plane, offset):
"""Return the value to be written in the MSR 150h for setting the given
offset voltage (in mV) to the given voltage plane.
"""
assert offset <= 0
assert plane in VOLTAGE_PLANES
offset = int(round(offset * 1.024))
offset = 0xFFE00000 & ((offset & 0xFFF) << 21)
return 0x8000001100000000 | (VOLTAGE_PLANES[plane] << 40) | offset
def calc_undervolt_mv(msr_value):
"""Return the offset voltage (in mV) from the given raw MSR 150h value.
"""
offset = (msr_value & 0xFFE00000) >> 21
offset = offset if offset <= 0x400 else -(0x800 - offset)
return int(round(offset / 1.024))
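# Hedged sketch (not part of the original script and never called): round-tripping a
# -50 mV CORE-plane offset through calc_undervolt_msr() and calc_undervolt_mv() to
# sanity-check the bit packing; rounding through the 1.024 steps can shift it by 1 mV.
def _example_undervolt_roundtrip(offset_mv=-50.0):
    msr_value = calc_undervolt_msr('CORE', offset_mv)
    decoded_mv = calc_undervolt_mv(msr_value & 0xFFFFFFFF)
    assert abs(decoded_mv - offset_mv) <= 1
    return msr_value, decoded_mv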
def get_undervolt(plane=None, convert=False):
planes = [plane] if plane in VOLTAGE_PLANES else VOLTAGE_PLANES
out = {}
for plane in planes:
writemsr(0x150, 0x8000001000000000 | (VOLTAGE_PLANES[plane] << 40))
read_value = readmsr(0x150, flatten=True) & 0xFFFFFFFF
out[plane] = calc_undervolt_mv(read_value) if convert else read_value
return out
def undervolt(config):
for plane in VOLTAGE_PLANES:
write_offset_mv = config.getfloat(
'UNDERVOLT.{:s}'.format(power['source']), plane, fallback=config.getfloat('UNDERVOLT', plane, fallback=0.0)
)
write_value = calc_undervolt_msr(plane, write_offset_mv)
writemsr(0x150, write_value)
if args.debug:
write_value &= 0xFFFFFFFF
read_value = get_undervolt(plane)[plane]
read_offset_mv = calc_undervolt_mv(read_value)
match = OK if write_value == read_value else ERR
print(
'[D] Undervolt plane {:s} - write {:.0f} mV ({:#x}) - read {:.0f} mV ({:#x}) - match {}'.format(
plane, write_offset_mv, write_value, read_offset_mv, read_value, match
)
)
def calc_icc_max_msr(plane, current):
"""Return the value to be written in the MSR 150h for setting the given
IccMax (in A) to the given current plane.
"""
assert 0 < current <= 0x3FF
assert plane in CURRENT_PLANES
current = int(round(current * 4))
return 0x8000001700000000 | (CURRENT_PLANES[plane] << 40) | current
def calc_icc_max_amp(msr_value):
"""Return the max current (in A) from the given raw MSR 150h value.
"""
return (msr_value & 0x3FF) / 4.0
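# Hedged sketch (not part of the original script and never called): the IccMax helpers
# use 1/4 A steps, so packing 5.0 A for the CORE plane and unpacking the low 10 bits
# gives 5.0 A back.
def _example_icc_max_roundtrip(current_a=5.0):
    msr_value = calc_icc_max_msr('CORE', current_a)
    assert calc_icc_max_amp(msr_value & 0x3FF) == current_a
    return msr_value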
def get_icc_max(plane=None, convert=False):
planes = [plane] if plane in CURRENT_PLANES else CURRENT_PLANES
out = {}
for plane in planes:
writemsr(0x150, 0x8000001600000000 | (CURRENT_PLANES[plane] << 40))
read_value = readmsr(0x150, flatten=True) & 0x3FF
out[plane] = calc_icc_max_amp(read_value) if convert else read_value
return out
def set_icc_max(config):
for plane in CURRENT_PLANES:
try:
write_current_amp = config.getfloat(
'ICCMAX.{:s}'.format(power['source']), plane, fallback=config.getfloat('ICCMAX', plane, fallback=-1.0)
)
if write_current_amp > 0:
write_value = calc_icc_max_msr(plane, write_current_amp)
writemsr(0x150, write_value)
if args.debug:
write_value &= 0x3FF
read_value = get_icc_max(plane)[plane]
read_current_A = calc_icc_max_amp(read_value)
match = OK if write_value == read_value else ERR
print(
'[D] IccMax plane {:s} - write {:.2f} A ({:#x}) - read {:.2f} A ({:#x}) - match {}'.format(
plane, write_current_amp, write_value, read_current_A, read_value, match
)
)
except (configparser.NoSectionError, configparser.NoOptionError):
pass
def load_config():
config = configparser.ConfigParser()
config.read(args.config)
# config values sanity check
for power_source in ('AC', 'BATTERY'):
for option in ('Update_Rate_s', 'PL1_Tdp_W', 'PL1_Duration_s', 'PL2_Tdp_W', 'PL2_Duration_S'):
value = config.getfloat(power_source, option, fallback=None)
if value is not None:
value = config.set(power_source, option, str(max(0.1, value)))
elif option == 'Update_Rate_s':
fatal('The mandatory "Update_Rate_s" parameter is missing.')
trip_temp = config.getfloat(power_source, 'Trip_Temp_C', fallback=None)
if trip_temp is not None:
valid_trip_temp = min(TRIP_TEMP_RANGE[1], max(TRIP_TEMP_RANGE[0], trip_temp))
if trip_temp != valid_trip_temp:
config.set(power_source, 'Trip_Temp_C', str(valid_trip_temp))
print(
'[!] Overriding invalid "Trip_Temp_C" value in "{:s}": {:.1f} -> {:.1f}'.format(
power_source, trip_temp, valid_trip_temp
)
)
# fix any invalid value (ie. > 0) in the undervolt settings
for key in UNDERVOLT_KEYS:
for plane in VOLTAGE_PLANES:
if key in config:
value = config.getfloat(key, plane)
valid_value = min(0, value)
if value != valid_value:
config.set(key, plane, str(valid_value))
print(
'[!] Overriding invalid "{:s}" value in "{:s}" voltage plane: {:.0f} -> {:.0f}'.format(
key, plane, value, valid_value
)
)
# handle the case where only one of UNDERVOLT.AC, UNDERVOLT.BATTERY keys exists
# by forcing the other key to all zeros (ie. no undervolt)
if any(key in config for key in UNDERVOLT_KEYS[1:]):
for key in UNDERVOLT_KEYS[1:]:
if key not in config:
config.add_section(key)
for plane in VOLTAGE_PLANES:
value = config.getfloat(key, plane, fallback=0.0)
config.set(key, plane, str(value))
iccmax_enabled = False
# check for invalid values (ie. <= 0 or > 0x3FF) in the IccMax settings
for key in ICCMAX_KEYS:
for plane in CURRENT_PLANES:
if key in config:
try:
value = config.getfloat(key, plane)
if value <= 0 or value >= 0x3FF:
raise ValueError
iccmax_enabled = True
except ValueError:
warning('Invalid value for {:s} in {:s}'.format(plane, key))
config.remove_option(key, plane)
except configparser.NoOptionError:
pass
if iccmax_enabled:
warning('Warning! Raising IccMax above design limits can damage your system!')
return config
def calc_reg_values(platform_info, config):
regs = defaultdict(dict)
for power_source in ('AC', 'BATTERY'):
if platform_info['feature_programmable_temperature_target'] != 1:
warning("Setting temperature target is not supported by this CPU")
else:
# the critical temperature for my CPU is 100 'C
critical_temp = get_critical_temp()
# update the allowed temp range to keep at least 3 'C from the CPU critical temperature
global TRIP_TEMP_RANGE
TRIP_TEMP_RANGE[1] = min(TRIP_TEMP_RANGE[1], critical_temp - 3)
Trip_Temp_C = config.getfloat(power_source, 'Trip_Temp_C', fallback=None)
if Trip_Temp_C is not None:
trip_offset = int(round(critical_temp - Trip_Temp_C))
regs[power_source]['MSR_TEMPERATURE_TARGET'] = trip_offset << 24
else:
print('[I] {:s} trip temperature is disabled in config.'.format(power_source))
power_unit = get_power_unit()
PL1_Tdp_W = config.getfloat(power_source, 'PL1_Tdp_W', fallback=None)
PL1_Duration_s = config.getfloat(power_source, 'PL1_Duration_s', fallback=None)
PL2_Tdp_W = config.getfloat(power_source, 'PL2_Tdp_W', fallback=None)
PL2_Duration_s = config.getfloat(power_source, 'PL2_Duration_s', fallback=None)
if (PL1_Tdp_W, PL1_Duration_s, PL2_Tdp_W, PL2_Duration_s).count(None) < 4:
cur_pkg_power_limits = get_cur_pkg_power_limits()
if PL1_Tdp_W is None:
PL1 = cur_pkg_power_limits['PL1']
print('[I] {:s} PL1_Tdp_W disabled in config.'.format(power_source))
else:
PL1 = int(round(PL1_Tdp_W / power_unit))
if PL1_Duration_s is None:
TW1 = cur_pkg_power_limits['TW1']
print('[I] {:s} PL1_Duration_s disabled in config.'.format(power_source))
else:
Y, Z = calc_time_window_vars(PL1_Duration_s)
TW1 = Y | (Z << 5)
if PL2_Tdp_W is None:
PL2 = cur_pkg_power_limits['PL2']
print('[I] {:s} PL2_Tdp_W disabled in config.'.format(power_source))
else:
PL2 = int(round(PL2_Tdp_W / power_unit))
if PL2_Duration_s is None:
TW2 = cur_pkg_power_limits['TW2']
print('[I] {:s} PL2_Duration_s disabled in config.'.format(power_source))
else:
Y, Z = calc_time_window_vars(PL2_Duration_s)
TW2 = Y | (Z << 5)
regs[power_source]['MSR_PKG_POWER_LIMIT'] = (
PL1 | (1 << 15) | (TW1 << 17) | (PL2 << 32) | (1 << 47) | (TW2 << 49)
)
else:
print('[I] {:s} package power limits are disabled in config.'.format(power_source))
# cTDP
c_tdp_target_value = config.getint(power_source, 'cTDP', fallback=None)
if c_tdp_target_value is not None:
if platform_info['feature_programmable_tdp_limit'] != 1:
print("[W] cTDP setting not supported by this CPU")
elif platform_info['number_of_additional_tdp_profiles'] < c_tdp_target_value:
print("[W] the configured cTDP profile is not supported by this CPU")
else:
valid_c_tdp_target_value = max(0, c_tdp_target_value)
regs[power_source]['MSR_CONFIG_TDP_CONTROL'] = valid_c_tdp_target_value
return regs
def set_hwp(pref):
# set HWP energy performance hints
assert pref in ('performance', 'balance_performance', 'default', 'balance_power', 'power')
CPUs = [
'/sys/devices/system/cpu/cpu{:d}/cpufreq/energy_performance_preference'.format(x) for x in range(cpu_count())
]
for i, c in enumerate(CPUs):
with open(c, 'wb') as f:
f.write(pref.encode())
if args.debug:
with open(c) as f:
read_value = f.read().strip()
match = OK if pref == read_value else ERR
print('[D] HWP for cpu{:d} - write "{:s}" - read "{:s}" - match {}'.format(i, pref, read_value, match))
def power_thread(config, regs, exit_event):
try:
mchbar_mmio = MMIO(0xFED159A0, 8)
except MMIOError:
fatal('Unable to open /dev/mem. Try to disable Secure Boot.')
while not exit_event.is_set():
# print thermal status
if args.debug:
thermal_status = get_reset_thermal_status()
for index, core_thermal_status in enumerate(thermal_status):
for key, value in core_thermal_status.items():
print('[D] core {} thermal status: {} = {}'.format(index, key.replace("_", " "), value))
# switch back to sysfs polling
if power['method'] == 'polling':
power['source'] = 'BATTERY' if is_on_battery(config) else 'AC'
# set temperature trip point
if 'MSR_TEMPERATURE_TARGET' in regs[power['source']]:
write_value = regs[power['source']]['MSR_TEMPERATURE_TARGET']
writemsr(0x1A2, write_value)
if args.debug:
read_value = readmsr(0x1A2, 24, 29, flatten=True)
match = OK if write_value >> 24 == read_value else ERR
print(
'[D] TEMPERATURE_TARGET - write {:#x} - read {:#x} - match {}'.format(
write_value >> 24, read_value, match
)
)
# set cTDP
if 'MSR_CONFIG_TDP_CONTROL' in regs[power['source']]:
write_value = regs[power['source']]['MSR_CONFIG_TDP_CONTROL']
writemsr(0x64B, write_value)
if args.debug:
read_value = readmsr(0x64B, 0, 1, flatten=True)
match = OK if write_value == read_value else ERR
print(
'[D] CONFIG_TDP_CONTROL - write {:#x} - read {:#x} - match {}'.format(
write_value, read_value, match
)
)
# set PL1/2 on MSR
write_value = regs[power['source']]['MSR_PKG_POWER_LIMIT']
writemsr(0x610, write_value)
if args.debug:
read_value = readmsr(0x610, 0, 55, flatten=True)
match = OK if write_value == read_value else ERR
print(
'[D] MSR PACKAGE_POWER_LIMIT - write {:#x} - read {:#x} - match {}'.format(
write_value, read_value, match
)
)
# set MCHBAR register to the same PL1/2 values
mchbar_mmio.write32(0, write_value & 0xFFFFFFFF)
mchbar_mmio.write32(4, write_value >> 32)
if args.debug:
read_value = mchbar_mmio.read32(0) | (mchbar_mmio.read32(4) << 32)
match = OK if write_value == read_value else ERR
print(
'[D] MCHBAR PACKAGE_POWER_LIMIT - write {:#x} - read {:#x} - match {}'.format(
write_value, read_value, match
)
)
wait_t = config.getfloat(power['source'], 'Update_Rate_s')
enable_hwp_mode = config.getboolean('AC', 'HWP_Mode', fallback=False)
if power['source'] == 'AC' and enable_hwp_mode:
cpu_usage = cpu_usage_pct(exit_event, interval=wait_t)
# set full performance mode only when load is greater than this threshold (~ at least 1 core full speed)
performance_mode = cpu_usage > 100.0 / (cpu_count() * 1.25)
# check again if we are on AC, since in the meantime we might have switched to BATTERY
if not is_on_battery(config):
set_hwp('performance' if performance_mode else 'balance_performance')
else:
exit_event.wait(wait_t)
def check_kernel():
if os.geteuid() != 0:
fatal('No root no party. Try again with sudo.')
kernel_config = None
try:
with open(os.path.join('/boot', 'config-{:s}'.format(uname()[2]))) as f:
kernel_config = f.read()
except IOError:
config_gz_path = os.path.join('/proc', 'config.gz')
try:
if not os.path.isfile(config_gz_path):
subprocess.check_call(('modprobe', 'configs'))
with gzip.open(config_gz_path) as f:
kernel_config = f.read().decode()
except (subprocess.CalledProcessError, IOError):
pass
if kernel_config is None:
print('[W] Unable to obtain and validate kernel config.')
elif not re.search('CONFIG_DEVMEM=y', kernel_config):
fatal('Bad kernel config: you need CONFIG_DEVMEM=y.')
elif not re.search('CONFIG_X86_MSR=(y|m)', kernel_config):
fatal('Bad kernel config: you need CONFIG_X86_MSR builtin or as module.')
def check_cpu():
try:
with open('/proc/cpuinfo') as f:
cpuinfo = {}
for row in f.readlines():
try:
key, value = map(lambda x: x.strip(), row.split(':'))
if key == 'processor' and value == '1':
break
try:
cpuinfo[key] = int(value, 0)
except ValueError:
cpuinfo[key] = value
except ValueError:
pass
if cpuinfo['vendor_id'] != 'GenuineIntel':
fatal('This tool is designed for Intel CPUs only.')
cpu_model = None
for model in supported_cpus:
if cpuinfo['model'] in supported_cpus[model]:
cpu_model = model
break
if cpuinfo['cpu family'] != 6 or cpu_model is None:
fatal('Your CPU model is not supported.')
print('[I] Detected CPU architecture: Intel {:s}'.format(cpu_model))
except:
fatal('Unable to identify CPU model.')
def monitor(exit_event, wait):
IA32_THERM_STATUS = 0x19C
IA32_PERF_STATUS = 0x198
MSR_RAPL_POWER_UNIT = 0x606
MSR_INTEL_PKG_ENERGY_STATUS = 0x611
MSR_PP1_ENERGY_STATUS = 0x641
MSR_DRAM_ENERGY_STATUS = 0x619
wait = max(0.1, wait)
rapl_power_unit = 0.5 ** readmsr(MSR_RAPL_POWER_UNIT, from_bit=8, to_bit=12, cpu=0)
power_plane_msr = {
'Package': MSR_INTEL_PKG_ENERGY_STATUS,
'Graphics': MSR_PP1_ENERGY_STATUS,
'DRAM': MSR_DRAM_ENERGY_STATUS,
}
prev_energy = {
'Package': (readmsr(MSR_INTEL_PKG_ENERGY_STATUS, cpu=0) * rapl_power_unit, time()),
'Graphics': (readmsr(MSR_PP1_ENERGY_STATUS, cpu=0) * rapl_power_unit, time()),
'DRAM': (readmsr(MSR_DRAM_ENERGY_STATUS, cpu=0) * rapl_power_unit, time()),
}
undervolt_values = get_undervolt(convert=True)
undervolt_output = ' | '.join('{:s}: {:.2f} mV'.format(plane, undervolt_values[plane]) for plane in VOLTAGE_PLANES)
print('[D] Undervolt offsets: {:s}'.format(undervolt_output))
iccmax_values = get_icc_max(convert=True)
iccmax_output = ' | '.join('{:s}: {:.2f} A'.format(plane, iccmax_values[plane]) for plane in CURRENT_PLANES)
print('[D] IccMax: {:s}'.format(iccmax_output))
print('[D] Realtime monitoring of throttling causes:\n')
while not exit_event.is_set():
value = readmsr(IA32_THERM_STATUS, from_bit=0, to_bit=15, cpu=0)
offsets = {'Thermal': 0, 'Power': 10, 'Current': 12, 'Cross-domain (e.g. GPU)': 14}
output = ('{:s}: {:s}'.format(cause, LIM if bool((value >> offsets[cause]) & 1) else OK) for cause in offsets)
# ugly code, just testing...
vcore = readmsr(IA32_PERF_STATUS, from_bit=32, to_bit=47, cpu=0) / (2.0 ** 13) * 1000
stats2 = {'VCore': '{:.0f} mV'.format(vcore)}
for power_plane in ('Package', 'Graphics', 'DRAM'):
energy_j = readmsr(power_plane_msr[power_plane], cpu=0) * rapl_power_unit
now = time()
prev_energy[power_plane], energy_w = (
(energy_j, now),
(energy_j - prev_energy[power_plane][0]) / (now - prev_energy[power_plane][1]),
)
stats2[power_plane] = '{:.1f} W'.format(energy_w)
output2 = ('{:s}: {:s}'.format(label, stats2[label]) for label in stats2)
print('[{}] {} || {}{}'.format(power['source'], ' - '.join(output), ' - '.join(output2), ' ' * 10), end='\r')
exit_event.wait(wait)
def main():
global args
parser = argparse.ArgumentParser()
exclusive_group = parser.add_mutually_exclusive_group()
exclusive_group.add_argument('--debug', action='store_true', help='add some debug info and additional checks')
exclusive_group.add_argument(
'--monitor',
metavar='update_rate',
const=1.0,
type=float,
nargs='?',
help='realtime monitoring of throttling causes (default 1s)',
)
parser.add_argument('--config', default='/etc/lenovo_fix.conf', help='override default config file path')
parser.add_argument('--force', action='store_true', help='bypass compatibility checks (EXPERTS only)')
args = parser.parse_args()
if not args.force:
check_kernel()
check_cpu()
print('[I] Loading config file.')
config = load_config()
power['source'] = 'BATTERY' if is_on_battery(config) else 'AC'
platform_info = get_cpu_platform_info()
if args.debug:
for key, value in platform_info.items():
print('[D] cpu platform info: {} = {}'.format(key.replace("_", " "), value))
regs = calc_reg_values(platform_info, config)
if not config.getboolean('GENERAL', 'Enabled'):
return
exit_event = Event()
thread = Thread(target=power_thread, args=(config, regs, exit_event))
thread.daemon = True
thread.start()
undervolt(config)
set_icc_max(config)
# handle dbus events for applying undervolt/IccMax on resume from sleep/hybernate
def handle_sleep_callback(sleeping):
if not sleeping:
undervolt(config)
set_icc_max(config)
def handle_ac_callback(*args):
try:
power['source'] = 'BATTERY' if args[1]['Online'] == 0 else 'AC'
power['method'] = 'dbus'
except:
power['method'] = 'polling'
DBusGMainLoop(set_as_default=True)
bus = dbus.SystemBus()
# add dbus receiver only if undervolt/IccMax is enabled in config
if any(
config.getfloat(key, plane, fallback=0) != 0 for plane in VOLTAGE_PLANES for key in UNDERVOLT_KEYS + ICCMAX_KEYS
):
bus.add_signal_receiver(
handle_sleep_callback, 'PrepareForSleep', 'org.freedesktop.login1.Manager', 'org.freedesktop.login1'
)
bus.add_signal_receiver(
handle_ac_callback,
signal_name="PropertiesChanged",
dbus_interface="org.freedesktop.DBus.Properties",
path="/org/freedesktop/UPower/devices/line_power_AC",
)
print('[I] Starting main loop.')
if args.monitor is not None:
monitor_thread = Thread(target=monitor, args=(exit_event, args.monitor))
monitor_thread.daemon = True
monitor_thread.start()
try:
loop = GLib.MainLoop()
loop.run()
except (KeyboardInterrupt, SystemExit):
pass
exit_event.set()
loop.quit()
thread.join(timeout=1)
if args.monitor is not None:
monitor_thread.join(timeout=0.1)
if __name__ == '__main__':
main()
|
client.py
|
from cmd import Cmd
from os import path
from random import choice
from string import ascii_lowercase
from project8.server import Node, UNHANDLED
from threading import Thread
from time import sleep
from xmlrpc.client import ServerProxy, Fault
import sys
HEAD_START = 0.1 # Seconds
SECRET_LENGTH = 100
def randomString(length):
chars = []
letters = ascii_lowercase[:26]
while length > 0:
length -= 1
chars.append(choice(letters))
return ''.join(chars)
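# Client wraps a peer Node: it starts the Node's XML-RPC server in a daemon thread,
# gives it a short head start, then connects back to it through ServerProxy and
# introduces each peer URL listed in urlfile via hello().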
class Client(Cmd):
prompt = '> '
def __init__(self, url, dirname, urlfile):
Cmd.__init__(self)
self.secret = randomString(SECRET_LENGTH)
n = Node(url, dirname, self.secret)
t = Thread(target=n._start)
        t.daemon = True
t.start()
sleep(HEAD_START)
self.server = ServerProxy(url)
urlfile = path.join(dirname, urlfile)
for line in open(urlfile):
line = line.strip()
self.server.hello(line)
def do_fetch(self, arg):
try:
self.server.fetch(arg, self.secret)
except Fault as f:
if f.faultCode != UNHANDLED: raise
print("Couldn't find the file", arg)
def do_exit(self, arg):
print()
sys.exit()
do_EOF = do_exit
def main():
urlfile, directory, url = sys.argv[1:]
client = Client(url, directory, urlfile)
client.cmdloop()
if __name__ == '__main__': main()
|
simple-tcp-server.py
|
import socket
import threading
bind_ip = "0.0.0.0"
bind_port = 9999
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((bind_ip,bind_port))
server.listen(5)
print("[*] Listening on %s:%d" % (bind_ip,bind_port))
# this is our client-handling thread
def handle_client(client_socket):
# print out what the client sends
request = client_socket.recv(1024)
print("[*] Received: %s" % request)
# send back a packet
client_socket.send("ACK!")
client_socket.close()
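# Hedged sketch (not part of the original script and never called): a minimal client
# you could run from another shell to exercise this server; it sends one message and
# prints the ACK reply.
def example_client(host="127.0.0.1", port=9999):
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((host, port))
    sock.send(b"hello server")
    print(sock.recv(4096))
    sock.close()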
while True:
client, addr = server.accept()
print("[*] Accepted connection from: %s:%d" % (addr[0],addr[1]))
# spin up our client thread to handle incoming data
client_handler = threading.Thread(target=handle_client,args=(client,))
client_handler.start()
|
test_worker.py
|
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import shutil
from datetime import timedelta
from time import sleep
import signal
import time
from multiprocessing import Process
import subprocess
import sys
from unittest import skipIf
import pytest
import mock
from mock import Mock
from tests import RQTestCase, slow
from tests.fixtures import (create_file, create_file_after_timeout,
div_by_zero, do_nothing, say_hello, say_pid,
run_dummy_heroku_worker, access_self,
modify_self, modify_self_and_error)
from rq import (get_failed_queue, Queue, SimpleWorker, Worker,
get_current_connection)
from rq.compat import as_text, PY2
from rq.job import Job, JobStatus
from rq.registry import StartedJobRegistry
from rq.suspension import resume, suspend
from rq.utils import utcnow
from rq.worker import HerokuWorker, WorkerStatus
class CustomJob(Job):
pass
class CustomQueue(Queue):
pass
class TestWorker(RQTestCase):
def test_create_worker(self):
"""Worker creation using various inputs."""
# With single string argument
w = Worker('foo')
self.assertEqual(w.queues[0].name, 'foo')
# With list of strings
w = Worker(['foo', 'bar'])
self.assertEqual(w.queues[0].name, 'foo')
self.assertEqual(w.queues[1].name, 'bar')
# With iterable of strings
w = Worker(iter(['foo', 'bar']))
self.assertEqual(w.queues[0].name, 'foo')
self.assertEqual(w.queues[1].name, 'bar')
# Also accept byte strings in Python 2
if PY2:
# With single byte string argument
w = Worker(b'foo')
self.assertEqual(w.queues[0].name, 'foo')
# With list of byte strings
w = Worker([b'foo', b'bar'])
self.assertEqual(w.queues[0].name, 'foo')
self.assertEqual(w.queues[1].name, 'bar')
# With iterable of byte strings
w = Worker(iter([b'foo', b'bar']))
self.assertEqual(w.queues[0].name, 'foo')
self.assertEqual(w.queues[1].name, 'bar')
# With single Queue
w = Worker(Queue('foo'))
self.assertEqual(w.queues[0].name, 'foo')
# With iterable of Queues
w = Worker(iter([Queue('foo'), Queue('bar')]))
self.assertEqual(w.queues[0].name, 'foo')
self.assertEqual(w.queues[1].name, 'bar')
# With list of Queues
w = Worker([Queue('foo'), Queue('bar')])
self.assertEqual(w.queues[0].name, 'foo')
self.assertEqual(w.queues[1].name, 'bar')
def test_work_and_quit(self):
"""Worker processes work, then quits."""
fooq, barq = Queue('foo'), Queue('bar')
w = Worker([fooq, barq])
self.assertEqual(
w.work(burst=True), False,
'Did not expect any work on the queue.'
)
fooq.enqueue(say_hello, name='Frank')
self.assertEqual(
w.work(burst=True), True,
'Expected at least some work done.'
)
def test_find_by_key(self):
"""Worker.find_by_key restores queues, state and job_id."""
queues = [Queue('foo'), Queue('bar')]
w = Worker(queues)
w.register_death()
w.register_birth()
w.set_state(WorkerStatus.STARTED)
worker = Worker.find_by_key(w.key)
self.assertEqual(worker.queues, queues)
self.assertEqual(worker.get_state(), WorkerStatus.STARTED)
self.assertEqual(worker._job_id, None)
w.register_death()
def test_worker_ttl(self):
"""Worker ttl."""
w = Worker([])
w.register_birth()
[worker_key] = self.testconn.smembers(Worker.redis_workers_keys)
self.assertIsNotNone(self.testconn.ttl(worker_key))
w.register_death()
def test_work_via_string_argument(self):
"""Worker processes work fed via string arguments."""
q = Queue('foo')
w = Worker([q])
job = q.enqueue('tests.fixtures.say_hello', name='Frank')
self.assertEqual(
w.work(burst=True), True,
'Expected at least some work done.'
)
self.assertEqual(job.result, 'Hi there, Frank!')
def test_job_times(self):
"""job times are set correctly."""
q = Queue('foo')
w = Worker([q])
before = utcnow()
before = before.replace(microsecond=0)
job = q.enqueue(say_hello)
self.assertIsNotNone(job.enqueued_at)
self.assertIsNone(job.started_at)
self.assertIsNone(job.ended_at)
self.assertEqual(
w.work(burst=True), True,
'Expected at least some work done.'
)
self.assertEqual(job.result, 'Hi there, Stranger!')
after = utcnow()
job.refresh()
self.assertTrue(
before <= job.enqueued_at <= after,
'Not %s <= %s <= %s' % (before, job.enqueued_at, after)
)
self.assertTrue(
before <= job.started_at <= after,
'Not %s <= %s <= %s' % (before, job.started_at, after)
)
self.assertTrue(
before <= job.ended_at <= after,
'Not %s <= %s <= %s' % (before, job.ended_at, after)
)
def test_work_is_unreadable(self):
"""Unreadable jobs are put on the failed queue."""
q = Queue()
failed_q = get_failed_queue()
self.assertEqual(failed_q.count, 0)
self.assertEqual(q.count, 0)
# NOTE: We have to fake this enqueueing for this test case.
# What we're simulating here is a call to a function that is not
# importable from the worker process.
job = Job.create(func=div_by_zero, args=(3,))
job.save()
data = self.testconn.hget(job.key, 'data')
invalid_data = data.replace(b'div_by_zero', b'nonexisting')
assert data != invalid_data
self.testconn.hset(job.key, 'data', invalid_data)
# We use the low-level internal function to enqueue any data (bypassing
# validity checks)
q.push_job_id(job.id)
self.assertEqual(q.count, 1)
# All set, we're going to process it
w = Worker([q])
w.work(burst=True) # should silently pass
self.assertEqual(q.count, 0)
self.assertEqual(failed_q.count, 1)
def test_work_fails(self):
"""Failing jobs are put on the failed queue."""
q = Queue()
failed_q = get_failed_queue()
# Preconditions
self.assertEqual(failed_q.count, 0)
self.assertEqual(q.count, 0)
# Action
job = q.enqueue(div_by_zero)
self.assertEqual(q.count, 1)
# keep for later
enqueued_at_date = str(job.enqueued_at)
w = Worker([q])
w.work(burst=True) # should silently pass
# Postconditions
self.assertEqual(q.count, 0)
self.assertEqual(failed_q.count, 1)
self.assertEqual(w.get_current_job_id(), None)
# Check the job
job = Job.fetch(job.id)
self.assertEqual(job.origin, q.name)
# Should be the original enqueued_at date, not the date of enqueueing
# to the failed queue
self.assertEqual(str(job.enqueued_at), enqueued_at_date)
self.assertIsNotNone(job.exc_info) # should contain exc_info
def test_custom_exc_handling(self):
"""Custom exception handling."""
def black_hole(job, *exc_info):
# Don't fall through to default behaviour (moving to failed queue)
return False
q = Queue()
failed_q = get_failed_queue()
# Preconditions
self.assertEqual(failed_q.count, 0)
self.assertEqual(q.count, 0)
# Action
job = q.enqueue(div_by_zero)
self.assertEqual(q.count, 1)
w = Worker([q], exception_handlers=black_hole)
w.work(burst=True) # should silently pass
# Postconditions
self.assertEqual(q.count, 0)
self.assertEqual(failed_q.count, 0)
# Check the job
job = Job.fetch(job.id)
self.assertEqual(job.is_failed, True)
def test_cancelled_jobs_arent_executed(self):
"""Cancelling jobs."""
SENTINEL_FILE = '/tmp/rq-tests.txt' # noqa
try:
# Remove the sentinel if it is leftover from a previous test run
os.remove(SENTINEL_FILE)
except OSError as e:
if e.errno != 2:
raise
q = Queue()
job = q.enqueue(create_file, SENTINEL_FILE)
# Here, we cancel the job, so the sentinel file may not be created
self.testconn.delete(job.key)
w = Worker([q])
w.work(burst=True)
assert q.count == 0
# Should not have created evidence of execution
self.assertEqual(os.path.exists(SENTINEL_FILE), False)
@slow # noqa
def test_timeouts(self):
"""Worker kills jobs after timeout."""
sentinel_file = '/tmp/.rq_sentinel'
q = Queue()
w = Worker([q])
# Put it on the queue with a timeout value
res = q.enqueue(create_file_after_timeout,
args=(sentinel_file, 4),
timeout=1)
try:
os.unlink(sentinel_file)
        except OSError as e:
            if e.errno != 2:
                raise
self.assertEqual(os.path.exists(sentinel_file), False)
w.work(burst=True)
self.assertEqual(os.path.exists(sentinel_file), False)
# TODO: Having to do the manual refresh() here is really ugly!
res.refresh()
self.assertIn('JobTimeoutException', as_text(res.exc_info))
def test_worker_sets_result_ttl(self):
"""Ensure that Worker properly sets result_ttl for individual jobs."""
q = Queue()
job = q.enqueue(say_hello, args=('Frank',), result_ttl=10)
w = Worker([q])
self.assertIn(job.get_id().encode('utf-8'), self.testconn.lrange(q.key, 0, -1))
w.work(burst=True)
self.assertNotEqual(self.testconn._ttl(job.key), 0)
self.assertNotIn(job.get_id().encode('utf-8'), self.testconn.lrange(q.key, 0, -1))
        # Jobs with result_ttl=-1 don't expire
job = q.enqueue(say_hello, args=('Frank',), result_ttl=-1)
w = Worker([q])
self.assertIn(job.get_id().encode('utf-8'), self.testconn.lrange(q.key, 0, -1))
w.work(burst=True)
self.assertEqual(self.testconn._ttl(job.key), -1)
self.assertNotIn(job.get_id().encode('utf-8'), self.testconn.lrange(q.key, 0, -1))
# Job with result_ttl = 0 gets deleted immediately
job = q.enqueue(say_hello, args=('Frank',), result_ttl=0)
w = Worker([q])
self.assertIn(job.get_id().encode('utf-8'), self.testconn.lrange(q.key, 0, -1))
w.work(burst=True)
self.assertEqual(self.testconn.get(job.key), None)
self.assertNotIn(job.get_id().encode('utf-8'), self.testconn.lrange(q.key, 0, -1))
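    # Editor's note -- a compact summary of the result_ttl semantics exercised
    # above (values are illustrative):
    #   q.enqueue(say_hello, result_ttl=10)   # result kept for ~10 seconds
    #   q.enqueue(say_hello, result_ttl=-1)   # result kept forever (no expiry)
    #   q.enqueue(say_hello, result_ttl=0)    # result deleted immediately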
def test_worker_sets_job_status(self):
"""Ensure that worker correctly sets job status."""
q = Queue()
w = Worker([q])
job = q.enqueue(say_hello)
self.assertEqual(job.get_status(), JobStatus.QUEUED)
self.assertEqual(job.is_queued, True)
self.assertEqual(job.is_finished, False)
self.assertEqual(job.is_failed, False)
w.work(burst=True)
job = Job.fetch(job.id)
self.assertEqual(job.get_status(), JobStatus.FINISHED)
self.assertEqual(job.is_queued, False)
self.assertEqual(job.is_finished, True)
self.assertEqual(job.is_failed, False)
# Failed jobs should set status to "failed"
job = q.enqueue(div_by_zero, args=(1,))
w.work(burst=True)
job = Job.fetch(job.id)
self.assertEqual(job.get_status(), JobStatus.FAILED)
self.assertEqual(job.is_queued, False)
self.assertEqual(job.is_finished, False)
self.assertEqual(job.is_failed, True)
def test_job_dependency(self):
"""Enqueue dependent jobs only if their parents don't fail"""
q = Queue()
w = Worker([q])
parent_job = q.enqueue(say_hello, result_ttl=0)
job = q.enqueue_call(say_hello, depends_on=parent_job)
w.work(burst=True)
job = Job.fetch(job.id)
self.assertEqual(job.get_status(), JobStatus.FINISHED)
parent_job = q.enqueue(div_by_zero)
job = q.enqueue_call(say_hello, depends_on=parent_job)
w.work(burst=True)
job = Job.fetch(job.id)
self.assertNotEqual(job.get_status(), JobStatus.FINISHED)
def test_get_current_job(self):
"""Ensure worker.get_current_job() works properly"""
q = Queue()
worker = Worker([q])
job = q.enqueue_call(say_hello)
self.assertEqual(self.testconn.hget(worker.key, 'current_job'), None)
worker.set_current_job_id(job.id)
self.assertEqual(
worker.get_current_job_id(),
as_text(self.testconn.hget(worker.key, 'current_job'))
)
self.assertEqual(worker.get_current_job(), job)
def test_custom_job_class(self):
"""Ensure Worker accepts custom job class."""
q = Queue()
worker = Worker([q], job_class=CustomJob)
self.assertEqual(worker.job_class, CustomJob)
def test_custom_queue_class(self):
"""Ensure Worker accepts custom queue class."""
q = CustomQueue()
worker = Worker([q], queue_class=CustomQueue)
self.assertEqual(worker.queue_class, CustomQueue)
def test_custom_queue_class_is_not_global(self):
"""Ensure Worker custom queue class is not global."""
q = CustomQueue()
worker_custom = Worker([q], queue_class=CustomQueue)
q_generic = Queue()
worker_generic = Worker([q_generic])
self.assertEqual(worker_custom.queue_class, CustomQueue)
self.assertEqual(worker_generic.queue_class, Queue)
self.assertEqual(Worker.queue_class, Queue)
def test_custom_job_class_is_not_global(self):
"""Ensure Worker custom job class is not global."""
q = Queue()
worker_custom = Worker([q], job_class=CustomJob)
q_generic = Queue()
worker_generic = Worker([q_generic])
self.assertEqual(worker_custom.job_class, CustomJob)
self.assertEqual(worker_generic.job_class, Job)
self.assertEqual(Worker.job_class, Job)
def test_work_via_simpleworker(self):
"""Worker processes work, with forking disabled,
then returns."""
fooq, barq = Queue('foo'), Queue('bar')
w = SimpleWorker([fooq, barq])
self.assertEqual(w.work(burst=True), False,
'Did not expect any work on the queue.')
job = fooq.enqueue(say_pid)
self.assertEqual(w.work(burst=True), True,
'Expected at least some work done.')
self.assertEqual(job.result, os.getpid(),
'PID mismatch, fork() is not supposed to happen here')
def test_prepare_job_execution(self):
"""Prepare job execution does the necessary bookkeeping."""
queue = Queue(connection=self.testconn)
job = queue.enqueue(say_hello)
worker = Worker([queue])
worker.prepare_job_execution(job)
# Updates working queue
registry = StartedJobRegistry(connection=self.testconn)
self.assertEqual(registry.get_job_ids(), [job.id])
# Updates worker statuses
self.assertEqual(worker.get_state(), 'busy')
self.assertEqual(worker.get_current_job_id(), job.id)
def test_work_unicode_friendly(self):
"""Worker processes work with unicode description, then quits."""
q = Queue('foo')
w = Worker([q])
job = q.enqueue('tests.fixtures.say_hello', name='Adam',
description='你好 世界!')
self.assertEqual(w.work(burst=True), True,
'Expected at least some work done.')
self.assertEqual(job.result, 'Hi there, Adam!')
self.assertEqual(job.description, '你好 世界!')
def test_work_log_unicode_friendly(self):
"""Worker process work with unicode or str other than pure ascii content,
logging work properly"""
q = Queue("foo")
w = Worker([q])
q.enqueue('tests.fixtures.say_hello', name='阿达姆',
description='你好 世界!')
self.assertEqual(w.work(burst=True), True,
'Expected at least some work done.')
q.enqueue('tests.fixtures.say_hello_unicode', name='阿达姆',
description='你好 世界!')
self.assertEqual(w.work(burst=True), True,
'Expected at least some work done.')
def test_suspend_worker_execution(self):
"""Test Pause Worker Execution"""
SENTINEL_FILE = '/tmp/rq-tests.txt' # noqa
try:
# Remove the sentinel if it is leftover from a previous test run
os.remove(SENTINEL_FILE)
except OSError as e:
if e.errno != 2:
raise
q = Queue()
q.enqueue(create_file, SENTINEL_FILE)
w = Worker([q])
suspend(self.testconn)
w.work(burst=True)
assert q.count == 1
# Should not have created evidence of execution
self.assertEqual(os.path.exists(SENTINEL_FILE), False)
resume(self.testconn)
w.work(burst=True)
assert q.count == 0
self.assertEqual(os.path.exists(SENTINEL_FILE), True)
@slow
def test_suspend_with_duration(self):
q = Queue()
for _ in range(5):
q.enqueue(do_nothing)
w = Worker([q])
        # This suspends workers from working for 2 seconds
suspend(self.testconn, 2)
# So when this burst of work happens the queue should remain at 5
w.work(burst=True)
assert q.count == 5
sleep(3)
# The suspension should be expired now, and a burst of work should now clear the queue
w.work(burst=True)
assert q.count == 0
def test_worker_hash_(self):
"""Workers are hashed by their .name attribute"""
q = Queue('foo')
w1 = Worker([q], name="worker1")
w2 = Worker([q], name="worker2")
w3 = Worker([q], name="worker1")
worker_set = set([w1, w2, w3])
self.assertEqual(len(worker_set), 2)
def test_worker_sets_birth(self):
"""Ensure worker correctly sets worker birth date."""
q = Queue()
w = Worker([q])
w.register_birth()
birth_date = w.birth_date
self.assertIsNotNone(birth_date)
self.assertEqual(type(birth_date).__name__, 'datetime')
def test_worker_sets_death(self):
"""Ensure worker correctly sets worker death date."""
q = Queue()
w = Worker([q])
w.register_death()
death_date = w.death_date
self.assertIsNotNone(death_date)
self.assertEqual(type(death_date).__name__, 'datetime')
def test_clean_queue_registries(self):
"""worker.clean_registries sets last_cleaned_at and cleans registries."""
foo_queue = Queue('foo', connection=self.testconn)
foo_registry = StartedJobRegistry('foo', connection=self.testconn)
self.testconn.zadd(foo_registry.key, 1, 'foo')
self.assertEqual(self.testconn.zcard(foo_registry.key), 1)
bar_queue = Queue('bar', connection=self.testconn)
bar_registry = StartedJobRegistry('bar', connection=self.testconn)
self.testconn.zadd(bar_registry.key, 1, 'bar')
self.assertEqual(self.testconn.zcard(bar_registry.key), 1)
worker = Worker([foo_queue, bar_queue])
self.assertEqual(worker.last_cleaned_at, None)
worker.clean_registries()
self.assertNotEqual(worker.last_cleaned_at, None)
self.assertEqual(self.testconn.zcard(foo_registry.key), 0)
self.assertEqual(self.testconn.zcard(bar_registry.key), 0)
def test_should_run_maintenance_tasks(self):
"""Workers should run maintenance tasks on startup and every hour."""
queue = Queue(connection=self.testconn)
worker = Worker(queue)
self.assertTrue(worker.should_run_maintenance_tasks)
worker.last_cleaned_at = utcnow()
self.assertFalse(worker.should_run_maintenance_tasks)
worker.last_cleaned_at = utcnow() - timedelta(seconds=3700)
self.assertTrue(worker.should_run_maintenance_tasks)
def test_worker_calls_clean_registries(self):
"""Worker calls clean_registries when run."""
queue = Queue(connection=self.testconn)
registry = StartedJobRegistry(connection=self.testconn)
self.testconn.zadd(registry.key, 1, 'foo')
worker = Worker(queue, connection=self.testconn)
worker.work(burst=True)
self.assertEqual(self.testconn.zcard(registry.key), 0)
def test_job_dependency_race_condition(self):
"""Dependencies added while the job gets finished shouldn't get lost."""
# This patches the enqueue_dependents to enqueue a new dependency AFTER
# the original code was executed.
orig_enqueue_dependents = Queue.enqueue_dependents
def new_enqueue_dependents(self, job, *args, **kwargs):
orig_enqueue_dependents(self, job, *args, **kwargs)
if hasattr(Queue, '_add_enqueue') and Queue._add_enqueue is not None and Queue._add_enqueue.id == job.id:
Queue._add_enqueue = None
Queue().enqueue_call(say_hello, depends_on=job)
Queue.enqueue_dependents = new_enqueue_dependents
q = Queue()
w = Worker([q])
with mock.patch.object(Worker, 'execute_job', wraps=w.execute_job) as mocked:
parent_job = q.enqueue(say_hello, result_ttl=0)
Queue._add_enqueue = parent_job
job = q.enqueue_call(say_hello, depends_on=parent_job)
w.work(burst=True)
job = Job.fetch(job.id)
self.assertEqual(job.get_status(), JobStatus.FINISHED)
# The created spy checks two issues:
        # * before the fix of #739, 2 of the 3 jobs were executed due
# to the race condition
# * during the development another issue was fixed:
# due to a missing pipeline usage in Queue.enqueue_job, the job
# which was enqueued before the "rollback" was executed twice.
# So before that fix the call count was 4 instead of 3
self.assertEqual(mocked.call_count, 3)
def test_self_modification_persistence(self):
"""Make sure that any meta modification done by
the job itself persists completely through the
queue/worker/job stack."""
q = Queue()
# Also make sure that previously existing metadata
# persists properly
job = q.enqueue(modify_self, meta={'foo': 'bar', 'baz': 42},
args=[{'baz': 10, 'newinfo': 'waka'}])
w = Worker([q])
w.work(burst=True)
job_check = Job.fetch(job.id)
self.assertEqual(set(job_check.meta.keys()),
set(['foo', 'baz', 'newinfo']))
self.assertEqual(job_check.meta['foo'], 'bar')
self.assertEqual(job_check.meta['baz'], 10)
self.assertEqual(job_check.meta['newinfo'], 'waka')
def test_self_modification_persistence_with_error(self):
"""Make sure that any meta modification done by
the job itself persists completely through the
queue/worker/job stack -- even if the job errored"""
q = Queue()
failed_q = get_failed_queue()
# Also make sure that previously existing metadata
# persists properly
job = q.enqueue(modify_self_and_error, meta={'foo': 'bar', 'baz': 42},
args=[{'baz': 10, 'newinfo': 'waka'}])
w = Worker([q])
w.work(burst=True)
# Postconditions
self.assertEqual(q.count, 0)
self.assertEqual(failed_q.count, 1)
self.assertEqual(w.get_current_job_id(), None)
job_check = Job.fetch(job.id)
self.assertEqual(set(job_check.meta.keys()),
set(['foo', 'baz', 'newinfo']))
self.assertEqual(job_check.meta['foo'], 'bar')
self.assertEqual(job_check.meta['baz'], 10)
self.assertEqual(job_check.meta['newinfo'], 'waka')
def kill_worker(pid, double_kill):
# wait for the worker to be started over on the main process
time.sleep(0.5)
os.kill(pid, signal.SIGTERM)
if double_kill:
# give the worker time to switch signal handler
time.sleep(0.5)
os.kill(pid, signal.SIGTERM)
def wait_and_kill_work_horse(pid, time_to_wait=0.0):
time.sleep(time_to_wait)
os.kill(pid, signal.SIGKILL)
class TimeoutTestCase:
def setUp(self):
        # we want tests to fail if signals are ignored and the work remains
        # running, so set an alarm to kill them after `killtimeout` seconds
self.killtimeout = 15
signal.signal(signal.SIGALRM, self._timeout)
signal.alarm(self.killtimeout)
def _timeout(self, signal, frame):
raise AssertionError(
"test still running after %i seconds, likely the worker wasn't shutdown correctly" % self.killtimeout
)
class WorkerShutdownTestCase(TimeoutTestCase, RQTestCase):
@slow
def test_idle_worker_warm_shutdown(self):
"""worker with no ongoing job receiving single SIGTERM signal and shutting down"""
w = Worker('foo')
self.assertFalse(w._stop_requested)
p = Process(target=kill_worker, args=(os.getpid(), False))
p.start()
w.work()
p.join(1)
self.assertFalse(w._stop_requested)
@slow
def test_working_worker_warm_shutdown(self):
"""worker with an ongoing job receiving single SIGTERM signal, allowing job to finish then shutting down"""
fooq = Queue('foo')
w = Worker(fooq)
sentinel_file = '/tmp/.rq_sentinel_warm'
fooq.enqueue(create_file_after_timeout, sentinel_file, 2)
self.assertFalse(w._stop_requested)
p = Process(target=kill_worker, args=(os.getpid(), False))
p.start()
w.work()
p.join(2)
self.assertFalse(p.is_alive())
self.assertTrue(w._stop_requested)
self.assertTrue(os.path.exists(sentinel_file))
self.assertIsNotNone(w.shutdown_requested_date)
self.assertEqual(type(w.shutdown_requested_date).__name__, 'datetime')
@slow
def test_working_worker_cold_shutdown(self):
"""worker with an ongoing job receiving double SIGTERM signal and shutting down immediately"""
fooq = Queue('foo')
w = Worker(fooq)
sentinel_file = '/tmp/.rq_sentinel_cold'
fooq.enqueue(create_file_after_timeout, sentinel_file, 2)
self.assertFalse(w._stop_requested)
p = Process(target=kill_worker, args=(os.getpid(), True))
p.start()
self.assertRaises(SystemExit, w.work)
p.join(1)
self.assertTrue(w._stop_requested)
self.assertFalse(os.path.exists(sentinel_file))
shutdown_requested_date = w.shutdown_requested_date
self.assertIsNotNone(shutdown_requested_date)
self.assertEqual(type(shutdown_requested_date).__name__, 'datetime')
@slow
def test_work_horse_death_sets_job_failed(self):
"""worker with an ongoing job whose work horse dies unexpectadly (before
completing the job) should set the job's status to FAILED
"""
fooq = Queue('foo')
failed_q = get_failed_queue()
self.assertEqual(failed_q.count, 0)
self.assertEqual(fooq.count, 0)
w = Worker(fooq)
sentinel_file = '/tmp/.rq_sentinel_work_horse_death'
if os.path.exists(sentinel_file):
os.remove(sentinel_file)
fooq.enqueue(create_file_after_timeout, sentinel_file, 100)
job, queue = w.dequeue_job_and_maintain_ttl(5)
w.fork_work_horse(job, queue)
p = Process(target=wait_and_kill_work_horse, args=(w._horse_pid, 0.5))
p.start()
w.monitor_work_horse(job)
job_status = job.get_status()
p.join(1)
self.assertEqual(job_status, JobStatus.FAILED)
self.assertEqual(failed_q.count, 1)
self.assertEqual(fooq.count, 0)
def schedule_access_self():
q = Queue('default', connection=get_current_connection())
q.enqueue(access_self)
class TestWorkerSubprocess(RQTestCase):
def setUp(self):
super(TestWorkerSubprocess, self).setUp()
db_num = self.testconn.connection_pool.connection_kwargs['db']
self.redis_url = 'redis://127.0.0.1:6379/%d' % db_num
def test_run_empty_queue(self):
"""Run the worker in its own process with an empty queue"""
subprocess.check_call(['rqworker', '-u', self.redis_url, '-b'])
def test_run_access_self(self):
"""Schedule a job, then run the worker as subprocess"""
q = Queue()
q.enqueue(access_self)
subprocess.check_call(['rqworker', '-u', self.redis_url, '-b'])
assert get_failed_queue().count == 0
assert q.count == 0
@skipIf('pypy' in sys.version.lower(), 'often times out with pypy')
def test_run_scheduled_access_self(self):
"""Schedule a job that schedules a job, then run the worker as subprocess"""
q = Queue()
q.enqueue(schedule_access_self)
subprocess.check_call(['rqworker', '-u', self.redis_url, '-b'])
assert get_failed_queue().count == 0
assert q.count == 0
@pytest.mark.skipif(sys.platform == 'darwin', reason='requires Linux signals')
class HerokuWorkerShutdownTestCase(TimeoutTestCase, RQTestCase):
def setUp(self):
super(HerokuWorkerShutdownTestCase, self).setUp()
self.sandbox = '/tmp/rq_shutdown/'
os.makedirs(self.sandbox)
def tearDown(self):
shutil.rmtree(self.sandbox, ignore_errors=True)
@slow
def test_immediate_shutdown(self):
"""Heroku work horse shutdown with immediate (0 second) kill"""
p = Process(target=run_dummy_heroku_worker, args=(self.sandbox, 0))
p.start()
time.sleep(0.5)
os.kill(p.pid, signal.SIGRTMIN)
p.join(2)
self.assertEqual(p.exitcode, 1)
self.assertTrue(os.path.exists(os.path.join(self.sandbox, 'started')))
self.assertFalse(os.path.exists(os.path.join(self.sandbox, 'finished')))
with open(os.path.join(self.sandbox, 'stderr.log')) as f:
stderr = f.read().strip('\n')
err = 'ShutDownImminentException: shut down imminent (signal: SIGRTMIN)'
self.assertTrue(stderr.endswith(err), stderr)
@slow
def test_1_sec_shutdown(self):
"""Heroku work horse shutdown with 1 second kill"""
p = Process(target=run_dummy_heroku_worker, args=(self.sandbox, 1))
p.start()
time.sleep(0.5)
os.kill(p.pid, signal.SIGRTMIN)
time.sleep(0.1)
self.assertEqual(p.exitcode, None)
p.join(2)
self.assertEqual(p.exitcode, 1)
self.assertTrue(os.path.exists(os.path.join(self.sandbox, 'started')))
self.assertFalse(os.path.exists(os.path.join(self.sandbox, 'finished')))
with open(os.path.join(self.sandbox, 'stderr.log')) as f:
stderr = f.read().strip('\n')
err = 'ShutDownImminentException: shut down imminent (signal: SIGALRM)'
self.assertTrue(stderr.endswith(err), stderr)
@slow
def test_shutdown_double_sigrtmin(self):
"""Heroku work horse shutdown with long delay but SIGRTMIN sent twice"""
p = Process(target=run_dummy_heroku_worker, args=(self.sandbox, 10))
p.start()
time.sleep(0.5)
os.kill(p.pid, signal.SIGRTMIN)
        # we have to wait a short while, otherwise the second signal won't be processed.
time.sleep(0.1)
os.kill(p.pid, signal.SIGRTMIN)
p.join(2)
self.assertEqual(p.exitcode, 1)
self.assertTrue(os.path.exists(os.path.join(self.sandbox, 'started')))
self.assertFalse(os.path.exists(os.path.join(self.sandbox, 'finished')))
with open(os.path.join(self.sandbox, 'stderr.log')) as f:
stderr = f.read().strip('\n')
err = 'ShutDownImminentException: shut down imminent (signal: SIGRTMIN)'
self.assertTrue(stderr.endswith(err), stderr)
def test_handle_shutdown_request(self):
"""Mutate HerokuWorker so _horse_pid refers to an artificial process
and test handle_warm_shutdown_request"""
w = HerokuWorker('foo')
path = os.path.join(self.sandbox, 'shouldnt_exist')
p = Process(target=create_file_after_timeout, args=(path, 2))
p.start()
self.assertEqual(p.exitcode, None)
w._horse_pid = p.pid
w.handle_warm_shutdown_request()
p.join(2)
self.assertEqual(p.exitcode, -34)
self.assertFalse(os.path.exists(path))
def test_handle_shutdown_request_no_horse(self):
"""Mutate HerokuWorker so _horse_pid refers to non existent process
and test handle_warm_shutdown_request"""
w = HerokuWorker('foo')
w._horse_pid = 19999
w.handle_warm_shutdown_request()
class TestExceptionHandlerMessageEncoding(RQTestCase):
def test_handle_exception_handles_non_ascii_in_exception_message(self):
"""Test that handle_exception doesn't crash on non-ascii in exception message."""
self.worker.handle_exception(Mock(), *self.exc_info)
def test_move_to_failed_queue_handles_non_ascii_in_exception_message(self):
"""Test that move_to_failed_queue doesn't crash on non-ascii in exception message."""
self.worker.move_to_failed_queue(Mock(), *self.exc_info)
def setUp(self):
super(TestExceptionHandlerMessageEncoding, self).setUp()
self.worker = Worker("foo")
self.worker._exc_handlers = []
self.worker.failed_queue = Mock()
# Mimic how exception info is actually passed forwards
try:
raise Exception(u"💪")
except:
self.exc_info = sys.exc_info()
|
utils_test.py
|
from __future__ import annotations
import asyncio
import concurrent.futures
import contextlib
import copy
import functools
import gc
import inspect
import io
import logging
import logging.config
import multiprocessing
import os
import re
import signal
import socket
import subprocess
import sys
import tempfile
import threading
import weakref
from collections import defaultdict
from collections.abc import Callable
from contextlib import contextmanager, nullcontext, suppress
from itertools import count
from time import sleep
from typing import Any, Literal
from distributed.compatibility import MACOS
from distributed.scheduler import Scheduler
try:
import ssl
except ImportError:
ssl = None # type: ignore
import pytest
import yaml
from tlz import assoc, memoize, merge
from tornado import gen
from tornado.ioloop import IOLoop
import dask
from distributed import system
from distributed import versions as version_module
from distributed.client import Client, _global_clients, default_client
from distributed.comm import Comm
from distributed.comm.tcp import TCP, BaseTCPConnector
from distributed.compatibility import WINDOWS
from distributed.config import initialize_logging
from distributed.core import CommClosedError, ConnectionPool, Status, connect, rpc
from distributed.deploy import SpecCluster
from distributed.diagnostics.plugin import WorkerPlugin
from distributed.metrics import time
from distributed.nanny import Nanny
from distributed.node import ServerNode
from distributed.proctitle import enable_proctitle_on_children
from distributed.security import Security
from distributed.utils import (
DequeHandler,
TimeoutError,
_offload_executor,
get_ip,
get_ipv6,
iscoroutinefunction,
log_errors,
mp_context,
reset_logger_locks,
sync,
)
from distributed.worker import Worker
try:
import dask.array # register config
except ImportError:
pass
logger = logging.getLogger(__name__)
logging_levels = {
name: logger.level
for name, logger in logging.root.manager.loggerDict.items()
if isinstance(logger, logging.Logger)
}
_TEST_TIMEOUT = 30
_offload_executor.submit(lambda: None).result() # create thread during import
@pytest.fixture(scope="session")
def valid_python_script(tmpdir_factory):
local_file = tmpdir_factory.mktemp("data").join("file.py")
local_file.write("print('hello world!')")
return local_file
@pytest.fixture(scope="session")
def client_contract_script(tmpdir_factory):
local_file = tmpdir_factory.mktemp("data").join("distributed_script.py")
lines = (
"from distributed import Client",
"e = Client('127.0.0.1:8989')",
"print(e)",
)
local_file.write("\n".join(lines))
return local_file
@pytest.fixture(scope="session")
def invalid_python_script(tmpdir_factory):
local_file = tmpdir_factory.mktemp("data").join("file.py")
local_file.write("a+1")
return local_file
async def cleanup_global_workers():
for worker in Worker._instances:
await worker.close(report=False, executor_wait=False)
@pytest.fixture
def loop():
with check_instances():
with pristine_loop() as loop:
# Monkey-patch IOLoop.start to wait for loop stop
orig_start = loop.start
is_stopped = threading.Event()
is_stopped.set()
def start():
is_stopped.clear()
try:
orig_start()
finally:
is_stopped.set()
loop.start = start
yield loop
# Stop the loop in case it's still running
try:
sync(loop, cleanup_global_workers, callback_timeout=0.500)
loop.add_callback(loop.stop)
except RuntimeError as e:
if not re.match("IOLoop is clos(ed|ing)", str(e)):
raise
except TimeoutError:
pass
else:
is_stopped.wait()
@pytest.fixture
def loop_in_thread():
with pristine_loop() as loop:
thread = threading.Thread(target=loop.start, name="test IOLoop")
thread.daemon = True
thread.start()
loop_started = threading.Event()
loop.add_callback(loop_started.set)
loop_started.wait()
yield loop
loop.add_callback(loop.stop)
thread.join(timeout=5)
@pytest.fixture
def zmq_ctx():
import zmq
ctx = zmq.Context.instance()
yield ctx
ctx.destroy(linger=0)
@contextmanager
def pristine_loop():
IOLoop.clear_instance()
IOLoop.clear_current()
loop = IOLoop()
loop.make_current()
assert IOLoop.current() is loop
try:
yield loop
finally:
try:
loop.close(all_fds=True)
except (KeyError, ValueError):
pass
IOLoop.clear_instance()
IOLoop.clear_current()
@contextmanager
def mock_ipython():
from unittest import mock
from distributed._ipython_utils import remote_magic
ip = mock.Mock()
ip.user_ns = {}
ip.kernel = None
def get_ip():
return ip
with mock.patch("IPython.get_ipython", get_ip), mock.patch(
"distributed._ipython_utils.get_ipython", get_ip
):
yield ip
# cleanup remote_magic client cache
for kc in remote_magic._clients.values():
kc.stop_channels()
remote_magic._clients.clear()
original_config = copy.deepcopy(dask.config.config)
def reset_config():
dask.config.config.clear()
dask.config.config.update(copy.deepcopy(original_config))
def nodebug(func):
"""
A decorator to disable debug facilities during timing-sensitive tests.
Warning: this doesn't affect already created IOLoops.
"""
@functools.wraps(func)
def wrapped(*args, **kwargs):
old_asyncio_debug = os.environ.get("PYTHONASYNCIODEBUG")
if old_asyncio_debug is not None:
del os.environ["PYTHONASYNCIODEBUG"]
try:
return func(*args, **kwargs)
finally:
if old_asyncio_debug is not None:
os.environ["PYTHONASYNCIODEBUG"] = old_asyncio_debug
return wrapped
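# Editor's usage sketch (hypothetical test, not part of this module): apply
# @nodebug to a timing-sensitive test so PYTHONASYNCIODEBUG is unset while it
# runs and restored afterwards.
#
#   @nodebug
#   def test_latency_sensitive_path():
#       ...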
def nodebug_setup_module(module):
"""
A setup_module() that you can install in a test module to disable
debug facilities.
"""
module._old_asyncio_debug = os.environ.get("PYTHONASYNCIODEBUG")
if module._old_asyncio_debug is not None:
del os.environ["PYTHONASYNCIODEBUG"]
def nodebug_teardown_module(module):
"""
A teardown_module() that you can install in a test module to reenable
debug facilities.
"""
if module._old_asyncio_debug is not None:
os.environ["PYTHONASYNCIODEBUG"] = module._old_asyncio_debug
def inc(x):
return x + 1
def dec(x):
return x - 1
def mul(x, y):
return x * y
def div(x, y):
return x / y
def deep(n):
if n > 0:
return deep(n - 1)
else:
return True
def throws(x):
raise RuntimeError("hello!")
def double(x):
return x * 2
def slowinc(x, delay=0.02):
sleep(delay)
return x + 1
def slowdec(x, delay=0.02):
sleep(delay)
return x - 1
def slowdouble(x, delay=0.02):
sleep(delay)
return 2 * x
def randominc(x, scale=1):
from random import random
sleep(random() * scale)
return x + 1
def slowadd(x, y, delay=0.02):
sleep(delay)
return x + y
def slowsum(seq, delay=0.02):
sleep(delay)
return sum(seq)
def slowidentity(*args, **kwargs):
delay = kwargs.get("delay", 0.02)
sleep(delay)
if len(args) == 1:
return args[0]
else:
return args
class _UnhashableCallable:
# FIXME https://github.com/python/mypy/issues/4266
__hash__ = None # type: ignore
def __call__(self, x):
return x + 1
def run_for(duration, timer=time):
"""
Burn CPU for *duration* seconds.
"""
deadline = timer() + duration
while timer() <= deadline:
pass
# This dict grows at every varying() invocation
_varying_dict: defaultdict[str, int] = defaultdict(int)
_varying_key_gen = count()
class _ModuleSlot:
def __init__(self, modname, slotname):
self.modname = modname
self.slotname = slotname
def get(self):
return getattr(sys.modules[self.modname], self.slotname)
def varying(items):
"""
Return a function that returns a result (or raises an exception)
from *items* at each call.
"""
# cloudpickle would serialize the *values* of all globals
# used by *func* below, so we can't use `global <something>`.
# Instead look up the module by name to get the original namespace
# and not a copy.
slot = _ModuleSlot(__name__, "_varying_dict")
key = next(_varying_key_gen)
def func():
dct = slot.get()
i = dct[key]
if i == len(items):
raise IndexError
else:
x = items[i]
dct[key] = i + 1
if isinstance(x, Exception):
raise x
else:
return x
return func
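# Editor's sketch (hypothetical values): each call to the function returned by
# varying() consumes the next item, raising it if it is an exception instance
# and raising IndexError once the items are exhausted.
#
#   flaky = varying([1, ValueError("boom"), 3])
#   flaky()  # -> 1
#   flaky()  # raises ValueError("boom")
#   flaky()  # -> 3
#   flaky()  # raises IndexError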
def map_varying(itemslists):
"""
Like *varying*, but return the full specification for a map() call
on multiple items lists.
"""
def apply(func, *args, **kwargs):
return func(*args, **kwargs)
return apply, list(map(varying, itemslists))
async def geninc(x, delay=0.02):
await asyncio.sleep(delay)
return x + 1
async def asyncinc(x, delay=0.02):
await asyncio.sleep(delay)
return x + 1
_readone_queues: dict[Any, asyncio.Queue] = {}
async def readone(comm):
"""
Read one message at a time from a comm that reads lists of
messages.
"""
try:
q = _readone_queues[comm]
except KeyError:
q = _readone_queues[comm] = asyncio.Queue()
async def background_read():
while True:
try:
messages = await comm.read()
except CommClosedError:
break
for msg in messages:
q.put_nowait(msg)
q.put_nowait(None)
del _readone_queues[comm]
        # Schedule the background reader on the running event loop; merely
        # calling the coroutine function would never execute it.
        asyncio.ensure_future(background_read())
msg = await q.get()
if msg is None:
raise CommClosedError
else:
return msg
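# Editor's sketch (hypothetical helper): draining a comm one message at a time
# until its peer closes.
#
#   async def drain(comm, handle):
#       while True:
#           try:
#               msg = await readone(comm)
#           except CommClosedError:
#               break
#           handle(msg)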
def run_scheduler(q, nputs, config, port=0, **kwargs):
with dask.config.set(config):
from distributed import Scheduler
        # On Unix, fork() may be used to spawn child processes,
        # so avoid inheriting the parent's IO loop.
with pristine_loop() as loop:
async def _():
try:
scheduler = await Scheduler(
validate=True, host="127.0.0.1", port=port, **kwargs
)
except Exception as exc:
for i in range(nputs):
q.put(exc)
else:
for i in range(nputs):
q.put(scheduler.address)
await scheduler.finished()
try:
loop.run_sync(_)
finally:
loop.close(all_fds=True)
def run_worker(q, scheduler_q, config, **kwargs):
with dask.config.set(config):
from distributed import Worker
reset_logger_locks()
with log_errors():
with pristine_loop() as loop:
scheduler_addr = scheduler_q.get()
async def _():
pid = os.getpid()
try:
worker = await Worker(scheduler_addr, validate=True, **kwargs)
except Exception as exc:
q.put((pid, exc))
else:
q.put((pid, worker.address))
await worker.finished()
# Scheduler might've failed
if isinstance(scheduler_addr, str):
try:
loop.run_sync(_)
finally:
loop.close(all_fds=True)
def run_nanny(q, scheduler_q, config, **kwargs):
with dask.config.set(config):
with log_errors():
with pristine_loop() as loop:
scheduler_addr = scheduler_q.get()
async def _():
pid = os.getpid()
try:
worker = await Nanny(scheduler_addr, validate=True, **kwargs)
except Exception as exc:
q.put((pid, exc))
else:
q.put((pid, worker.address))
await worker.finished()
# Scheduler might've failed
if isinstance(scheduler_addr, str):
try:
loop.run_sync(_)
finally:
loop.close(all_fds=True)
@contextmanager
def check_active_rpc(loop, active_rpc_timeout=1):
active_before = set(rpc.active)
yield
    # Some streams can take a bit of time to notice their peer
    # has closed, and keep a coroutine (*) waiting for a CommClosedError
    # before calling close_rpc().
    # This would happen especially if a non-localhost address is used,
    # as Nanny does.
    # (*) (example: gather_from_workers())
def fail():
pytest.fail(
"some RPCs left active by test: %s" % (set(rpc.active) - active_before)
)
async def wait():
await async_wait_for(
lambda: len(set(rpc.active) - active_before) == 0,
timeout=active_rpc_timeout,
fail_func=fail,
)
loop.run_sync(wait)
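# Editor's sketch (hypothetical address): check_active_rpc wraps a test body
# that opens rpc connections and fails the test if any are still active after
# the block exits.
#
#   with check_active_rpc(loop):
#       with rpc("tcp://127.0.0.1:8786") as remote:
#           loop.run_sync(remote.identity)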
@pytest.fixture
def cluster_fixture(loop):
with cluster() as (scheduler, workers):
yield (scheduler, workers)
@pytest.fixture
def s(cluster_fixture):
scheduler, workers = cluster_fixture
return scheduler
@pytest.fixture
def a(cluster_fixture):
scheduler, workers = cluster_fixture
return workers[0]
@pytest.fixture
def b(cluster_fixture):
scheduler, workers = cluster_fixture
return workers[1]
@pytest.fixture
def client(loop, cluster_fixture):
scheduler, workers = cluster_fixture
with Client(scheduler["address"], loop=loop) as client:
yield client
# Compatibility. A lot of tests simply use `c` as fixture name
c = client
@pytest.fixture
def client_secondary(loop, cluster_fixture):
scheduler, workers = cluster_fixture
with Client(scheduler["address"], loop=loop) as client:
yield client
@contextmanager
def tls_cluster_context(
worker_kwargs=None, scheduler_kwargs=None, security=None, **kwargs
):
security = security or tls_only_security()
worker_kwargs = assoc(worker_kwargs or {}, "security", security)
scheduler_kwargs = assoc(scheduler_kwargs or {}, "security", security)
with cluster(
worker_kwargs=worker_kwargs, scheduler_kwargs=scheduler_kwargs, **kwargs
) as (s, workers):
yield s, workers
@pytest.fixture
def tls_cluster(loop, security):
with tls_cluster_context(security=security) as (scheduler, workers):
yield (scheduler, workers)
@pytest.fixture
def tls_client(tls_cluster, loop, security):
s, workers = tls_cluster
with Client(s["address"], security=security, loop=loop) as client:
yield client
@pytest.fixture
def security():
return tls_only_security()
def _terminate_join(proc):
proc.terminate()
proc.join()
proc.close()
def _close_queue(q):
q.close()
q.join_thread()
q._writer.close() # https://bugs.python.org/issue42752
class _SafeTemporaryDirectory(tempfile.TemporaryDirectory):
def __exit__(self, exc_type, exc_val, exc_tb):
try:
return super().__exit__(exc_type, exc_val, exc_tb)
except (PermissionError, NotADirectoryError):
            # It appears that we either have a process still interacting with
            # the tmpdirs of the workers or that Windows processes are not
            # releasing their locks in time. We are receiving PermissionErrors
            # during teardown.
# See also https://github.com/dask/distributed/pull/5825
pass
@contextmanager
def cluster(
nworkers=2,
nanny=False,
worker_kwargs={},
active_rpc_timeout=10,
disconnect_timeout=20,
scheduler_kwargs={},
config={},
):
ws = weakref.WeakSet()
enable_proctitle_on_children()
with clean(timeout=active_rpc_timeout, threads=False) as loop:
if nanny:
_run_worker = run_nanny
else:
_run_worker = run_worker
with contextlib.ExitStack() as stack:
# The scheduler queue will receive the scheduler's address
scheduler_q = mp_context.Queue()
stack.callback(_close_queue, scheduler_q)
# Launch scheduler
scheduler = mp_context.Process(
name="Dask cluster test: Scheduler",
target=run_scheduler,
args=(scheduler_q, nworkers + 1, config),
kwargs=scheduler_kwargs,
daemon=True,
)
ws.add(scheduler)
scheduler.start()
stack.callback(_terminate_join, scheduler)
# Launch workers
workers_by_pid = {}
q = mp_context.Queue()
stack.callback(_close_queue, q)
for _ in range(nworkers):
tmpdirname = stack.enter_context(
_SafeTemporaryDirectory(prefix="_dask_test_worker")
)
kwargs = merge(
{
"nthreads": 1,
"local_directory": tmpdirname,
"memory_limit": system.MEMORY_LIMIT,
},
worker_kwargs,
)
proc = mp_context.Process(
name="Dask cluster test: Worker",
target=_run_worker,
args=(q, scheduler_q, config),
kwargs=kwargs,
)
ws.add(proc)
proc.start()
stack.callback(_terminate_join, proc)
workers_by_pid[proc.pid] = {"proc": proc}
saddr_or_exception = scheduler_q.get()
if isinstance(saddr_or_exception, Exception):
raise saddr_or_exception
saddr = saddr_or_exception
for _ in range(nworkers):
pid, addr_or_exception = q.get()
if isinstance(addr_or_exception, Exception):
raise addr_or_exception
workers_by_pid[pid]["address"] = addr_or_exception
start = time()
try:
try:
security = scheduler_kwargs["security"]
rpc_kwargs = {
"connection_args": security.get_connection_args("client")
}
except KeyError:
rpc_kwargs = {}
with rpc(saddr, **rpc_kwargs) as s:
while True:
nthreads = loop.run_sync(s.ncores)
if len(nthreads) == nworkers:
break
if time() - start > 5:
raise Exception("Timeout on cluster creation")
# avoid sending processes down to function
yield {"address": saddr}, [
{"address": w["address"], "proc": weakref.ref(w["proc"])}
for w in workers_by_pid.values()
]
finally:
logger.debug("Closing out test cluster")
alive_workers = [
w["address"]
for w in workers_by_pid.values()
if w["proc"].is_alive()
]
loop.run_sync(
lambda: disconnect_all(
alive_workers,
timeout=disconnect_timeout,
rpc_kwargs=rpc_kwargs,
)
)
if scheduler.is_alive():
loop.run_sync(
lambda: disconnect(
saddr, timeout=disconnect_timeout, rpc_kwargs=rpc_kwargs
)
)
try:
client = default_client()
except ValueError:
pass
else:
client.close()
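# Editor's sketch (hypothetical test): using the cluster() context manager
# directly rather than through the fixtures defined above.
#
#   def test_submit_roundtrip(loop):
#       with cluster(nworkers=2) as (scheduler, workers):
#           with Client(scheduler["address"], loop=loop) as c:
#               assert c.submit(inc, 1).result() == 2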
async def disconnect(addr, timeout=3, rpc_kwargs=None):
rpc_kwargs = rpc_kwargs or {}
async def do_disconnect():
with rpc(addr, **rpc_kwargs) as w:
# If the worker was killed hard (e.g. sigterm) during test runtime,
# we do not know at this point and may not be able to connect
with suppress(EnvironmentError, CommClosedError):
# Do not request a reply since comms will be closed by the
# worker before a reply can be made and we will always trigger
# the timeout
await w.terminate(reply=False)
await asyncio.wait_for(do_disconnect(), timeout=timeout)
async def disconnect_all(addresses, timeout=3, rpc_kwargs=None):
await asyncio.gather(*(disconnect(addr, timeout, rpc_kwargs) for addr in addresses))
def gen_test(timeout: float = _TEST_TIMEOUT) -> Callable[[Callable], Callable]:
"""Coroutine test
@pytest.mark.parametrize("param", [1, 2, 3])
@gen_test(timeout=5)
async def test_foo(param)
await ... # use tornado coroutines
@gen_test(timeout=5)
async def test_foo():
await ... # use tornado coroutines
"""
assert timeout, (
"timeout should always be set and it should be smaller than the global one from"
"pytest-timeout"
)
def _(func):
def test_func(*args, **kwargs):
with clean() as loop:
injected_func = functools.partial(func, *args, **kwargs)
if iscoroutinefunction(func):
cor = injected_func
else:
cor = gen.coroutine(injected_func)
loop.run_sync(cor, timeout=timeout)
# Patch the signature so pytest can inject fixtures
test_func.__signature__ = inspect.signature(func)
return test_func
return _
async def start_cluster(
nthreads: list[tuple[str, int] | tuple[str, int, dict]],
scheduler_addr: str,
loop: IOLoop,
security: Security | dict[str, Any] | None = None,
Worker: type[ServerNode] = Worker,
scheduler_kwargs: dict[str, Any] = {},
worker_kwargs: dict[str, Any] = {},
) -> tuple[Scheduler, list[ServerNode]]:
s = await Scheduler(
loop=loop,
validate=True,
security=security,
port=0,
host=scheduler_addr,
**scheduler_kwargs,
)
workers = [
Worker(
s.address,
nthreads=ncore[1],
name=i,
security=security,
loop=loop,
validate=True,
host=ncore[0],
**(
merge(worker_kwargs, ncore[2]) # type: ignore
if len(ncore) > 2
else worker_kwargs
),
)
for i, ncore in enumerate(nthreads)
]
await asyncio.gather(*workers)
start = time()
while (
len(s.workers) < len(nthreads)
or any(ws.status != Status.running for ws in s.workers.values())
or any(comm.comm is None for comm in s.stream_comms.values())
):
await asyncio.sleep(0.01)
if time() > start + 30:
await asyncio.gather(*(w.close(timeout=1) for w in workers))
await s.close(fast=True)
raise TimeoutError("Cluster creation timeout")
return s, workers
async def end_cluster(s, workers):
logger.debug("Closing out test cluster")
async def end_worker(w):
with suppress(TimeoutError, CommClosedError, EnvironmentError):
await w.close(report=False)
await asyncio.gather(*(end_worker(w) for w in workers))
await s.close() # wait until scheduler stops completely
s.stop()
def gen_cluster(
nthreads: list[tuple[str, int] | tuple[str, int, dict]] = [
("127.0.0.1", 1),
("127.0.0.1", 2),
],
scheduler="127.0.0.1",
timeout: float = _TEST_TIMEOUT,
security: Security | dict[str, Any] | None = None,
Worker: type[ServerNode] = Worker,
client: bool = False,
scheduler_kwargs: dict[str, Any] = {},
worker_kwargs: dict[str, Any] = {},
client_kwargs: dict[str, Any] = {},
active_rpc_timeout: float = 1,
config: dict[str, Any] = {},
clean_kwargs: dict[str, Any] = {},
allow_unclosed: bool = False,
cluster_dump_directory: str | Literal[False] = "test_cluster_dump",
) -> Callable[[Callable], Callable]:
from distributed import Client
""" Coroutine test with small cluster
@gen_cluster()
async def test_foo(scheduler, worker1, worker2):
await ... # use tornado coroutines
@pytest.mark.parametrize("param", [1, 2, 3])
@gen_cluster()
async def test_foo(scheduler, worker1, worker2, param):
await ... # use tornado coroutines
@gen_cluster()
async def test_foo(scheduler, worker1, worker2, pytest_fixture_a, pytest_fixture_b):
await ... # use tornado coroutines
See also:
start
end
"""
assert timeout, (
"timeout should always be set and it should be smaller than the global one from"
"pytest-timeout"
)
scheduler_kwargs = merge(
{"dashboard": False, "dashboard_address": ":0"}, scheduler_kwargs
)
worker_kwargs = merge(
{"memory_limit": system.MEMORY_LIMIT, "death_timeout": 15}, worker_kwargs
)
def _(func):
if not iscoroutinefunction(func):
raise RuntimeError("gen_cluster only works for coroutine functions.")
@functools.wraps(func)
def test_func(*outer_args, **kwargs):
result = None
workers = []
with clean(timeout=active_rpc_timeout, **clean_kwargs) as loop:
async def coro():
with dask.config.set(config):
s = False
for _ in range(60):
try:
s, ws = await start_cluster(
nthreads,
scheduler,
loop,
security=security,
Worker=Worker,
scheduler_kwargs=scheduler_kwargs,
worker_kwargs=worker_kwargs,
)
except Exception as e:
logger.error(
"Failed to start gen_cluster: "
f"{e.__class__.__name__}: {e}; retrying",
exc_info=True,
)
await asyncio.sleep(1)
else:
workers[:] = ws
args = [s] + workers
break
if s is False:
raise Exception("Could not start cluster")
if client:
c = await Client(
s.address,
loop=loop,
security=security,
asynchronous=True,
**client_kwargs,
)
args = [c] + args
try:
coro = func(*args, *outer_args, **kwargs)
task = asyncio.create_task(coro)
coro2 = asyncio.wait_for(asyncio.shield(task), timeout)
result = await coro2
if s.validate:
s.validate_state()
except asyncio.TimeoutError:
assert task
buffer = io.StringIO()
# This stack indicates where the coro/test is suspended
task.print_stack(file=buffer)
if cluster_dump_directory:
await dump_cluster_state(
s,
ws,
output_dir=cluster_dump_directory,
func_name=func.__name__,
)
task.cancel()
while not task.cancelled():
await asyncio.sleep(0.01)
# Remove as much of the traceback as possible; it's
# uninteresting boilerplate from utils_test and asyncio and
# not from the code being tested.
raise TimeoutError(
f"Test timeout after {timeout}s.\n"
"========== Test stack trace starts here ==========\n"
f"{buffer.getvalue()}"
) from None
except pytest.xfail.Exception:
raise
except Exception:
if cluster_dump_directory and not has_pytestmark(
test_func, "xfail"
):
await dump_cluster_state(
s,
ws,
output_dir=cluster_dump_directory,
func_name=func.__name__,
)
raise
finally:
if client and c.status not in ("closing", "closed"):
await c._close(fast=s.status == Status.closed)
await end_cluster(s, workers)
await asyncio.wait_for(cleanup_global_workers(), 1)
try:
c = await default_client()
except ValueError:
pass
else:
await c._close(fast=True)
def get_unclosed():
return [c for c in Comm._instances if not c.closed()] + [
c
for c in _global_clients.values()
if c.status != "closed"
]
try:
start = time()
while time() < start + 60:
gc.collect()
if not get_unclosed():
break
await asyncio.sleep(0.05)
else:
if allow_unclosed:
print(f"Unclosed Comms: {get_unclosed()}")
else:
raise RuntimeError("Unclosed Comms", get_unclosed())
finally:
Comm._instances.clear()
_global_clients.clear()
return result
result = loop.run_sync(
coro, timeout=timeout * 2 if timeout else timeout
)
for w in workers:
if getattr(w, "data", None):
try:
w.data.clear()
except OSError:
# zict backends can fail if their storage directory
# was already removed
pass
return result
# Patch the signature so pytest can inject fixtures
orig_sig = inspect.signature(func)
args = [None] * (1 + len(nthreads)) # scheduler, *workers
if client:
args.insert(0, None)
bound = orig_sig.bind_partial(*args)
test_func.__signature__ = orig_sig.replace(
parameters=[
p
for name, p in orig_sig.parameters.items()
if name not in bound.arguments
]
)
return test_func
return _
async def dump_cluster_state(
s: Scheduler, ws: list[ServerNode], output_dir: str, func_name: str
) -> None:
"""A variant of Client.dump_cluster_state, which does not rely on any of the below
to work:
- Having a client at all
- Client->Scheduler comms
- Scheduler->Worker comms (unless using Nannies)
"""
scheduler_info = s._to_dict()
workers_info: dict[str, Any]
versions_info = version_module.get_versions()
if not ws or isinstance(ws[0], Worker):
workers_info = {w.address: w._to_dict() for w in ws}
else:
workers_info = await s.broadcast(msg={"op": "dump_state"}, on_error="return")
workers_info = {
k: repr(v) if isinstance(v, Exception) else v
for k, v in workers_info.items()
}
state = {
"scheduler": scheduler_info,
"workers": workers_info,
"versions": versions_info,
}
os.makedirs(output_dir, exist_ok=True)
fname = os.path.join(output_dir, func_name) + ".yaml"
with open(fname, "w") as fh:
yaml.safe_dump(state, fh) # Automatically convert tuples to lists
print(f"Dumped cluster state to {fname}")
def raises(func, exc=Exception):
try:
func()
return False
except exc:
return True
def _terminate_process(proc):
if proc.poll() is None:
if sys.platform.startswith("win"):
proc.send_signal(signal.CTRL_BREAK_EVENT)
else:
proc.send_signal(signal.SIGINT)
try:
proc.wait(30)
finally:
# Make sure we don't leave the process lingering around
with suppress(OSError):
proc.kill()
@contextmanager
def popen(args: list[str], flush_output: bool = True, **kwargs):
"""Start a shell command in a subprocess.
Yields a subprocess.Popen object.
stderr is redirected to stdout.
stdout is redirected to a pipe.
Parameters
----------
args: list[str]
Command line arguments
flush_output: bool, optional
If True (the default), the stdout/stderr pipe is emptied while it is being
filled. Set to False if you wish to read the output yourself. Note that setting
this to False and then failing to periodically read from the pipe may result in
a deadlock due to the pipe getting full.
kwargs: optional
optional arguments to subprocess.Popen
"""
kwargs["stdout"] = subprocess.PIPE
kwargs["stderr"] = subprocess.STDOUT
if sys.platform.startswith("win"):
# Allow using CTRL_C_EVENT / CTRL_BREAK_EVENT
kwargs["creationflags"] = subprocess.CREATE_NEW_PROCESS_GROUP
dump_stdout = False
args = list(args)
if sys.platform.startswith("win"):
args[0] = os.path.join(sys.prefix, "Scripts", args[0])
else:
args[0] = os.path.join(
os.environ.get("DESTDIR", "") + sys.prefix, "bin", args[0]
)
proc = subprocess.Popen(args, **kwargs)
if flush_output:
ex = concurrent.futures.ThreadPoolExecutor(1)
flush_future = ex.submit(proc.communicate)
try:
yield proc
# asyncio.CancelledError is raised by @gen_test/@gen_cluster timeout
except (Exception, asyncio.CancelledError):
dump_stdout = True
raise
finally:
try:
_terminate_process(proc)
finally:
# XXX Also dump stdout if return code != 0 ?
if flush_output:
out, err = flush_future.result()
ex.shutdown()
else:
out, err = proc.communicate()
assert not err
if dump_stdout:
print("\n" + "-" * 27 + " Subprocess stdout/stderr" + "-" * 27)
print(out.decode().rstrip())
print("-" * 80)
def wait_for(predicate, timeout, fail_func=None, period=0.001):
deadline = time() + timeout
while not predicate():
sleep(period)
if time() > deadline:
if fail_func is not None:
fail_func()
pytest.fail(f"condition not reached until {timeout} seconds")
async def async_wait_for(predicate, timeout, fail_func=None, period=0.001):
deadline = time() + timeout
while not predicate():
await asyncio.sleep(period)
if time() > deadline:
if fail_func is not None:
fail_func()
pytest.fail(f"condition not reached until {timeout} seconds")
@memoize
def has_ipv6():
"""
Return whether IPv6 is locally functional. This doesn't guarantee IPv6
is properly configured outside of localhost.
"""
if os.getenv("DISABLE_IPV6") == "1":
return False
serv = cli = None
try:
serv = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
serv.bind(("::", 0))
serv.listen(5)
cli = socket.create_connection(serv.getsockname()[:2])
return True
except OSError:
return False
finally:
if cli is not None:
cli.close()
if serv is not None:
serv.close()
if has_ipv6():
def requires_ipv6(test_func):
return test_func
else:
requires_ipv6 = pytest.mark.skip("ipv6 required")
async def assert_can_connect(addr, timeout=0.5, **kwargs):
"""
Check that it is possible to connect to the distributed *addr*
within the given *timeout*.
"""
comm = await connect(addr, timeout=timeout, **kwargs)
comm.abort()
async def assert_cannot_connect(
addr, timeout=0.5, exception_class=EnvironmentError, **kwargs
):
"""
Check that it is impossible to connect to the distributed *addr*
within the given *timeout*.
"""
with pytest.raises(exception_class):
comm = await connect(addr, timeout=timeout, **kwargs)
comm.abort()
async def assert_can_connect_from_everywhere_4_6(port, protocol="tcp", **kwargs):
"""
Check that the local *port* is reachable from all IPv4 and IPv6 addresses.
"""
futures = [
assert_can_connect("%s://127.0.0.1:%d" % (protocol, port), **kwargs),
assert_can_connect("%s://%s:%d" % (protocol, get_ip(), port), **kwargs),
]
if has_ipv6():
futures += [
assert_can_connect("%s://[::1]:%d" % (protocol, port), **kwargs),
assert_can_connect("%s://[%s]:%d" % (protocol, get_ipv6(), port), **kwargs),
]
await asyncio.gather(*futures)
async def assert_can_connect_from_everywhere_4(port, protocol="tcp", **kwargs):
"""
Check that the local *port* is reachable from all IPv4 addresses.
"""
futures = [
assert_can_connect("%s://127.0.0.1:%d" % (protocol, port), **kwargs),
assert_can_connect("%s://%s:%d" % (protocol, get_ip(), port), **kwargs),
]
if has_ipv6():
futures += [
assert_cannot_connect("%s://[::1]:%d" % (protocol, port), **kwargs),
assert_cannot_connect(
"%s://[%s]:%d" % (protocol, get_ipv6(), port), **kwargs
),
]
await asyncio.gather(*futures)
async def assert_can_connect_locally_4(port, **kwargs):
"""
Check that the local *port* is only reachable from local IPv4 addresses.
"""
futures = [assert_can_connect("tcp://127.0.0.1:%d" % port, **kwargs)]
if get_ip() != "127.0.0.1": # No outside IPv4 connectivity?
futures += [assert_cannot_connect("tcp://%s:%d" % (get_ip(), port), **kwargs)]
if has_ipv6():
futures += [
assert_cannot_connect("tcp://[::1]:%d" % port, **kwargs),
assert_cannot_connect("tcp://[%s]:%d" % (get_ipv6(), port), **kwargs),
]
await asyncio.gather(*futures)
async def assert_can_connect_from_everywhere_6(port, **kwargs):
"""
Check that the local *port* is reachable from all IPv6 addresses.
"""
assert has_ipv6()
futures = [
assert_cannot_connect("tcp://127.0.0.1:%d" % port, **kwargs),
assert_cannot_connect("tcp://%s:%d" % (get_ip(), port), **kwargs),
assert_can_connect("tcp://[::1]:%d" % port, **kwargs),
assert_can_connect("tcp://[%s]:%d" % (get_ipv6(), port), **kwargs),
]
await asyncio.gather(*futures)
async def assert_can_connect_locally_6(port, **kwargs):
"""
Check that the local *port* is only reachable from local IPv6 addresses.
"""
assert has_ipv6()
futures = [
assert_cannot_connect("tcp://127.0.0.1:%d" % port, **kwargs),
assert_cannot_connect("tcp://%s:%d" % (get_ip(), port), **kwargs),
assert_can_connect("tcp://[::1]:%d" % port, **kwargs),
]
if get_ipv6() != "::1": # No outside IPv6 connectivity?
futures += [
assert_cannot_connect("tcp://[%s]:%d" % (get_ipv6(), port), **kwargs)
]
await asyncio.gather(*futures)
@contextmanager
def captured_logger(logger, level=logging.INFO, propagate=None):
"""Capture output from the given Logger."""
if isinstance(logger, str):
logger = logging.getLogger(logger)
orig_level = logger.level
orig_handlers = logger.handlers[:]
if propagate is not None:
orig_propagate = logger.propagate
logger.propagate = propagate
sio = io.StringIO()
logger.handlers[:] = [logging.StreamHandler(sio)]
logger.setLevel(level)
try:
yield sio
finally:
logger.handlers[:] = orig_handlers
logger.setLevel(orig_level)
if propagate is not None:
logger.propagate = orig_propagate
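# Editor's sketch (hypothetical logger name and message): asserting on log
# output produced inside the block.
#
#   with captured_logger("distributed.scheduler") as sio:
#       ...  # code expected to log something
#   assert "expected message" in sio.getvalue()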
@contextmanager
def captured_handler(handler):
"""Capture output from the given logging.StreamHandler."""
assert isinstance(handler, logging.StreamHandler)
orig_stream = handler.stream
handler.stream = io.StringIO()
try:
yield handler.stream
finally:
handler.stream = orig_stream
@contextmanager
def new_config(new_config):
"""
Temporarily change configuration dictionary.
"""
from distributed.config import defaults
config = dask.config.config
orig_config = copy.deepcopy(config)
try:
config.clear()
config.update(copy.deepcopy(defaults))
dask.config.update(config, new_config)
initialize_logging(config)
yield
finally:
config.clear()
config.update(orig_config)
initialize_logging(config)
@contextmanager
def new_environment(changes):
saved_environ = os.environ.copy()
os.environ.update(changes)
try:
yield
finally:
os.environ.clear()
os.environ.update(saved_environ)
@contextmanager
def new_config_file(c):
"""
Temporarily change configuration file to match dictionary *c*.
"""
import yaml
old_file = os.environ.get("DASK_CONFIG")
fd, path = tempfile.mkstemp(prefix="dask-config")
try:
with os.fdopen(fd, "w") as f:
f.write(yaml.dump(c))
os.environ["DASK_CONFIG"] = path
try:
yield
finally:
if old_file:
os.environ["DASK_CONFIG"] = old_file
else:
del os.environ["DASK_CONFIG"]
finally:
os.remove(path)
certs_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "tests"))
def get_cert(filename):
"""
Get the path to one of the test TLS certificates.
"""
path = os.path.join(certs_dir, filename)
assert os.path.exists(path), path
return path
def tls_config():
"""
A functional TLS configuration with our test certs.
"""
ca_file = get_cert("tls-ca-cert.pem")
keycert = get_cert("tls-key-cert.pem")
return {
"distributed": {
"comm": {
"tls": {
"ca-file": ca_file,
"client": {"cert": keycert},
"scheduler": {"cert": keycert},
"worker": {"cert": keycert},
}
}
}
}
def tls_only_config():
"""
A functional TLS configuration with our test certs, disallowing
plain TCP communications.
"""
c = tls_config()
c["distributed"]["comm"]["require-encryption"] = True
return c
def tls_security():
"""
A Security object with proper TLS configuration.
"""
with new_config(tls_config()):
sec = Security()
return sec
def tls_only_security():
"""
A Security object with proper TLS configuration and disallowing plain
TCP communications.
"""
with new_config(tls_only_config()):
sec = Security()
assert sec.require_encryption
return sec
def get_server_ssl_context(
certfile="tls-cert.pem", keyfile="tls-key.pem", ca_file="tls-ca-cert.pem"
):
ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH, cafile=get_cert(ca_file))
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_cert_chain(get_cert(certfile), get_cert(keyfile))
return ctx
def get_client_ssl_context(
certfile="tls-cert.pem", keyfile="tls-key.pem", ca_file="tls-ca-cert.pem"
):
ctx = ssl.create_default_context(ssl.Purpose.SERVER_AUTH, cafile=get_cert(ca_file))
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_cert_chain(get_cert(certfile), get_cert(keyfile))
return ctx
def bump_rlimit(limit, desired):
resource = pytest.importorskip("resource")
try:
soft, hard = resource.getrlimit(limit)
if soft < desired:
resource.setrlimit(limit, (desired, max(hard, desired)))
except Exception as e:
pytest.skip(f"rlimit too low ({soft}) and can't be increased: {e}")
def gen_tls_cluster(**kwargs):
kwargs.setdefault("nthreads", [("tls://127.0.0.1", 1), ("tls://127.0.0.1", 2)])
return gen_cluster(
scheduler="tls://127.0.0.1", security=tls_only_security(), **kwargs
)
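# Editor's sketch (hypothetical test): gen_tls_cluster decorates a coroutine
# test exactly like gen_cluster, but with TLS-only listeners and security.
#
#   @gen_tls_cluster(client=True)
#   async def test_tls_submit(c, s, a, b):
#       assert await c.submit(inc, 1) == 2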
@contextmanager
def save_sys_modules():
    # Take copies (not aliases) so entries added inside the context can be detected
    old_modules = dict(sys.modules)
    old_path = list(sys.path)
    try:
        yield
    finally:
        sys.path[:] = [elem for elem in sys.path if elem in old_path]
        for elem in list(sys.modules):
            if elem not in old_modules:
                del sys.modules[elem]
@contextmanager
def check_thread_leak():
"""Context manager to ensure we haven't leaked any threads"""
# "TCP-Executor" threads are never stopped once they are started
BaseTCPConnector.warmup()
active_threads_start = threading.enumerate()
yield
start = time()
while True:
bad_threads = [
thread
for thread in threading.enumerate()
if thread not in active_threads_start
# FIXME this looks like a genuine leak that needs fixing
and "watch message queue" not in thread.name
]
if not bad_threads:
break
else:
sleep(0.01)
if time() > start + 5:
# Raise an error with information about leaked threads
from distributed import profile
bad_thread = bad_threads[0]
call_stacks = profile.call_stack(sys._current_frames()[bad_thread.ident])
assert False, (bad_thread, call_stacks)
def wait_active_children(timeout: float) -> list[multiprocessing.Process]:
"""Wait until timeout for mp_context.active_children() to terminate.
Return list of active subprocesses after the timeout expired.
"""
t0 = time()
while True:
# Do not sample the subprocesses once at the beginning with
# `for proc in mp_context.active_children: ...`, assume instead that new
# children processes may be spawned before the timeout expires.
children = mp_context.active_children()
if not children:
return []
join_timeout = timeout - time() + t0
if join_timeout <= 0:
return children
children[0].join(timeout=join_timeout)
def term_or_kill_active_children(timeout: float) -> None:
"""Send SIGTERM to mp_context.active_children(), wait up to 3 seconds for processes
to die, then send SIGKILL to the survivors
"""
children = mp_context.active_children()
for proc in children:
proc.terminate()
children = wait_active_children(timeout=timeout)
for proc in children:
proc.kill()
children = wait_active_children(timeout=30)
if children: # pragma: nocover
logger.warning("Leaked unkillable children processes: %s", children)
# It should be impossible to ignore SIGKILL on Linux/MacOSX
assert WINDOWS
@contextmanager
def check_process_leak(
check: bool = True, check_timeout: float = 40, term_timeout: float = 3
):
"""Terminate any currently-running subprocesses at both the beginning and end of this context
Parameters
----------
check : bool, optional
If True, raise AssertionError if any processes survive at the exit
check_timeout: float, optional
Wait up to these many seconds for subprocesses to terminate before failing
term_timeout: float, optional
After sending SIGTERM to a subprocess, wait up to these many seconds before
sending SIGKILL
"""
term_or_kill_active_children(timeout=term_timeout)
try:
yield
if check:
children = wait_active_children(timeout=check_timeout)
assert not children, f"Test leaked subprocesses: {children}"
finally:
term_or_kill_active_children(timeout=term_timeout)
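# Illustrative sketch (not part of the original module): check_process_leak terminates any
# stray subprocesses on entry and, with check=True, asserts that none survive on exit.  The
# child process below is a hypothetical example using the module's mp_context.
def _example_check_process_leak():
    with check_process_leak(check=True):
        proc = mp_context.Process(target=sum, args=([1, 2, 3],))
        proc.start()
        proc.join()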
@contextmanager
def check_instances():
Client._instances.clear()
Worker._instances.clear()
Scheduler._instances.clear()
SpecCluster._instances.clear()
Worker._initialized_clients.clear()
# assert all(n.status == "closed" for n in Nanny._instances), {
# n: n.status for n in Nanny._instances
# }
Nanny._instances.clear()
_global_clients.clear()
Comm._instances.clear()
yield
start = time()
while set(_global_clients):
sleep(0.1)
assert time() < start + 10
_global_clients.clear()
for w in Worker._instances:
with suppress(RuntimeError): # closed IOLoop
w.loop.add_callback(w.close, report=False, executor_wait=False)
if w.status in Status.ANY_RUNNING:
w.loop.add_callback(w.close)
Worker._instances.clear()
start = time()
while any(c.status != "closed" for c in Worker._initialized_clients):
sleep(0.1)
assert time() < start + 10
Worker._initialized_clients.clear()
for i in range(5):
if all(c.closed() for c in Comm._instances):
break
else:
sleep(0.1)
else:
L = [c for c in Comm._instances if not c.closed()]
Comm._instances.clear()
raise ValueError("Unclosed Comms", L)
assert all(
n.status == Status.closed or n.status == Status.init for n in Nanny._instances
), {n: n.status for n in Nanny._instances}
# assert not list(SpecCluster._instances) # TODO
assert all(c.status == Status.closed for c in SpecCluster._instances), list(
SpecCluster._instances
)
SpecCluster._instances.clear()
Nanny._instances.clear()
DequeHandler.clear_all_instances()
@contextmanager
def clean(threads=not WINDOWS, instances=True, timeout=1, processes=True):
with check_thread_leak() if threads else nullcontext():
with pristine_loop() as loop:
with check_process_leak(check=processes):
with check_instances() if instances else nullcontext():
with check_active_rpc(loop, timeout):
reset_config()
with dask.config.set(
{
"distributed.comm.timeouts.connect": "5s",
"distributed.admin.tick.interval": "500 ms",
}
):
# Restore default logging levels
# XXX use pytest hooks/fixtures instead?
for name, level in logging_levels.items():
logging.getLogger(name).setLevel(level)
yield loop
@pytest.fixture
def cleanup():
with clean():
yield
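# Illustrative sketch (not part of the original module): tests can either request the
# ``cleanup`` fixture by name or use the ``clean`` context manager directly, as below.
# ``_example_clean_usage`` is a hypothetical helper, not part of the test suite.
def _example_clean_usage():
    with clean(threads=False, processes=False):
        pass  # test body goes here; leaked instances and RPCs are checked on exit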
class TaskStateMetadataPlugin(WorkerPlugin):
"""WorkPlugin to populate TaskState.metadata"""
def setup(self, worker):
self.worker = worker
def transition(self, key, start, finish, **kwargs):
ts = self.worker.tasks[key]
if start == "ready" and finish == "executing":
ts.metadata["start_time"] = time()
elif start == "executing" and finish == "memory":
ts.metadata["stop_time"] = time()
class LockedComm(TCP):
def __init__(self, comm, read_event, read_queue, write_event, write_queue):
self.write_event = write_event
self.write_queue = write_queue
self.read_event = read_event
self.read_queue = read_queue
self.comm = comm
assert isinstance(comm, TCP)
def __getattr__(self, name):
return getattr(self.comm, name)
async def write(self, msg, serializers=None, on_error="message"):
if self.write_queue:
await self.write_queue.put((self.comm.peer_address, msg))
if self.write_event:
await self.write_event.wait()
return await self.comm.write(msg, serializers=serializers, on_error=on_error)
async def read(self, deserializers=None):
msg = await self.comm.read(deserializers=deserializers)
if self.read_queue:
await self.read_queue.put((self.comm.peer_address, msg))
if self.read_event:
await self.read_event.wait()
return msg
async def close(self):
await self.comm.close()
class _LockedCommPool(ConnectionPool):
"""A ConnectionPool wrapper to intercept network traffic between servers
This wrapper can be attached to a running server to intercept outgoing read or write requests in test environments.
Examples
--------
>>> w = await Worker(...)
>>> read_event = asyncio.Event()
>>> read_queue = asyncio.Queue()
>>> w.rpc = _LockedCommPool(
w.rpc,
read_event=read_event,
read_queue=read_queue,
)
# It might be necessary to remove all existing comms
# if the wrapped pool has been used before
>>> w.remove(remote_address)
>>> async def ping_pong():
return await w.rpc(remote_address).ping()
>>> with pytest.raises(asyncio.TimeoutError):
>>> await asyncio.wait_for(ping_pong(), 0.01)
>>> read_event.set()
>>> await ping_pong()
"""
def __init__(
self, pool, read_event=None, read_queue=None, write_event=None, write_queue=None
):
self.write_event = write_event
self.write_queue = write_queue
self.read_event = read_event
self.read_queue = read_queue
self.pool = pool
def __getattr__(self, name):
return getattr(self.pool, name)
async def connect(self, *args, **kwargs):
comm = await self.pool.connect(*args, **kwargs)
return LockedComm(
comm, self.read_event, self.read_queue, self.write_event, self.write_queue
)
async def close(self):
await self.pool.close()
def xfail_ssl_issue5601():
"""Work around https://github.com/dask/distributed/issues/5601 where any test that
inits Security.temporary() crashes on MacOS GitHub Actions CI
"""
pytest.importorskip("cryptography")
try:
Security.temporary()
except ImportError:
if MACOS:
pytest.xfail(reason="distributed#5601")
raise
def assert_worker_story(
story: list[tuple], expect: list[tuple], *, strict: bool = False
) -> None:
"""Test the output of ``Worker.story``
Parameters
==========
story: list[tuple]
Output of Worker.story
expect: list[tuple]
        Expected events. Each expected event must contain exactly two fewer fields than
        the story events (whose last two fields are always the stimulus_id and the timestamp).
Elements of the expect tuples can be
- callables, which accept a single element of the event tuple as argument and
return True for match and False for no match;
- arbitrary objects, which are compared with a == b
e.g.
.. code-block:: python
expect=[
("x", "missing", "fetch", "fetch", {}),
("gather-dependencies", worker_addr, lambda set_: "x" in set_),
]
strict: bool, optional
If True, the story must contain exactly as many events as expect.
If False (the default), the story may contain more events than expect; extra
events are ignored.
"""
now = time()
prev_ts = 0.0
for ev in story:
try:
assert len(ev) > 2
assert isinstance(ev, tuple)
assert isinstance(ev[-2], str) and ev[-2] # stimulus_id
assert isinstance(ev[-1], float) # timestamp
assert prev_ts <= ev[-1] # Timestamps are monotonic ascending
# Timestamps are within the last hour. It's been observed that a timestamp
# generated in a Nanny process can be a few milliseconds in the future.
assert now - 3600 < ev[-1] <= now + 1
prev_ts = ev[-1]
except AssertionError:
raise AssertionError(
f"Malformed story event: {ev}\nin story:\n{_format_story(story)}"
)
try:
if strict and len(story) != len(expect):
raise StopIteration()
story_it = iter(story)
for ev_expect in expect:
while True:
event = next(story_it)
# Ignore (stimulus_id, timestamp)
event = event[:-2]
if len(event) == len(ev_expect) and all(
ex(ev) if callable(ex) else ev == ex
for ev, ex in zip(event, ev_expect)
):
break
except StopIteration:
raise AssertionError(
f"assert_worker_story({strict=}) failed\n"
f"story:\n{_format_story(story)}\n"
f"expect:\n{_format_story(expect)}"
) from None
def _format_story(story: list[tuple]) -> str:
if not story:
return "(empty story)"
return "- " + "\n- ".join(str(ev) for ev in story)
class BrokenComm(Comm):
peer_address = ""
local_address = ""
def close(self):
pass
def closed(self):
return True
def abort(self):
pass
def read(self, deserializers=None):
raise OSError()
def write(self, msg, serializers=None, on_error=None):
raise OSError()
def has_pytestmark(test_func: Callable, name: str) -> bool:
"""Return True if the test function is marked by the given @pytest.mark.<name>;
False otherwise.
FIXME doesn't work with individually marked parameters inside
@pytest.mark.parametrize
"""
marks = getattr(test_func, "pytestmark", [])
return any(mark.name == name for mark in marks)
|
save_tiles_tfr_multiprocessing.py
|
#!/usr/bin/env python
import multiprocessing
from subprocess import call
import csv
try:
import mapnik2 as mapnik
except ImportError:
import mapnik
import sys, os, random as rd
import tensorflow as tf, cv2
# Define some parameters
layers = ['complete','amenity', 'barriers','bridge','buildings','landcover','landuse','natural','others','roads','text','water']
save_dir = '/images/50x1500'
tiles_by_file = 10000
initial_row = 0 # The first row to process
dataset_name = "train_1"
num_threads = 8
num_items = 10
size = 0.0005
csv_filename = '/images/'+ dataset_name + '_' + 'resumen.csv'
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _floats_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))
def save_data(data, layers, metaclass, location, writer):
global num_items
global csv_filename
global header
for item in range(0,num_items):
features = {
'item' : _int64_feature(item),
'metaclass': _int64_feature(metaclass),
'lat' : _floats_feature(float(location['lat'])),
'lon' : _floats_feature(float(location['lon'])),
'node_id' : _int64_feature(int(location['id'])),
'class' : _int64_feature(int(location['class'])),
'subclass' : _int64_feature(int(location['subclass']))
}
for layer in layers:
features[layer] = _bytes_feature([tf.compat.as_bytes(data[(item, layer)])])
# Create an example protocol buffer
example = tf.train.Example(features=tf.train.Features(feature=features))
# Serialize to string and write on the file
writer.write(example.SerializeToString())
    # Append this location's metadata to the summary CSV
    with open(csv_filename, 'a') as csvFile:
csv_writer = csv.DictWriter(csvFile, fieldnames = header)
csv_writer.writerow(location)
class RenderThread:
def __init__(self, q, printLock):
self.q = q
self.maxZoom = 1
self.printLock = printLock
self.width = 256
self.height = 256
def rendertiles(self, cpoint, data, item, layer, projec, zoom):
# target projection
#merc = mapnik.Projection('+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null +no_defs +over')
merc = projec
# WGS lat/long source projection of centre
longlat = mapnik.Projection('+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs')
# make a new Map object for the given mapfile
m = mapnik.Map(self.width, self.height)
mapfile = "/map_data/styles/bs_" + layer + ".xml"
mapnik.load_map(m, mapfile)
# ensure the target map projection is mercator
m.srs = merc.params()
# transform the centre point into the target coord sys
centre = mapnik.Coord(cpoint[0], cpoint[1])
transform = mapnik.ProjTransform(longlat, merc)
merc_centre = transform.forward(centre)
# 360/(2**zoom) degrees = 256 px
# so in merc 1px = (20037508.34*2) / (256 * 2**zoom)
# hence to find the bounds of our rectangle in projected coordinates + and - half the image width worth of projected coord units
dx = ((20037508.34*2*(self.width/2)))/(256*(2 ** (zoom)))
minx = merc_centre.x - dx
maxx = merc_centre.x + dx
# grow the height bbox, as we only accurately set the width bbox
m.aspect_fix_mode = mapnik.aspect_fix_mode.ADJUST_BBOX_HEIGHT
bounds = mapnik.Box2d(minx, merc_centre.y-10, maxx, merc_centre.y+10) # the y bounds will be fixed by mapnik due to ADJUST_BBOX_HEIGHT
m.zoom_to_box(bounds)
# render the map image to a file
# mapnik.render_to_file(m, output)
#render the map to an image
im = mapnik.Image(self.width,self.height)
mapnik.render(m, im)
img = im.tostring('png256')
data[(item, layer)]= img
def loop(self):
while True:
#Fetch a tile from the queue and render it
r = self.q.get()
if (r == None):
self.q.task_done()
break
else:
(bounds, data, item, layer, projec, zoom) = r
self.rendertiles(bounds, data, item, layer, projec, zoom)
self.printLock.acquire()
self.printLock.release()
self.q.task_done()
def render_location(locations, writer):
global size
global num_items
global num_threads
global layers
global initial_row
global tiles_by_file
counter = 0
while counter < tiles_by_file:
index = initial_row + counter
location = locations[index]
metaclass = index
lat = float(location['lat'])
lon = float(location['lon'])
cpoint = [lon, lat]
with multiprocessing.Manager() as manager:
data = manager.dict() # Create a list that can be shared between processes
queue = multiprocessing.JoinableQueue(32)
printLock = multiprocessing.Lock()
renderers = {}
for i in range(num_threads):
renderer = RenderThread( queue, printLock)
render_thread = multiprocessing.Process(target=renderer.loop)
render_thread.start()
#print "Started render thread %s" % render_thread.getName()
renderers[i] = render_thread
            #--- Generate num_items images by randomly shifting the centre point within [-0.1*size, 0.1*size] and rotating the projection
for item in range(0 , num_items):
if item == 0:
shift_lat = 0
shift_lon = 0
teta = 0
zoom = 18
else:
shift_lat = 0.1*size*(rd.random()-rd.random())
shift_lon = 0.1*size*(rd.random()-rd.random())
teta = 360*(rd.random()-rd.random()) #45*rd.random()
zoom = rd.randint(19,20)
for layer in layers:
new_cpoint = [cpoint[0]+shift_lon, cpoint[1]+shift_lat]
aeqd = mapnik.Projection('+proj=aeqd +ellps=WGS84 +lat_0=90 +lon_0='+str(teta))
t = (new_cpoint, data, item, layer, aeqd, zoom)
queue.put(t)
# Signal render threads to exit by sending empty request to queue
for i in range(num_threads):
queue.put(None)
# wait for pending rendering jobs to complete
queue.join()
for i in range(num_threads):
renderers[i].join()
            save_data(data, layers, metaclass, location, writer)
            counter += 1  # advance to the next location; without this the while loop never terminates
# Open the csv with the location information
locations = []
with open('/map_data/locations_50x1500.csv') as csvfile:
reader = csv.DictReader(csvfile)
header = reader.fieldnames
for row in reader:
locations.append(row)
print("{} Pointes were found".format(len(locations)))
print(locations[0])
# Save the header for the csv file of resume
header.extend(['metaclass', 'item'])
with open(csv_filename, 'w') as csvFile:
csv_writer = csv.DictWriter(csvFile, fieldnames=header)
csv_writer.writeheader()
# Create the tfrecords writer
filename = os.path.join(save_dir, dataset_name + '.tfrecords')
writer = tf.python_io.TFRecordWriter(filename)
render_location(locations, writer)
writer.close()
sys.stdout.flush()
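# Illustrative sketch (not part of the original script): reading one record back from the
# .tfrecords file written above, using the same TF 1.x API assumed by the writer.  The
# feature names mirror those used in save_data(); 'complete' is one of the layer keys.
def _example_read_first_record(tfrecords_path):
    for record in tf.python_io.tf_record_iterator(tfrecords_path):
        example = tf.train.Example.FromString(record)
        feats = example.features.feature
        lat = feats['lat'].float_list.value[0]
        lon = feats['lon'].float_list.value[0]
        png_bytes = feats['complete'].bytes_list.value[0]
        return lat, lon, len(png_bytes)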
|
rdd.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
from collections import defaultdict
from itertools import chain, ifilter, imap
import operator
import sys
import shlex
from subprocess import Popen, PIPE
from tempfile import NamedTemporaryFile
from threading import Thread
import warnings
import heapq
import bisect
import random
import socket
from math import sqrt, log, isinf, isnan, pow, ceil
from pyspark.serializers import NoOpSerializer, CartesianDeserializer, \
BatchedSerializer, CloudPickleSerializer, PairDeserializer, \
PickleSerializer, pack_long, AutoBatchedSerializer
from pyspark.join import python_join, python_left_outer_join, \
python_right_outer_join, python_full_outer_join, python_cogroup
from pyspark.statcounter import StatCounter
from pyspark.rddsampler import RDDSampler, RDDRangeSampler, RDDStratifiedSampler
from pyspark.storagelevel import StorageLevel
from pyspark.resultiterable import ResultIterable
from pyspark.shuffle import Aggregator, InMemoryMerger, ExternalMerger, \
get_used_memory, ExternalSorter
from pyspark.traceback_utils import SCCallSiteSync
from py4j.java_collections import ListConverter, MapConverter
__all__ = ["RDD"]
# TODO: for Python 3.3+, PYTHONHASHSEED should be reset to disable randomized
# hash for string
def portable_hash(x):
"""
    This function returns a consistent hash code for builtin types, especially
    for None and tuples containing None.
    The algorithm is similar to the one used by CPython 2.7.
>>> portable_hash(None)
0
>>> portable_hash((None, 1)) & 0xffffffff
219750521
"""
if x is None:
return 0
if isinstance(x, tuple):
h = 0x345678
for i in x:
h ^= portable_hash(i)
h *= 1000003
h &= sys.maxint
h ^= len(x)
if h == -1:
h = -2
return h
return hash(x)
class BoundedFloat(float):
"""
Bounded value is generated by approximate job, with confidence and low
bound and high bound.
>>> BoundedFloat(100.0, 0.95, 95.0, 105.0)
100.0
"""
def __new__(cls, mean, confidence, low, high):
obj = float.__new__(cls, mean)
obj.confidence = confidence
obj.low = low
obj.high = high
return obj
def _parse_memory(s):
"""
Parse a memory string in the format supported by Java (e.g. 1g, 200m) and
return the value in MB
>>> _parse_memory("256m")
256
>>> _parse_memory("2g")
2048
"""
units = {'g': 1024, 'm': 1, 't': 1 << 20, 'k': 1.0 / 1024}
if s[-1] not in units:
raise ValueError("invalid format: " + s)
return int(float(s[:-1]) * units[s[-1].lower()])
def _load_from_socket(port, serializer):
sock = socket.socket()
try:
sock.connect(("localhost", port))
rf = sock.makefile("rb", 65536)
for item in serializer.load_stream(rf):
yield item
finally:
sock.close()
class Partitioner(object):
def __init__(self, numPartitions, partitionFunc):
self.numPartitions = numPartitions
self.partitionFunc = partitionFunc
def __eq__(self, other):
return (isinstance(other, Partitioner) and self.numPartitions == other.numPartitions
and self.partitionFunc == other.partitionFunc)
def __call__(self, k):
return self.partitionFunc(k) % self.numPartitions
class RDD(object):
"""
A Resilient Distributed Dataset (RDD), the basic abstraction in Spark.
Represents an immutable, partitioned collection of elements that can be
operated on in parallel.
"""
def __init__(self, jrdd, ctx, jrdd_deserializer=AutoBatchedSerializer(PickleSerializer())):
self._jrdd = jrdd
self.is_cached = False
self.is_checkpointed = False
self.ctx = ctx
self._jrdd_deserializer = jrdd_deserializer
self._id = jrdd.id()
self.partitioner = None
def _pickled(self):
return self._reserialize(AutoBatchedSerializer(PickleSerializer()))
def id(self):
"""
A unique ID for this RDD (within its SparkContext).
"""
return self._id
def __repr__(self):
return self._jrdd.toString()
def __getnewargs__(self):
# This method is called when attempting to pickle an RDD, which is always an error:
raise Exception(
"It appears that you are attempting to broadcast an RDD or reference an RDD from an "
"action or transformation. RDD transformations and actions can only be invoked by the "
"driver, not inside of other transformations; for example, "
"rdd1.map(lambda x: rdd2.values.count() * x) is invalid because the values "
"transformation and count action cannot be performed inside of the rdd1.map "
"transformation. For more information, see SPARK-5063."
)
@property
def context(self):
"""
The L{SparkContext} that this RDD was created on.
"""
return self.ctx
def cache(self):
"""
Persist this RDD with the default storage level (C{MEMORY_ONLY_SER}).
"""
self.is_cached = True
self.persist(StorageLevel.MEMORY_ONLY_SER)
return self
def persist(self, storageLevel=StorageLevel.MEMORY_ONLY_SER):
"""
Set this RDD's storage level to persist its values across operations
after the first time it is computed. This can only be used to assign
a new storage level if the RDD does not have a storage level set yet.
        If no storage level is specified, it defaults to C{MEMORY_ONLY_SER}.
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> rdd.persist().is_cached
True
"""
self.is_cached = True
javaStorageLevel = self.ctx._getJavaStorageLevel(storageLevel)
self._jrdd.persist(javaStorageLevel)
return self
def unpersist(self):
"""
Mark the RDD as non-persistent, and remove all blocks for it from
memory and disk.
"""
self.is_cached = False
self._jrdd.unpersist()
return self
def checkpoint(self):
"""
Mark this RDD for checkpointing. It will be saved to a file inside the
checkpoint directory set with L{SparkContext.setCheckpointDir()} and
all references to its parent RDDs will be removed. This function must
be called before any job has been executed on this RDD. It is strongly
recommended that this RDD is persisted in memory, otherwise saving it
on a file will require recomputation.
"""
self.is_checkpointed = True
self._jrdd.rdd().checkpoint()
def isCheckpointed(self):
"""
Return whether this RDD has been checkpointed or not
"""
return self._jrdd.rdd().isCheckpointed()
def getCheckpointFile(self):
"""
Gets the name of the file to which this RDD was checkpointed
"""
checkpointFile = self._jrdd.rdd().getCheckpointFile()
if checkpointFile.isDefined():
return checkpointFile.get()
def map(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each element of this RDD.
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> sorted(rdd.map(lambda x: (x, 1)).collect())
[('a', 1), ('b', 1), ('c', 1)]
"""
def func(_, iterator):
return imap(f, iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def flatMap(self, f, preservesPartitioning=False):
"""
Return a new RDD by first applying a function to all elements of this
RDD, and then flattening the results.
>>> rdd = sc.parallelize([2, 3, 4])
>>> sorted(rdd.flatMap(lambda x: range(1, x)).collect())
[1, 1, 1, 2, 2, 3]
>>> sorted(rdd.flatMap(lambda x: [(x, x), (x, x)]).collect())
[(2, 2), (2, 2), (3, 3), (3, 3), (4, 4), (4, 4)]
"""
def func(s, iterator):
return chain.from_iterable(imap(f, iterator))
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitions(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> def f(iterator): yield sum(iterator)
>>> rdd.mapPartitions(f).collect()
[3, 7]
"""
def func(s, iterator):
return f(iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitionsWithIndex(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithIndex(f).sum()
6
"""
return PipelinedRDD(self, f, preservesPartitioning)
def mapPartitionsWithSplit(self, f, preservesPartitioning=False):
"""
Deprecated: use mapPartitionsWithIndex instead.
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithSplit(f).sum()
6
"""
warnings.warn("mapPartitionsWithSplit is deprecated; "
"use mapPartitionsWithIndex instead", DeprecationWarning, stacklevel=2)
return self.mapPartitionsWithIndex(f, preservesPartitioning)
def getNumPartitions(self):
"""
Returns the number of partitions in RDD
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> rdd.getNumPartitions()
2
"""
return self._jrdd.partitions().size()
def filter(self, f):
"""
Return a new RDD containing only the elements that satisfy a predicate.
>>> rdd = sc.parallelize([1, 2, 3, 4, 5])
>>> rdd.filter(lambda x: x % 2 == 0).collect()
[2, 4]
"""
def func(iterator):
return ifilter(f, iterator)
return self.mapPartitions(func, True)
def distinct(self, numPartitions=None):
"""
Return a new RDD containing the distinct elements in this RDD.
>>> sorted(sc.parallelize([1, 1, 2, 3]).distinct().collect())
[1, 2, 3]
"""
return self.map(lambda x: (x, None)) \
.reduceByKey(lambda x, _: x, numPartitions) \
.map(lambda (x, _): x)
def sample(self, withReplacement, fraction, seed=None):
"""
Return a sampled subset of this RDD.
:param withReplacement: can elements be sampled multiple times (replaced when sampled out)
:param fraction: expected size of the sample as a fraction of this RDD's size
without replacement: probability that each element is chosen; fraction must be [0, 1]
with replacement: expected number of times each element is chosen; fraction must be >= 0
:param seed: seed for the random number generator
>>> rdd = sc.parallelize(range(100), 4)
>>> rdd.sample(False, 0.1, 81).count()
10
"""
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
return self.mapPartitionsWithIndex(RDDSampler(withReplacement, fraction, seed).func, True)
def randomSplit(self, weights, seed=None):
"""
Randomly splits this RDD with the provided weights.
:param weights: weights for splits, will be normalized if they don't sum to 1
:param seed: random seed
:return: split RDDs in a list
>>> rdd = sc.parallelize(range(5), 1)
>>> rdd1, rdd2 = rdd.randomSplit([2, 3], 17)
>>> rdd1.collect()
[1, 3]
>>> rdd2.collect()
[0, 2, 4]
"""
s = float(sum(weights))
cweights = [0.0]
for w in weights:
cweights.append(cweights[-1] + w / s)
if seed is None:
seed = random.randint(0, 2 ** 32 - 1)
return [self.mapPartitionsWithIndex(RDDRangeSampler(lb, ub, seed).func, True)
for lb, ub in zip(cweights, cweights[1:])]
# this is ported from scala/spark/RDD.scala
def takeSample(self, withReplacement, num, seed=None):
"""
Return a fixed-size sampled subset of this RDD.
>>> rdd = sc.parallelize(range(0, 10))
>>> len(rdd.takeSample(True, 20, 1))
20
>>> len(rdd.takeSample(False, 5, 2))
5
>>> len(rdd.takeSample(False, 15, 3))
10
"""
numStDev = 10.0
if num < 0:
raise ValueError("Sample size cannot be negative.")
elif num == 0:
return []
initialCount = self.count()
if initialCount == 0:
return []
rand = random.Random(seed)
if (not withReplacement) and num >= initialCount:
# shuffle current RDD and return
samples = self.collect()
rand.shuffle(samples)
return samples
maxSampleSize = sys.maxint - int(numStDev * sqrt(sys.maxint))
if num > maxSampleSize:
raise ValueError(
"Sample size cannot be greater than %d." % maxSampleSize)
fraction = RDD._computeFractionForSampleSize(
num, initialCount, withReplacement)
samples = self.sample(withReplacement, fraction, seed).collect()
# If the first sample didn't turn out large enough, keep trying to take samples;
# this shouldn't happen often because we use a big multiplier for their initial size.
# See: scala/spark/RDD.scala
while len(samples) < num:
# TODO: add log warning for when more than one iteration was run
seed = rand.randint(0, sys.maxint)
samples = self.sample(withReplacement, fraction, seed).collect()
rand.shuffle(samples)
return samples[0:num]
@staticmethod
def _computeFractionForSampleSize(sampleSizeLowerBound, total, withReplacement):
"""
Returns a sampling rate that guarantees a sample of
size >= sampleSizeLowerBound 99.99% of the time.
How the sampling rate is determined:
Let p = num / total, where num is the sample size and total is the
total number of data points in the RDD. We're trying to compute
q > p such that
- when sampling with replacement, we're drawing each data point
with prob_i ~ Pois(q), where we want to guarantee
Pr[s < num] < 0.0001 for s = sum(prob_i for i from 0 to
total), i.e. the failure rate of not having a sufficiently large
sample < 0.0001. Setting q = p + 5 * sqrt(p/total) is sufficient
to guarantee 0.9999 success rate for num > 12, but we need a
slightly larger q (9 empirically determined).
- when sampling without replacement, we're drawing each data point
with prob_i ~ Binomial(total, fraction) and our choice of q
guarantees 1-delta, or 0.9999 success rate, where success rate is
defined the same as in sampling with replacement.
"""
fraction = float(sampleSizeLowerBound) / total
if withReplacement:
numStDev = 5
if (sampleSizeLowerBound < 12):
numStDev = 9
return fraction + numStDev * sqrt(fraction / total)
else:
delta = 0.00005
gamma = - log(delta) / total
return min(1, fraction + gamma + sqrt(gamma * gamma + 2 * gamma * fraction))
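    # Illustrative worked example (not part of the original code), for num=5 and total=100
    # (so fraction p = 0.05):
    #   with replacement (num < 12, so numStDev = 9):
    #       q = 0.05 + 9 * sqrt(0.05 / 100)                      ~= 0.25
    #   without replacement (delta = 0.00005, gamma = -log(delta) / 100 ~= 0.099):
    #       q = 0.05 + 0.099 + sqrt(0.099**2 + 2 * 0.099 * 0.05) ~= 0.29
    # i.e. small requested samples are heavily over-sampled so that the probability of an
    # undersized sample stays below 0.01%.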
def union(self, other):
"""
Return the union of this RDD and another one.
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> rdd.union(rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if self._jrdd_deserializer == other._jrdd_deserializer:
rdd = RDD(self._jrdd.union(other._jrdd), self.ctx,
self._jrdd_deserializer)
else:
# These RDDs contain data in different serialized formats, so we
# must normalize them to the default serializer.
self_copy = self._reserialize()
other_copy = other._reserialize()
rdd = RDD(self_copy._jrdd.union(other_copy._jrdd), self.ctx,
self.ctx.serializer)
if (self.partitioner == other.partitioner and
self.getNumPartitions() == rdd.getNumPartitions()):
rdd.partitioner = self.partitioner
return rdd
def intersection(self, other):
"""
Return the intersection of this RDD and another one. The output will
not contain any duplicate elements, even if the input RDDs did.
Note that this method performs a shuffle internally.
>>> rdd1 = sc.parallelize([1, 10, 2, 3, 4, 5])
>>> rdd2 = sc.parallelize([1, 6, 2, 3, 7, 8])
>>> rdd1.intersection(rdd2).collect()
[1, 2, 3]
"""
return self.map(lambda v: (v, None)) \
.cogroup(other.map(lambda v: (v, None))) \
.filter(lambda (k, vs): all(vs)) \
.keys()
def _reserialize(self, serializer=None):
serializer = serializer or self.ctx.serializer
if self._jrdd_deserializer != serializer:
self = self.map(lambda x: x, preservesPartitioning=True)
self._jrdd_deserializer = serializer
return self
def __add__(self, other):
"""
Return the union of this RDD and another one.
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> (rdd + rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if not isinstance(other, RDD):
raise TypeError
return self.union(other)
def repartitionAndSortWithinPartitions(self, numPartitions=None, partitionFunc=portable_hash,
ascending=True, keyfunc=lambda x: x):
"""
Repartition the RDD according to the given partitioner and, within each resulting partition,
sort records by their keys.
>>> rdd = sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)])
>>> rdd2 = rdd.repartitionAndSortWithinPartitions(2, lambda x: x % 2, 2)
>>> rdd2.glom().collect()
[[(0, 5), (0, 8), (2, 6)], [(1, 3), (3, 8), (3, 8)]]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
spill = (self.ctx._conf.get("spark.shuffle.spill", 'True').lower() == "true")
memory = _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m"))
serializer = self._jrdd_deserializer
def sortPartition(iterator):
sort = ExternalSorter(memory * 0.9, serializer).sorted if spill else sorted
return iter(sort(iterator, key=lambda (k, v): keyfunc(k), reverse=(not ascending)))
return self.partitionBy(numPartitions, partitionFunc).mapPartitions(sortPartition, True)
def sortByKey(self, ascending=True, numPartitions=None, keyfunc=lambda x: x):
"""
Sorts this RDD, which is assumed to consist of (key, value) pairs.
# noqa
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortByKey().first()
('1', 3)
>>> sc.parallelize(tmp).sortByKey(True, 1).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortByKey(True, 2).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> tmp2 = [('Mary', 1), ('had', 2), ('a', 3), ('little', 4), ('lamb', 5)]
>>> tmp2.extend([('whose', 6), ('fleece', 7), ('was', 8), ('white', 9)])
>>> sc.parallelize(tmp2).sortByKey(True, 3, keyfunc=lambda k: k.lower()).collect()
[('a', 3), ('fleece', 7), ('had', 2), ('lamb', 5),...('white', 9), ('whose', 6)]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
spill = (self.ctx._conf.get("spark.shuffle.spill", 'True').lower() == 'true')
memory = _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m"))
serializer = self._jrdd_deserializer
def sortPartition(iterator):
sort = ExternalSorter(memory * 0.9, serializer).sorted if spill else sorted
return iter(sort(iterator, key=lambda (k, v): keyfunc(k), reverse=(not ascending)))
if numPartitions == 1:
if self.getNumPartitions() > 1:
self = self.coalesce(1)
return self.mapPartitions(sortPartition, True)
# first compute the boundary of each part via sampling: we want to partition
# the key-space into bins such that the bins have roughly the same
# number of (key, value) pairs falling into them
rddSize = self.count()
if not rddSize:
return self # empty RDD
maxSampleSize = numPartitions * 20.0 # constant from Spark's RangePartitioner
fraction = min(maxSampleSize / max(rddSize, 1), 1.0)
samples = self.sample(False, fraction, 1).map(lambda (k, v): k).collect()
samples = sorted(samples, reverse=(not ascending), key=keyfunc)
        # we have numPartitions many parts but one of them has
        # an implicit boundary
bounds = [samples[len(samples) * (i + 1) / numPartitions]
for i in range(0, numPartitions - 1)]
def rangePartitioner(k):
p = bisect.bisect_left(bounds, keyfunc(k))
if ascending:
return p
else:
return numPartitions - 1 - p
return self.partitionBy(numPartitions, rangePartitioner).mapPartitions(sortPartition, True)
def sortBy(self, keyfunc, ascending=True, numPartitions=None):
"""
Sorts this RDD by the given keyfunc
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[0]).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[1]).collect()
[('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
"""
return self.keyBy(keyfunc).sortByKey(ascending, numPartitions).values()
def glom(self):
"""
Return an RDD created by coalescing all elements within each partition
into a list.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> sorted(rdd.glom().collect())
[[1, 2], [3, 4]]
"""
def func(iterator):
yield list(iterator)
return self.mapPartitions(func)
def cartesian(self, other):
"""
Return the Cartesian product of this RDD and another one, that is, the
RDD of all pairs of elements C{(a, b)} where C{a} is in C{self} and
C{b} is in C{other}.
>>> rdd = sc.parallelize([1, 2])
>>> sorted(rdd.cartesian(rdd).collect())
[(1, 1), (1, 2), (2, 1), (2, 2)]
"""
# Due to batching, we can't use the Java cartesian method.
deserializer = CartesianDeserializer(self._jrdd_deserializer,
other._jrdd_deserializer)
return RDD(self._jrdd.cartesian(other._jrdd), self.ctx, deserializer)
def groupBy(self, f, numPartitions=None):
"""
Return an RDD of grouped items.
>>> rdd = sc.parallelize([1, 1, 2, 3, 5, 8])
>>> result = rdd.groupBy(lambda x: x % 2).collect()
>>> sorted([(x, sorted(y)) for (x, y) in result])
[(0, [2, 8]), (1, [1, 1, 3, 5])]
"""
return self.map(lambda x: (f(x), x)).groupByKey(numPartitions)
def pipe(self, command, env={}):
"""
Return an RDD created by piping elements to a forked external process.
>>> sc.parallelize(['1', '2', '', '3']).pipe('cat').collect()
['1', '2', '', '3']
"""
def func(iterator):
pipe = Popen(
shlex.split(command), env=env, stdin=PIPE, stdout=PIPE)
def pipe_objs(out):
for obj in iterator:
out.write(str(obj).rstrip('\n') + '\n')
out.close()
Thread(target=pipe_objs, args=[pipe.stdin]).start()
return (x.rstrip('\n') for x in iter(pipe.stdout.readline, ''))
return self.mapPartitions(func)
def foreach(self, f):
"""
Applies a function to all elements of this RDD.
>>> def f(x): print x
>>> sc.parallelize([1, 2, 3, 4, 5]).foreach(f)
"""
def processPartition(iterator):
for x in iterator:
f(x)
return iter([])
self.mapPartitions(processPartition).count() # Force evaluation
def foreachPartition(self, f):
"""
Applies a function to each partition of this RDD.
>>> def f(iterator):
... for x in iterator:
... print x
>>> sc.parallelize([1, 2, 3, 4, 5]).foreachPartition(f)
"""
def func(it):
r = f(it)
try:
return iter(r)
except TypeError:
return iter([])
self.mapPartitions(func).count() # Force evaluation
def collect(self):
"""
Return a list that contains all of the elements in this RDD.
"""
with SCCallSiteSync(self.context) as css:
port = self.ctx._jvm.PythonRDD.collectAndServe(self._jrdd.rdd())
return list(_load_from_socket(port, self._jrdd_deserializer))
def reduce(self, f):
"""
Reduces the elements of this RDD using the specified commutative and
associative binary operator. Currently reduces partitions locally.
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).reduce(add)
15
>>> sc.parallelize((2 for _ in range(10))).map(lambda x: 1).cache().reduce(add)
10
>>> sc.parallelize([]).reduce(add)
Traceback (most recent call last):
...
ValueError: Can not reduce() empty RDD
"""
def func(iterator):
iterator = iter(iterator)
try:
initial = next(iterator)
except StopIteration:
return
yield reduce(f, iterator, initial)
vals = self.mapPartitions(func).collect()
if vals:
return reduce(f, vals)
raise ValueError("Can not reduce() empty RDD")
def treeReduce(self, f, depth=2):
"""
Reduces the elements of this RDD in a multi-level tree pattern.
:param depth: suggested depth of the tree (default: 2)
>>> add = lambda x, y: x + y
>>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
>>> rdd.treeReduce(add)
-5
>>> rdd.treeReduce(add, 1)
-5
>>> rdd.treeReduce(add, 2)
-5
>>> rdd.treeReduce(add, 5)
-5
>>> rdd.treeReduce(add, 10)
-5
"""
if depth < 1:
raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)
zeroValue = None, True # Use the second entry to indicate whether this is a dummy value.
def op(x, y):
if x[1]:
return y
elif y[1]:
return x
else:
return f(x[0], y[0]), False
reduced = self.map(lambda x: (x, False)).treeAggregate(zeroValue, op, op, depth)
if reduced[1]:
raise ValueError("Cannot reduce empty RDD.")
return reduced[0]
def fold(self, zeroValue, op):
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given associative function and a neutral "zero
value."
The function C{op(t1, t2)} is allowed to modify C{t1} and return it
as its result value to avoid object allocation; however, it should not
modify C{t2}.
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).fold(0, add)
15
"""
def func(iterator):
acc = zeroValue
for obj in iterator:
acc = op(obj, acc)
yield acc
vals = self.mapPartitions(func).collect()
return reduce(op, vals, zeroValue)
def aggregate(self, zeroValue, seqOp, combOp):
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given combine functions and a neutral "zero
value."
The functions C{op(t1, t2)} is allowed to modify C{t1} and return it
as its result value to avoid object allocation; however, it should not
modify C{t2}.
The first function (seqOp) can return a different result type, U, than
the type of this RDD. Thus, we need one operation for merging a T into
an U and one operation for merging two U
>>> seqOp = (lambda x, y: (x[0] + y, x[1] + 1))
>>> combOp = (lambda x, y: (x[0] + y[0], x[1] + y[1]))
>>> sc.parallelize([1, 2, 3, 4]).aggregate((0, 0), seqOp, combOp)
(10, 4)
>>> sc.parallelize([]).aggregate((0, 0), seqOp, combOp)
(0, 0)
"""
def func(iterator):
acc = zeroValue
for obj in iterator:
acc = seqOp(acc, obj)
yield acc
return self.mapPartitions(func).fold(zeroValue, combOp)
def treeAggregate(self, zeroValue, seqOp, combOp, depth=2):
"""
Aggregates the elements of this RDD in a multi-level tree
pattern.
:param depth: suggested depth of the tree (default: 2)
>>> add = lambda x, y: x + y
>>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
>>> rdd.treeAggregate(0, add, add)
-5
>>> rdd.treeAggregate(0, add, add, 1)
-5
>>> rdd.treeAggregate(0, add, add, 2)
-5
>>> rdd.treeAggregate(0, add, add, 5)
-5
>>> rdd.treeAggregate(0, add, add, 10)
-5
"""
if depth < 1:
raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)
if self.getNumPartitions() == 0:
return zeroValue
def aggregatePartition(iterator):
acc = zeroValue
for obj in iterator:
acc = seqOp(acc, obj)
yield acc
partiallyAggregated = self.mapPartitions(aggregatePartition)
numPartitions = partiallyAggregated.getNumPartitions()
scale = max(int(ceil(pow(numPartitions, 1.0 / depth))), 2)
# If creating an extra level doesn't help reduce the wall-clock time, we stop the tree
# aggregation.
while numPartitions > scale + numPartitions / scale:
numPartitions /= scale
curNumPartitions = numPartitions
def mapPartition(i, iterator):
for obj in iterator:
yield (i % curNumPartitions, obj)
partiallyAggregated = partiallyAggregated \
.mapPartitionsWithIndex(mapPartition) \
.reduceByKey(combOp, curNumPartitions) \
.values()
return partiallyAggregated.reduce(combOp)
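    # Illustrative worked example (not part of the original code): with 100 partitions and
    # depth=2, scale = max(int(ceil(100 ** 0.5)), 2) = 10.  Since 100 > 10 + 100/10 = 20,
    # one intermediate level combines the 100 partial results down to 10 partitions; then
    # 10 > 10 + 10/10 = 11 is false, so the remaining 10 values are reduced on the driver.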
def max(self, key=None):
"""
Find the maximum item in this RDD.
:param key: A function used to generate key for comparing
>>> rdd = sc.parallelize([1.0, 5.0, 43.0, 10.0])
>>> rdd.max()
43.0
>>> rdd.max(key=str)
5.0
"""
if key is None:
return self.reduce(max)
return self.reduce(lambda a, b: max(a, b, key=key))
def min(self, key=None):
"""
Find the minimum item in this RDD.
:param key: A function used to generate key for comparing
>>> rdd = sc.parallelize([2.0, 5.0, 43.0, 10.0])
>>> rdd.min()
2.0
>>> rdd.min(key=str)
10.0
"""
if key is None:
return self.reduce(min)
return self.reduce(lambda a, b: min(a, b, key=key))
def sum(self):
"""
Add up the elements in this RDD.
>>> sc.parallelize([1.0, 2.0, 3.0]).sum()
6.0
"""
return self.mapPartitions(lambda x: [sum(x)]).reduce(operator.add)
def count(self):
"""
Return the number of elements in this RDD.
>>> sc.parallelize([2, 3, 4]).count()
3
"""
return self.mapPartitions(lambda i: [sum(1 for _ in i)]).sum()
def stats(self):
"""
Return a L{StatCounter} object that captures the mean, variance
and count of the RDD's elements in one operation.
"""
def redFunc(left_counter, right_counter):
return left_counter.mergeStats(right_counter)
return self.mapPartitions(lambda i: [StatCounter(i)]).reduce(redFunc)
def histogram(self, buckets):
"""
Compute a histogram using the provided buckets. The buckets
are all open to the right except for the last which is closed.
e.g. [1,10,20,50] means the buckets are [1,10) [10,20) [20,50],
which means 1<=x<10, 10<=x<20, 20<=x<=50. And on the input of 1
and 50 we would have a histogram of 1,0,1.
        If your histogram is evenly spaced (e.g. [0, 10, 20, 30]),
        this can be switched from an O(log n) insertion to O(1) per
        element (where n = # buckets).
        Buckets must be sorted, must not contain any duplicates, and must
        have at least two elements.
        If `buckets` is a number, it will generate buckets which are
        evenly spaced between the minimum and maximum of the RDD. For
        example, if the min value is 0 and the max is 100, given buckets
        as 2, the resulting buckets will be [0,50) [50,100]. buckets must
        be at least 1. If the RDD contains infinity or NaN, an exception
        is thrown. If the elements in the RDD do not vary (max == min),
        a single bucket is always returned.
        It will return a tuple of buckets and histogram.
>>> rdd = sc.parallelize(range(51))
>>> rdd.histogram(2)
([0, 25, 50], [25, 26])
>>> rdd.histogram([0, 5, 25, 50])
([0, 5, 25, 50], [5, 20, 26])
>>> rdd.histogram([0, 15, 30, 45, 60]) # evenly spaced buckets
([0, 15, 30, 45, 60], [15, 15, 15, 6])
>>> rdd = sc.parallelize(["ab", "ac", "b", "bd", "ef"])
>>> rdd.histogram(("a", "b", "c"))
(('a', 'b', 'c'), [2, 2])
"""
if isinstance(buckets, (int, long)):
if buckets < 1:
raise ValueError("number of buckets must be >= 1")
# filter out non-comparable elements
def comparable(x):
if x is None:
return False
if type(x) is float and isnan(x):
return False
return True
filtered = self.filter(comparable)
# faster than stats()
def minmax(a, b):
return min(a[0], b[0]), max(a[1], b[1])
try:
minv, maxv = filtered.map(lambda x: (x, x)).reduce(minmax)
except TypeError as e:
if " empty " in str(e):
raise ValueError("can not generate buckets from empty RDD")
raise
if minv == maxv or buckets == 1:
return [minv, maxv], [filtered.count()]
try:
inc = (maxv - minv) / buckets
except TypeError:
raise TypeError("Can not generate buckets with non-number in RDD")
if isinf(inc):
raise ValueError("Can not generate buckets with infinite value")
# keep them as integer if possible
if inc * buckets != maxv - minv:
inc = (maxv - minv) * 1.0 / buckets
buckets = [i * inc + minv for i in range(buckets)]
buckets.append(maxv) # fix accumulated error
even = True
elif isinstance(buckets, (list, tuple)):
if len(buckets) < 2:
raise ValueError("buckets should have more than one value")
if any(i is None or isinstance(i, float) and isnan(i) for i in buckets):
raise ValueError("can not have None or NaN in buckets")
if sorted(buckets) != list(buckets):
raise ValueError("buckets should be sorted")
if len(set(buckets)) != len(buckets):
raise ValueError("buckets should not contain duplicated values")
minv = buckets[0]
maxv = buckets[-1]
even = False
inc = None
try:
steps = [buckets[i + 1] - buckets[i] for i in range(len(buckets) - 1)]
except TypeError:
pass # objects in buckets do not support '-'
else:
if max(steps) - min(steps) < 1e-10: # handle precision errors
even = True
inc = (maxv - minv) / (len(buckets) - 1)
else:
raise TypeError("buckets should be a list or tuple or number(int or long)")
def histogram(iterator):
counters = [0] * len(buckets)
for i in iterator:
if i is None or (type(i) is float and isnan(i)) or i > maxv or i < minv:
continue
t = (int((i - minv) / inc) if even
else bisect.bisect_right(buckets, i) - 1)
counters[t] += 1
# add last two together
last = counters.pop()
counters[-1] += last
return [counters]
def mergeCounters(a, b):
return [i + j for i, j in zip(a, b)]
return buckets, self.mapPartitions(histogram).reduce(mergeCounters)
def mean(self):
"""
Compute the mean of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).mean()
2.0
"""
return self.stats().mean()
def variance(self):
"""
Compute the variance of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).variance()
0.666...
"""
return self.stats().variance()
def stdev(self):
"""
Compute the standard deviation of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).stdev()
0.816...
"""
return self.stats().stdev()
def sampleStdev(self):
"""
Compute the sample standard deviation of this RDD's elements (which
corrects for bias in estimating the standard deviation by dividing by
N-1 instead of N).
>>> sc.parallelize([1, 2, 3]).sampleStdev()
1.0
"""
return self.stats().sampleStdev()
def sampleVariance(self):
"""
Compute the sample variance of this RDD's elements (which corrects
for bias in estimating the variance by dividing by N-1 instead of N).
>>> sc.parallelize([1, 2, 3]).sampleVariance()
1.0
"""
return self.stats().sampleVariance()
def countByValue(self):
"""
Return the count of each unique value in this RDD as a dictionary of
(value, count) pairs.
>>> sorted(sc.parallelize([1, 2, 1, 2, 2], 2).countByValue().items())
[(1, 2), (2, 3)]
"""
def countPartition(iterator):
counts = defaultdict(int)
for obj in iterator:
counts[obj] += 1
yield counts
def mergeMaps(m1, m2):
for k, v in m2.iteritems():
m1[k] += v
return m1
return self.mapPartitions(countPartition).reduce(mergeMaps)
def top(self, num, key=None):
"""
Get the top N elements from a RDD.
Note: It returns the list sorted in descending order.
>>> sc.parallelize([10, 4, 2, 12, 3]).top(1)
[12]
>>> sc.parallelize([2, 3, 4, 5, 6], 2).top(2)
[6, 5]
>>> sc.parallelize([10, 4, 2, 12, 3]).top(3, key=str)
[4, 3, 2]
"""
def topIterator(iterator):
yield heapq.nlargest(num, iterator, key=key)
def merge(a, b):
return heapq.nlargest(num, a + b, key=key)
return self.mapPartitions(topIterator).reduce(merge)
def takeOrdered(self, num, key=None):
"""
Get the N elements from a RDD ordered in ascending order or as
specified by the optional key function.
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7]).takeOrdered(6)
[1, 2, 3, 4, 5, 6]
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7], 2).takeOrdered(6, key=lambda x: -x)
[10, 9, 7, 6, 5, 4]
"""
def merge(a, b):
return heapq.nsmallest(num, a + b, key)
return self.mapPartitions(lambda it: [heapq.nsmallest(num, it, key)]).reduce(merge)
def take(self, num):
"""
Take the first num elements of the RDD.
        It works by first scanning one partition, and using the results from
that partition to estimate the number of additional partitions needed
to satisfy the limit.
Translated from the Scala implementation in RDD#take().
>>> sc.parallelize([2, 3, 4, 5, 6]).cache().take(2)
[2, 3]
>>> sc.parallelize([2, 3, 4, 5, 6]).take(10)
[2, 3, 4, 5, 6]
>>> sc.parallelize(range(100), 100).filter(lambda x: x > 90).take(3)
[91, 92, 93]
"""
items = []
totalParts = self._jrdd.partitions().size()
partsScanned = 0
while len(items) < num and partsScanned < totalParts:
# The number of partitions to try in this iteration.
# It is ok for this number to be greater than totalParts because
# we actually cap it at totalParts in runJob.
numPartsToTry = 1
if partsScanned > 0:
# If we didn't find any rows after the previous iteration,
# quadruple and retry. Otherwise, interpolate the number of
# partitions we need to try, but overestimate it by 50%.
# We also cap the estimation in the end.
if len(items) == 0:
numPartsToTry = partsScanned * 4
else:
                    # the first parameter of max is >=1 whenever partsScanned >= 2
numPartsToTry = int(1.5 * num * partsScanned / len(items)) - partsScanned
numPartsToTry = min(max(numPartsToTry, 1), partsScanned * 4)
left = num - len(items)
def takeUpToNumLeft(iterator):
iterator = iter(iterator)
taken = 0
while taken < left:
yield next(iterator)
taken += 1
p = range(partsScanned, min(partsScanned + numPartsToTry, totalParts))
res = self.context.runJob(self, takeUpToNumLeft, p, True)
items += res
partsScanned += numPartsToTry
return items[:num]
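    # Illustrative worked example (not part of the original code): with num=100, if the
    # first 2 scanned partitions yielded only 10 items, the next iteration tries
    # int(1.5 * 100 * 2 / 10) - 2 = 28 partitions, capped at partsScanned * 4 = 8; when a
    # scan yields nothing at all, the partition count simply quadruples instead.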
def first(self):
"""
Return the first element in this RDD.
>>> sc.parallelize([2, 3, 4]).first()
2
>>> sc.parallelize([]).first()
Traceback (most recent call last):
...
ValueError: RDD is empty
"""
rs = self.take(1)
if rs:
return rs[0]
raise ValueError("RDD is empty")
def isEmpty(self):
"""
Returns true if and only if the RDD contains no elements at all. Note that an RDD
may be empty even when it has at least 1 partition.
>>> sc.parallelize([]).isEmpty()
True
>>> sc.parallelize([1]).isEmpty()
False
"""
return self._jrdd.partitions().size() == 0 or len(self.take(1)) == 0
def saveAsNewAPIHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the new Hadoop OutputFormat API (mapreduce package). Keys/values are
converted for output using either user specified converters or, by default,
L{org.apache.spark.api.python.JavaToWritableConverter}.
:param conf: Hadoop job configuration, passed in as a dict
:param keyConverter: (None by default)
:param valueConverter: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopDataset(pickledRDD._jrdd, True, jconf,
keyConverter, valueConverter, True)
def saveAsNewAPIHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
keyConverter=None, valueConverter=None, conf=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the new Hadoop OutputFormat API (mapreduce package). Key and value types
will be inferred if not specified. Keys and values are converted for output using either
user specified converters or L{org.apache.spark.api.python.JavaToWritableConverter}. The
C{conf} is applied on top of the base Hadoop conf associated with the SparkContext
of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
:param path: path to Hadoop file
:param outputFormatClass: fully qualified classname of Hadoop OutputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.IntWritable", None by default)
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.Text", None by default)
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop job configuration, passed in as a dict (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsNewAPIHadoopFile(pickledRDD._jrdd, True, path,
outputFormatClass,
keyClass, valueClass,
keyConverter, valueConverter, jconf)
def saveAsHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the old Hadoop OutputFormat API (mapred package). Keys/values are
converted for output using either user specified converters or, by default,
L{org.apache.spark.api.python.JavaToWritableConverter}.
:param conf: Hadoop job configuration, passed in as a dict
:param keyConverter: (None by default)
:param valueConverter: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopDataset(pickledRDD._jrdd, True, jconf,
keyConverter, valueConverter, False)
def saveAsHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
keyConverter=None, valueConverter=None, conf=None,
compressionCodecClass=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the old Hadoop OutputFormat API (mapred package). Key and value types
will be inferred if not specified. Keys and values are converted for output using either
user specified converters or L{org.apache.spark.api.python.JavaToWritableConverter}. The
C{conf} is applied on top of the base Hadoop conf associated with the SparkContext
of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
:param path: path to Hadoop file
:param outputFormatClass: fully qualified classname of Hadoop OutputFormat
(e.g. "org.apache.hadoop.mapred.SequenceFileOutputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.IntWritable", None by default)
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.Text", None by default)
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: (None by default)
:param compressionCodecClass: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopFile(pickledRDD._jrdd, True, path,
outputFormatClass,
keyClass, valueClass,
keyConverter, valueConverter,
jconf, compressionCodecClass)
def saveAsSequenceFile(self, path, compressionCodecClass=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the L{org.apache.hadoop.io.Writable} types that we convert from the
RDD's key and value types. The mechanism is as follows:
1. Pyrolite is used to convert pickled Python RDD into RDD of Java objects.
2. Keys and values of this Java RDD are converted to Writables and written out.
:param path: path to sequence file
:param compressionCodecClass: (None by default)
"""
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsSequenceFile(pickledRDD._jrdd, True,
path, compressionCodecClass)
def saveAsPickleFile(self, path, batchSize=10):
"""
Save this RDD as a SequenceFile of serialized objects. The serializer
used is L{pyspark.serializers.PickleSerializer}, default batch size
is 10.
>>> tmpFile = NamedTemporaryFile(delete=True)
>>> tmpFile.close()
>>> sc.parallelize([1, 2, 'spark', 'rdd']).saveAsPickleFile(tmpFile.name, 3)
>>> sorted(sc.pickleFile(tmpFile.name, 5).collect())
[1, 2, 'rdd', 'spark']
"""
if batchSize == 0:
ser = AutoBatchedSerializer(PickleSerializer())
else:
ser = BatchedSerializer(PickleSerializer(), batchSize)
self._reserialize(ser)._jrdd.saveAsObjectFile(path)
def saveAsTextFile(self, path, compressionCodecClass=None):
"""
Save this RDD as a text file, using string representations of elements.
:param path: path to text file
:param compressionCodecClass: (None by default) string i.e.
"org.apache.hadoop.io.compress.GzipCodec"
>>> tempFile = NamedTemporaryFile(delete=True)
>>> tempFile.close()
>>> sc.parallelize(range(10)).saveAsTextFile(tempFile.name)
>>> from fileinput import input
>>> from glob import glob
>>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*"))))
'0\\n1\\n2\\n3\\n4\\n5\\n6\\n7\\n8\\n9\\n'
Empty lines are tolerated when saving to text files.
>>> tempFile2 = NamedTemporaryFile(delete=True)
>>> tempFile2.close()
>>> sc.parallelize(['', 'foo', '', 'bar', '']).saveAsTextFile(tempFile2.name)
>>> ''.join(sorted(input(glob(tempFile2.name + "/part-0000*"))))
'\\n\\n\\nbar\\nfoo\\n'
Using compressionCodecClass
>>> tempFile3 = NamedTemporaryFile(delete=True)
>>> tempFile3.close()
>>> codec = "org.apache.hadoop.io.compress.GzipCodec"
>>> sc.parallelize(['foo', 'bar']).saveAsTextFile(tempFile3.name, codec)
>>> from fileinput import input, hook_compressed
>>> ''.join(sorted(input(glob(tempFile3.name + "/part*.gz"), openhook=hook_compressed)))
'bar\\nfoo\\n'
"""
def func(split, iterator):
for x in iterator:
if not isinstance(x, basestring):
x = unicode(x)
if isinstance(x, unicode):
x = x.encode("utf-8")
yield x
keyed = self.mapPartitionsWithIndex(func)
keyed._bypass_serializer = True
if compressionCodecClass:
compressionCodec = self.ctx._jvm.java.lang.Class.forName(compressionCodecClass)
keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path, compressionCodec)
else:
keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path)
# Pair functions
def collectAsMap(self):
"""
Return the key-value pairs in this RDD to the master as a dictionary.
>>> m = sc.parallelize([(1, 2), (3, 4)]).collectAsMap()
>>> m[1]
2
>>> m[3]
4
"""
return dict(self.collect())
def keys(self):
"""
Return an RDD with the keys of each tuple.
>>> m = sc.parallelize([(1, 2), (3, 4)]).keys()
>>> m.collect()
[1, 3]
"""
return self.map(lambda (k, v): k)
def values(self):
"""
Return an RDD with the values of each tuple.
>>> m = sc.parallelize([(1, 2), (3, 4)]).values()
>>> m.collect()
[2, 4]
"""
return self.map(lambda (k, v): v)
def reduceByKey(self, func, numPartitions=None):
"""
Merge the values for each key using an associative reduce function.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
Output will be hash-partitioned with C{numPartitions} partitions, or
the default parallelism level if C{numPartitions} is not specified.
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKey(add).collect())
[('a', 2), ('b', 1)]
"""
return self.combineByKey(lambda x: x, func, func, numPartitions)
def reduceByKeyLocally(self, func):
"""
Merge the values for each key using an associative reduce function, but
return the results immediately to the master as a dictionary.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKeyLocally(add).items())
[('a', 2), ('b', 1)]
"""
def reducePartition(iterator):
m = {}
for k, v in iterator:
m[k] = func(m[k], v) if k in m else v
yield m
def mergeMaps(m1, m2):
for k, v in m2.iteritems():
m1[k] = func(m1[k], v) if k in m1 else v
return m1
return self.mapPartitions(reducePartition).reduce(mergeMaps)
def countByKey(self):
"""
Count the number of elements for each key, and return the result to the
master as a dictionary.
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.countByKey().items())
[('a', 2), ('b', 1)]
"""
return self.map(lambda x: x[0]).countByValue()
def join(self, other, numPartitions=None):
"""
Return an RDD containing all pairs of elements with matching keys in
C{self} and C{other}.
Each pair of elements will be returned as a (k, (v1, v2)) tuple, where
(k, v1) is in C{self} and (k, v2) is in C{other}.
Performs a hash join across the cluster.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2), ("a", 3)])
>>> sorted(x.join(y).collect())
[('a', (1, 2)), ('a', (1, 3))]
"""
return python_join(self, other, numPartitions)
def leftOuterJoin(self, other, numPartitions=None):
"""
Perform a left outer join of C{self} and C{other}.
For each element (k, v) in C{self}, the resulting RDD will either
contain all pairs (k, (v, w)) for w in C{other}, or the pair
(k, (v, None)) if no elements in C{other} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(x.leftOuterJoin(y).collect())
[('a', (1, 2)), ('b', (4, None))]
"""
return python_left_outer_join(self, other, numPartitions)
def rightOuterJoin(self, other, numPartitions=None):
"""
Perform a right outer join of C{self} and C{other}.
For each element (k, w) in C{other}, the resulting RDD will either
contain all pairs (k, (v, w)) for v in this, or the pair (k, (None, w))
if no elements in C{self} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(y.rightOuterJoin(x).collect())
[('a', (2, 1)), ('b', (None, 4))]
"""
return python_right_outer_join(self, other, numPartitions)
def fullOuterJoin(self, other, numPartitions=None):
"""
Perform a full outer join of C{self} and C{other}.
For each element (k, v) in C{self}, the resulting RDD will either
contain all pairs (k, (v, w)) for w in C{other}, or the pair
(k, (v, None)) if no elements in C{other} have key k.
Similarly, for each element (k, w) in C{other}, the resulting RDD will
either contain all pairs (k, (v, w)) for v in C{self}, or the pair
(k, (None, w)) if no elements in C{self} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2), ("c", 8)])
>>> sorted(x.fullOuterJoin(y).collect())
[('a', (1, 2)), ('b', (4, None)), ('c', (None, 8))]
"""
return python_full_outer_join(self, other, numPartitions)
# TODO: add option to control map-side combining
# portable_hash is used as default, because builtin hash of None is different
# cross machines.
def partitionBy(self, numPartitions, partitionFunc=portable_hash):
"""
Return a copy of the RDD partitioned using the specified partitioner.
>>> pairs = sc.parallelize([1, 2, 3, 4, 2, 4, 1]).map(lambda x: (x, x))
>>> sets = pairs.partitionBy(2).glom().collect()
>>> set(sets[0]).intersection(set(sets[1]))
set([])
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
partitioner = Partitioner(numPartitions, partitionFunc)
if self.partitioner == partitioner:
return self
# Transferring O(n) objects to Java is too expensive.
# Instead, we'll form the hash buckets in Python,
# transferring O(numPartitions) objects to Java.
# Each object is a (splitNumber, [objects]) pair.
# To avoid overly large objects, the objects are
# grouped into chunks.
outputSerializer = self.ctx._unbatched_serializer
limit = (_parse_memory(self.ctx._conf.get(
"spark.python.worker.memory", "512m")) / 2)
def add_shuffle_key(split, iterator):
buckets = defaultdict(list)
c, batch = 0, min(10 * numPartitions, 1000)
for k, v in iterator:
buckets[partitionFunc(k) % numPartitions].append((k, v))
c += 1
# check used memory and avg size of chunk of objects
if (c % 1000 == 0 and get_used_memory() > limit
or c > batch):
n, size = len(buckets), 0
for split in buckets.keys():
yield pack_long(split)
d = outputSerializer.dumps(buckets[split])
del buckets[split]
yield d
size += len(d)
avg = (size / n) >> 20
# let 1M < avg < 10M
if avg < 1:
batch *= 1.5
elif avg > 10:
batch = max(batch / 1.5, 1)
c = 0
for split, items in buckets.iteritems():
yield pack_long(split)
yield outputSerializer.dumps(items)
keyed = self.mapPartitionsWithIndex(add_shuffle_key, preservesPartitioning=True)
keyed._bypass_serializer = True
with SCCallSiteSync(self.context) as css:
pairRDD = self.ctx._jvm.PairwiseRDD(
keyed._jrdd.rdd()).asJavaPairRDD()
jpartitioner = self.ctx._jvm.PythonPartitioner(numPartitions,
id(partitionFunc))
jrdd = self.ctx._jvm.PythonRDD.valueOfPair(pairRDD.partitionBy(jpartitioner))
rdd = RDD(jrdd, self.ctx, BatchedSerializer(outputSerializer))
rdd.partitioner = partitioner
return rdd
# TODO: add control over map-side aggregation
def combineByKey(self, createCombiner, mergeValue, mergeCombiners,
numPartitions=None):
"""
Generic function to combine the elements for each key using a custom
set of aggregation functions.
Turns an RDD[(K, V)] into a result of type RDD[(K, C)], for a "combined
type" C. Note that V and C can be different -- for example, one might
group an RDD of type (Int, Int) into an RDD of type (Int, List[Int]).
Users provide three functions:
- C{createCombiner}, which turns a V into a C (e.g., creates
a one-element list)
- C{mergeValue}, to merge a V into a C (e.g., adds it to the end of
a list)
- C{mergeCombiners}, to combine two C's into a single one.
In addition, users can control the partitioning of the output RDD.
>>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> def f(x): return x
>>> def add(a, b): return a + str(b)
>>> sorted(x.combineByKey(str, add, add).collect())
[('a', '11'), ('b', '1')]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
serializer = self.ctx.serializer
spill = (self.ctx._conf.get("spark.shuffle.spill", 'True').lower()
== 'true')
memory = _parse_memory(self.ctx._conf.get(
"spark.python.worker.memory", "512m"))
agg = Aggregator(createCombiner, mergeValue, mergeCombiners)
def combineLocally(iterator):
merger = ExternalMerger(agg, memory * 0.9, serializer) \
if spill else InMemoryMerger(agg)
merger.mergeValues(iterator)
return merger.iteritems()
locally_combined = self.mapPartitions(combineLocally, preservesPartitioning=True)
shuffled = locally_combined.partitionBy(numPartitions)
def _mergeCombiners(iterator):
merger = ExternalMerger(agg, memory, serializer) \
if spill else InMemoryMerger(agg)
merger.mergeCombiners(iterator)
return merger.iteritems()
return shuffled.mapPartitions(_mergeCombiners, preservesPartitioning=True)
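# Editor's illustrative sketch (not part of the original module): the docstring's
# "(Int, Int) into (Int, List[Int])" case spelled out, assuming an active SparkContext
# named `sc`. createCombiner wraps the first value seen for a key, mergeValue folds
# further values in within a partition, and mergeCombiners merges partial lists
# coming from different partitions.
def _combine_by_key_example(sc):
    pairs = sc.parallelize([(1, 10), (2, 20), (1, 30)])
    grouped = pairs.combineByKey(
        lambda v: [v],               # createCombiner: V -> C
        lambda acc, v: acc + [v],    # mergeValue: (C, V) -> C
        lambda a, b: a + b)          # mergeCombiners: (C, C) -> C
    return sorted(grouped.collect())   # e.g. [(1, [10, 30]), (2, [20])]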
def aggregateByKey(self, zeroValue, seqFunc, combFunc, numPartitions=None):
"""
Aggregate the values of each key, using given combine functions and a neutral
"zero value". This function can return a different result type, U, than the type
of the values in this RDD, V. Thus, we need one operation for merging a V into
a U and one operation for merging two U's. The former operation is used for merging
values within a partition, and the latter is used for merging values between
partitions. To avoid memory allocation, both of these functions are
allowed to modify and return their first argument instead of creating a new U.
"""
def createZero():
return copy.deepcopy(zeroValue)
return self.combineByKey(
lambda v: seqFunc(createZero(), v), seqFunc, combFunc, numPartitions)
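# Editor's illustrative sketch (not part of the original module): a minimal use of
# aggregateByKey, assuming an active SparkContext named `sc`. The neutral zero value
# is a (sum, count) pair; seqFunc folds one value into it within a partition, and
# combFunc merges two partial accumulators across partitions.
def _aggregate_by_key_example(sc):
    rdd = sc.parallelize([("a", 1), ("a", 3), ("b", 5)])
    sum_counts = rdd.aggregateByKey(
        (0, 0),                                    # zeroValue: (sum, count)
        lambda acc, v: (acc[0] + v, acc[1] + 1),   # seqFunc: fold a value into the accumulator
        lambda a, b: (a[0] + b[0], a[1] + b[1]))   # combFunc: merge two accumulators
    # per-key averages, e.g. {"a": 2.0, "b": 5.0}
    return dict((k, float(s) / c) for k, (s, c) in sum_counts.collect())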
def foldByKey(self, zeroValue, func, numPartitions=None):
"""
Merge the values for each key using an associative function "func"
and a neutral "zeroValue" which may be added to the result an
arbitrary number of times, and must not change the result
(e.g., 0 for addition, or 1 for multiplication.).
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> from operator import add
>>> rdd.foldByKey(0, add).collect()
[('a', 2), ('b', 1)]
"""
def createZero():
return copy.deepcopy(zeroValue)
return self.combineByKey(lambda v: func(createZero(), v), func, func, numPartitions)
# TODO: support variant with custom partitioner
def groupByKey(self, numPartitions=None):
"""
Group the values for each key in the RDD into a single sequence.
Hash-partitions the resulting RDD into numPartitions partitions.
Note: If you are grouping in order to perform an aggregation (such as a
sum or average) over each key, using reduceByKey or aggregateByKey will
provide much better performance.
>>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> map((lambda (x,y): (x, list(y))), sorted(x.groupByKey().collect()))
[('a', [1, 1]), ('b', [1])]
"""
def createCombiner(x):
return [x]
def mergeValue(xs, x):
xs.append(x)
return xs
def mergeCombiners(a, b):
a.extend(b)
return a
return self.combineByKey(createCombiner, mergeValue, mergeCombiners,
numPartitions).mapValues(lambda x: ResultIterable(x))
def flatMapValues(self, f):
"""
Pass each value in the key-value pair RDD through a flatMap function
without changing the keys; this also retains the original RDD's
partitioning.
>>> x = sc.parallelize([("a", ["x", "y", "z"]), ("b", ["p", "r"])])
>>> def f(x): return x
>>> x.flatMapValues(f).collect()
[('a', 'x'), ('a', 'y'), ('a', 'z'), ('b', 'p'), ('b', 'r')]
"""
flat_map_fn = lambda (k, v): ((k, x) for x in f(v))
return self.flatMap(flat_map_fn, preservesPartitioning=True)
def mapValues(self, f):
"""
Pass each value in the key-value pair RDD through a map function
without changing the keys; this also retains the original RDD's
partitioning.
>>> x = sc.parallelize([("a", ["apple", "banana", "lemon"]), ("b", ["grapes"])])
>>> def f(x): return len(x)
>>> x.mapValues(f).collect()
[('a', 3), ('b', 1)]
"""
map_values_fn = lambda (k, v): (k, f(v))
return self.map(map_values_fn, preservesPartitioning=True)
def groupWith(self, other, *others):
"""
Alias for cogroup but with support for multiple RDDs.
>>> w = sc.parallelize([("a", 5), ("b", 6)])
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> z = sc.parallelize([("b", 42)])
>>> map((lambda (x,y): (x, (list(y[0]), list(y[1]), list(y[2]), list(y[3])))), \
sorted(list(w.groupWith(x, y, z).collect())))
[('a', ([5], [1], [2], [])), ('b', ([6], [4], [], [42]))]
"""
return python_cogroup((self, other) + others, numPartitions=None)
# TODO: add variant with custom partitioner
def cogroup(self, other, numPartitions=None):
"""
For each key k in C{self} or C{other}, return a resulting RDD that
contains a tuple with the list of values for that key in C{self} as
well as C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> map((lambda (x,y): (x, (list(y[0]), list(y[1])))), sorted(list(x.cogroup(y).collect())))
[('a', ([1], [2])), ('b', ([4], []))]
"""
return python_cogroup((self, other), numPartitions)
def sampleByKey(self, withReplacement, fractions, seed=None):
"""
Return a subset of this RDD sampled by key (via stratified sampling).
Create a sample of this RDD using variable sampling rates for
different keys as specified by fractions, a key to sampling rate map.
>>> fractions = {"a": 0.2, "b": 0.1}
>>> rdd = sc.parallelize(fractions.keys()).cartesian(sc.parallelize(range(0, 1000)))
>>> sample = dict(rdd.sampleByKey(False, fractions, 2).groupByKey().collect())
>>> 100 < len(sample["a"]) < 300 and 50 < len(sample["b"]) < 150
True
>>> max(sample["a"]) <= 999 and min(sample["a"]) >= 0
True
>>> max(sample["b"]) <= 999 and min(sample["b"]) >= 0
True
"""
for fraction in fractions.values():
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
return self.mapPartitionsWithIndex(
RDDStratifiedSampler(withReplacement, fractions, seed).func, True)
def subtractByKey(self, other, numPartitions=None):
"""
Return each (key, value) pair in C{self} that has no pair with matching
key in C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 2)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtractByKey(y).collect())
[('b', 4), ('b', 5)]
"""
def filter_func((key, vals)):
return vals[0] and not vals[1]
return self.cogroup(other, numPartitions).filter(filter_func).flatMapValues(lambda x: x[0])
def subtract(self, other, numPartitions=None):
"""
Return each value in C{self} that is not contained in C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 3)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtract(y).collect())
[('a', 1), ('b', 4), ('b', 5)]
"""
# note: here 'True' is just a placeholder
rdd = other.map(lambda x: (x, True))
return self.map(lambda x: (x, True)).subtractByKey(rdd, numPartitions).keys()
def keyBy(self, f):
"""
Creates tuples of the elements in this RDD by applying C{f}.
>>> x = sc.parallelize(range(0,3)).keyBy(lambda x: x*x)
>>> y = sc.parallelize(zip(range(0,5), range(0,5)))
>>> map((lambda (x,y): (x, (list(y[0]), (list(y[1]))))), sorted(x.cogroup(y).collect()))
[(0, ([0], [0])), (1, ([1], [1])), (2, ([], [2])), (3, ([], [3])), (4, ([2], [4]))]
"""
return self.map(lambda x: (f(x), x))
def repartition(self, numPartitions):
"""
Return a new RDD that has exactly numPartitions partitions.
Can increase or decrease the level of parallelism in this RDD.
Internally, this uses a shuffle to redistribute data.
If you are decreasing the number of partitions in this RDD, consider
using `coalesce`, which can avoid performing a shuffle.
>>> rdd = sc.parallelize([1,2,3,4,5,6,7], 4)
>>> sorted(rdd.glom().collect())
[[1], [2, 3], [4, 5], [6, 7]]
>>> len(rdd.repartition(2).glom().collect())
2
>>> len(rdd.repartition(10).glom().collect())
10
"""
jrdd = self._jrdd.repartition(numPartitions)
return RDD(jrdd, self.ctx, self._jrdd_deserializer)
def coalesce(self, numPartitions, shuffle=False):
"""
Return a new RDD that is reduced into `numPartitions` partitions.
>>> sc.parallelize([1, 2, 3, 4, 5], 3).glom().collect()
[[1], [2, 3], [4, 5]]
>>> sc.parallelize([1, 2, 3, 4, 5], 3).coalesce(1).glom().collect()
[[1, 2, 3, 4, 5]]
"""
jrdd = self._jrdd.coalesce(numPartitions)
return RDD(jrdd, self.ctx, self._jrdd_deserializer)
def zip(self, other):
"""
Zips this RDD with another one, returning key-value pairs with the
first element in each RDD, second element in each RDD, etc. Assumes
that the two RDDs have the same number of partitions and the same
number of elements in each partition (e.g. one was made through
a map on the other).
>>> x = sc.parallelize(range(0,5))
>>> y = sc.parallelize(range(1000, 1005))
>>> x.zip(y).collect()
[(0, 1000), (1, 1001), (2, 1002), (3, 1003), (4, 1004)]
"""
def get_batch_size(ser):
if isinstance(ser, BatchedSerializer):
return ser.batchSize
return 1 # not batched
def batch_as(rdd, batchSize):
return rdd._reserialize(BatchedSerializer(PickleSerializer(), batchSize))
my_batch = get_batch_size(self._jrdd_deserializer)
other_batch = get_batch_size(other._jrdd_deserializer)
if my_batch != other_batch or not my_batch:
# use the smallest batchSize for both of them
batchSize = min(my_batch, other_batch)
if batchSize <= 0:
# auto batched or unlimited
batchSize = 100
other = batch_as(other, batchSize)
self = batch_as(self, batchSize)
if self.getNumPartitions() != other.getNumPartitions():
raise ValueError("Can only zip with RDD which has the same number of partitions")
# There will be an Exception in JVM if there are different number
# of items in each partitions.
pairRDD = self._jrdd.zip(other._jrdd)
deserializer = PairDeserializer(self._jrdd_deserializer,
other._jrdd_deserializer)
return RDD(pairRDD, self.ctx, deserializer)
def zipWithIndex(self):
"""
Zips this RDD with its element indices.
The ordering is first based on the partition index and then the
ordering of items within each partition. So the first item in
the first partition gets index 0, and the last item in the last
partition receives the largest index.
This method needs to trigger a spark job when this RDD contains
more than one partition.
>>> sc.parallelize(["a", "b", "c", "d"], 3).zipWithIndex().collect()
[('a', 0), ('b', 1), ('c', 2), ('d', 3)]
"""
starts = [0]
if self.getNumPartitions() > 1:
nums = self.mapPartitions(lambda it: [sum(1 for i in it)]).collect()
for i in range(len(nums) - 1):
starts.append(starts[-1] + nums[i])
def func(k, it):
for i, v in enumerate(it, starts[k]):
yield v, i
return self.mapPartitionsWithIndex(func)
def zipWithUniqueId(self):
"""
Zips this RDD with generated unique Long ids.
Items in the kth partition will get ids k, n+k, 2*n+k, ..., where
n is the number of partitions. So there may exist gaps, but this
method won't trigger a spark job, which is different from
L{zipWithIndex}
>>> sc.parallelize(["a", "b", "c", "d", "e"], 3).zipWithUniqueId().collect()
[('a', 0), ('b', 1), ('c', 4), ('d', 2), ('e', 5)]
"""
n = self.getNumPartitions()
def func(k, it):
for i, v in enumerate(it):
yield v, i * n + k
return self.mapPartitionsWithIndex(func)
def name(self):
"""
Return the name of this RDD.
"""
name_ = self._jrdd.name()
if name_:
return name_.encode('utf-8')
def setName(self, name):
"""
Assign a name to this RDD.
>>> rdd1 = sc.parallelize([1,2])
>>> rdd1.setName('RDD1').name()
'RDD1'
"""
self._jrdd.setName(name)
return self
def toDebugString(self):
"""
A description of this RDD and its recursive dependencies for debugging.
"""
debug_string = self._jrdd.toDebugString()
if debug_string:
return debug_string.encode('utf-8')
def getStorageLevel(self):
"""
Get the RDD's current storage level.
>>> rdd1 = sc.parallelize([1,2])
>>> rdd1.getStorageLevel()
StorageLevel(False, False, False, False, 1)
>>> print(rdd1.getStorageLevel())
Serialized 1x Replicated
"""
java_storage_level = self._jrdd.getStorageLevel()
storage_level = StorageLevel(java_storage_level.useDisk(),
java_storage_level.useMemory(),
java_storage_level.useOffHeap(),
java_storage_level.deserialized(),
java_storage_level.replication())
return storage_level
def _defaultReducePartitions(self):
"""
Returns the default number of partitions to use during reduce tasks (e.g., groupBy).
If spark.default.parallelism is set, then we'll use the value from SparkContext
defaultParallelism, otherwise we'll use the number of partitions in this RDD.
This mirrors the behavior of the Scala Partitioner#defaultPartitioner, intended to reduce
the likelihood of OOMs. Once PySpark adopts Partitioner-based APIs, this behavior will
be inherent.
"""
if self.ctx._conf.contains("spark.default.parallelism"):
return self.ctx.defaultParallelism
else:
return self.getNumPartitions()
def lookup(self, key):
"""
Return the list of values in the RDD for key `key`. This operation
is done efficiently if the RDD has a known partitioner by only
searching the partition that the key maps to.
>>> l = range(1000)
>>> rdd = sc.parallelize(zip(l, l), 10)
>>> rdd.lookup(42) # slow
[42]
>>> sorted = rdd.sortByKey()
>>> sorted.lookup(42) # fast
[42]
>>> sorted.lookup(1024)
[]
"""
values = self.filter(lambda (k, v): k == key).values()
if self.partitioner is not None:
return self.ctx.runJob(values, lambda x: x, [self.partitioner(key)], False)
return values.collect()
def _to_java_object_rdd(self):
""" Return an JavaRDD of Object by unpickling
It will convert each Python object into Java object by Pyrolite, whenever the
RDD is serialized in batch or not.
"""
rdd = self._pickled()
return self.ctx._jvm.SerDeUtil.pythonToJava(rdd._jrdd, True)
def countApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate version of count() that returns a potentially incomplete
result within a timeout, even if not all tasks have finished.
>>> rdd = sc.parallelize(range(1000), 10)
>>> rdd.countApprox(1000, 1.0)
1000
"""
drdd = self.mapPartitions(lambda it: [float(sum(1 for i in it))])
return int(drdd.sumApprox(timeout, confidence))
def sumApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate operation to return the sum within a timeout
or meet the confidence.
>>> rdd = sc.parallelize(range(1000), 10)
>>> r = sum(xrange(1000))
>>> (rdd.sumApprox(1000) - r) / r < 0.05
True
"""
jrdd = self.mapPartitions(lambda it: [float(sum(it))])._to_java_object_rdd()
jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
r = jdrdd.sumApprox(timeout, confidence).getFinalValue()
return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
def meanApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate operation to return the mean within a timeout
or meet the confidence.
>>> rdd = sc.parallelize(range(1000), 10)
>>> r = sum(xrange(1000)) / 1000.0
>>> (rdd.meanApprox(1000) - r) / r < 0.05
True
"""
jrdd = self.map(float)._to_java_object_rdd()
jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
r = jdrdd.meanApprox(timeout, confidence).getFinalValue()
return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
def countApproxDistinct(self, relativeSD=0.05):
"""
.. note:: Experimental
Return approximate number of distinct elements in the RDD.
The algorithm used is based on streamlib's implementation of
"HyperLogLog in Practice: Algorithmic Engineering of a State
of The Art Cardinality Estimation Algorithm", available
<a href="http://dx.doi.org/10.1145/2452376.2452456">here</a>.
:param relativeSD: Relative accuracy. Smaller values create
counters that require more space.
It must be greater than 0.000017.
>>> n = sc.parallelize(range(1000)).map(str).countApproxDistinct()
>>> 950 < n < 1050
True
>>> n = sc.parallelize([i % 20 for i in range(1000)]).countApproxDistinct()
>>> 18 < n < 22
True
"""
if relativeSD < 0.000017:
raise ValueError("relativeSD should be greater than 0.000017")
if relativeSD > 0.37:
raise ValueError("relativeSD should be smaller than 0.37")
# the hash space in Java is 2^32
hashRDD = self.map(lambda x: portable_hash(x) & 0xFFFFFFFF)
return hashRDD._to_java_object_rdd().countApproxDistinct(relativeSD)
def toLocalIterator(self):
"""
Return an iterator that contains all of the elements in this RDD.
The iterator will consume as much memory as the largest partition in this RDD.
>>> rdd = sc.parallelize(range(10))
>>> [x for x in rdd.toLocalIterator()]
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
partitions = xrange(self.getNumPartitions())
for partition in partitions:
rows = self.context.runJob(self, lambda x: x, [partition])
for row in rows:
yield row
def _prepare_for_python_RDD(sc, command, obj=None):
# the serialized command will be compressed by broadcast
ser = CloudPickleSerializer()
pickled_command = ser.dumps(command)
if len(pickled_command) > (1 << 20): # 1M
broadcast = sc.broadcast(pickled_command)
pickled_command = ser.dumps(broadcast)
# tracking the life cycle by obj
if obj is not None:
obj._broadcast = broadcast
broadcast_vars = ListConverter().convert(
[x._jbroadcast for x in sc._pickled_broadcast_vars],
sc._gateway._gateway_client)
sc._pickled_broadcast_vars.clear()
env = MapConverter().convert(sc.environment, sc._gateway._gateway_client)
includes = ListConverter().convert(sc._python_includes, sc._gateway._gateway_client)
return pickled_command, broadcast_vars, env, includes
class PipelinedRDD(RDD):
"""
Pipelined maps:
>>> rdd = sc.parallelize([1, 2, 3, 4])
>>> rdd.map(lambda x: 2 * x).cache().map(lambda x: 2 * x).collect()
[4, 8, 12, 16]
>>> rdd.map(lambda x: 2 * x).map(lambda x: 2 * x).collect()
[4, 8, 12, 16]
Pipelined reduces:
>>> from operator import add
>>> rdd.map(lambda x: 2 * x).reduce(add)
20
>>> rdd.flatMap(lambda x: [x, x]).reduce(add)
20
"""
def __init__(self, prev, func, preservesPartitioning=False):
if not isinstance(prev, PipelinedRDD) or not prev._is_pipelinable():
# This transformation is the first in its stage:
self.func = func
self.preservesPartitioning = preservesPartitioning
self._prev_jrdd = prev._jrdd
self._prev_jrdd_deserializer = prev._jrdd_deserializer
else:
prev_func = prev.func
def pipeline_func(split, iterator):
return func(split, prev_func(split, iterator))
self.func = pipeline_func
self.preservesPartitioning = \
prev.preservesPartitioning and preservesPartitioning
self._prev_jrdd = prev._prev_jrdd # maintain the pipeline
self._prev_jrdd_deserializer = prev._prev_jrdd_deserializer
self.is_cached = False
self.is_checkpointed = False
self.ctx = prev.ctx
self.prev = prev
self._jrdd_val = None
self._id = None
self._jrdd_deserializer = self.ctx.serializer
self._bypass_serializer = False
self.partitioner = prev.partitioner if self.preservesPartitioning else None
self._broadcast = None
def __del__(self):
if self._broadcast:
self._broadcast.unpersist()
self._broadcast = None
@property
def _jrdd(self):
if self._jrdd_val:
return self._jrdd_val
if self._bypass_serializer:
self._jrdd_deserializer = NoOpSerializer()
if self.ctx.profiler_collector:
profiler = self.ctx.profiler_collector.new_profiler(self.ctx)
else:
profiler = None
command = (self.func, profiler, self._prev_jrdd_deserializer,
self._jrdd_deserializer)
pickled_cmd, bvars, env, includes = _prepare_for_python_RDD(self.ctx, command, self)
python_rdd = self.ctx._jvm.PythonRDD(self._prev_jrdd.rdd(),
bytearray(pickled_cmd),
env, includes, self.preservesPartitioning,
self.ctx.pythonExec,
bvars, self.ctx._javaAccumulator)
self._jrdd_val = python_rdd.asJavaRDD()
if profiler:
self._id = self._jrdd_val.id()
self.ctx.profiler_collector.add_profiler(self._id, profiler)
return self._jrdd_val
def id(self):
if self._id is None:
self._id = self._jrdd.id()
return self._id
def _is_pipelinable(self):
return not (self.is_cached or self.is_checkpointed)
def _test():
import doctest
from pyspark.context import SparkContext
globs = globals().copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
globs['sc'] = SparkContext('local[4]', 'PythonTest')
(failure_count, test_count) = doctest.testmod(
globs=globs, optionflags=doctest.ELLIPSIS)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
|
wtimegui3.py
|
#!/usr/bin/env python3
#
# wtimegui - Working time class with GUI
#
import sys
import time
try:
from Tkinter import *
import ttk
import tkMessageBox
from threading import *
except ImportError:
from tkinter import *
from tkinter import ttk
from threading import *
from tkinter import messagebox as tkMessageBox
from wtime4 import wtime
__author__ = "Riccardo Bruno"
__copyright__ = "2017"
__license__ = "Apache"
__maintainer__ = "Riccardo Bruno"
__email__ = "riccardo.bruno@gmail.com"
class wtimeGUI:
flag_ticket_reached = False
flag_time_reached = False
flag_thread_running = False
interval_thread_waitcycles = 5
check_time_thread=None
wtime_out = {}
winTITLE="wtime GUI"
lblFONT=("Lucida Grande", 12)
lblFGCOLOR='black'
root = None
GUI_data = (
{"type": "text", "name": "t1", "title": "T1", "row": 0, "col": 0},
{"type": "text", "name": "t2", "title": "T2", "row": 1, "col": 0},
{"type": "text", "name": "t2t1", "title": "T2 - T1", "row": 1, "col": 2},
{"type": "text", "name": "t3", "title": "T3", "row": 2, "col": 0},
{"type": "text", "name": "t4", "title": "T4", "row": 3, "col": 0},
{"type": "text", "name": "t4t3", "title": "T4 - T3", "row": 3, "col": 2},
{"type": "text", "name": "pause time", "title": "Pause Time", "row": 4, "col": 0},
{"type": "text", "name": "total time", "title": "Total Time", "row": 5, "col": 0},
{"type": "text", "name": "overtime", "title": "Over Time", "row": 5, "col": 2},
{"type": "text", "name": "time to reach", "title": "Time to reach", "row": 6, "col": 0},
{"type": "text", "name": "ticket remaining", "title": "Ticket remain", "row": 7, "col": 0},
{"type": "text", "name": "ticket remaining at", "title": "at", "row": 8, "col": 0},
{"type": "text", "name": "ticket time", "title": "TicketTime", "row": 9, "col": 0},
{"type": "text", "name": "ticket remaining perc", "title": "%", "row": 8, "col": 2},
{"type": "progress", "name": "ticket progress", "title": "Ticket progress", "row": 8, "col": 3},
{"type": "text", "name": "time remaining", "title": "Time remain", "row": 10, "col": 0},
{"type": "text", "name": "time remaining at", "title": "at", "row": 11, "col": 0},
{"type": "text", "name": "time remaining perc", "title": "%", "row": 11, "col": 2},
{"type": "progress", "name": "time progress", "title": "Time progress", "row": 11, "col": 3},
{"type": "button", "name": "Tx", "title": "T2", "row": 12, "col": 0},
{"type": "button", "name": "Update", "title": "Update", "row": 12, "col": 1},
{"type": "button", "name": "Exit", "title": "Exit", "row": 12, "col": 3},
)
def get_item(self, type, name):
item_result = None
for item in self.GUI_data:
if item["type"] == type and item["name"] == name:
item_result = item
break
return item_result
def __init__(self):
# wtime4
self.t1, self.t2, self.t3, self.t4, self.ct = wtime.getTimes(sys.argv)
self.wt = wtime(t1=self.t1, t2=self.t2, t3=self.t3, t4=self.t4, current_time=self.ct)
# GUI
self.root = Tk()
self.root.title(self.winTITLE)
self.gui_build()
self.check_time()
self.root.bind('<Return>',self.btnUpdate)
self.root.bind('<space>',self.btnUpdate)
self.root.bind('<Escape>',self.btnExit)
self.root.lift()
self.root.protocol("WM_DELETE_WINDOW", self.btnExit)
self.root.call('wm', 'attributes', '.', '-topmost', True)
self.root.after_idle(self.root.call, 'wm', 'attributes', '.', '-topmost', False)
#Thread
self.check_time_thread = Thread(target=self.check_time_thread, args=(self,))
self.check_time_thread.start()
# Main loop
self.root.mainloop()
def update_T_button(self):
button = self.get_item("button","Tx")["button_ctl"]
if self.t4 is not None:
button["text"] = "T-"
button["state"] = DISABLED
elif self.t3 is not None:
button["text"] = "T4"
elif self.t2 is not None:
button["text"] = "T3"
else:
pass
def check_time(self):
self.wtime_out = self.wt.calc2()
self.gui_update()
def btnTx(self, *args):
ts = wtime.get_ts()
if self.t2 is None:
self.t2 = ts
elif self.t3 is None:
self.t3 = ts
elif self.t4 is None:
self.t4 = ts
else:
return
self.wt = wtime(t1=self.t1, t2=self.t2, t3=self.t3, t4=self.t4, current_time=self.ct)
self.update_T_button()
self.btnUpdate()
def btnExit(self, *args):
self.btnUpdate()
self.flag_thread_running = False
self.root.destroy()
sys.exit(0)
def btnUpdate(self, *args):
self.check_time()
self.wt.printout(self.wtime_out)
#print(self.wtime_out)
def btnUnknown(self, *args):
print("WARNING: Unknown button pressed")
def show_message_box(self, message):
self.root.attributes("-topmost", True)
tkMessageBox.showinfo("wtimegui", message,parent=self.root)
self.root.attributes("-topmost", False)
def gui_build(self):
for item in self.GUI_data:
if item["type"] == "text":
if item["name"] in ("ticket remaining",
"ticket remaining at",
"time remaining",
"time remaining at",
"overtime"):
lblFONT_val_style = ("bold",)
else:
lblFONT_val_style = ()
if item["name"] in ("ticket remaining",
"ticket remaining perc",
"time remaining",
"time remaining perc",
"overtime"):
lblFONT_lbl_style = ("bold",)
else:
lblFONT_lbl_style = ()
item["label_var"] = StringVar()
item["value_var"] = StringVar()
item["label_ctl"] = Label(self.root,
textvariable=item["label_var"],
text="None",
font=self.lblFONT + lblFONT_lbl_style,
fg=self.lblFGCOLOR).grid(row=item["row"],
column=item["col"])
item["value_ctl"] = Label(self.root,
textvariable = item["value_var"],
text="None",
font=self.lblFONT + lblFONT_val_style,
fg=self.lblFGCOLOR).grid(row=item["row"],
column=item["col"]+1)
elif item["type"] == "progress":
item["progress_ctl"] = ttk.Progressbar(self.root,
orient=HORIZONTAL,
length=64,
mode='determinate')
item["progress_ctl"].grid(row=item["row"],
column=item["col"])
elif item["type"] == "button":
if item["title"] == "Exit":
callback = self.btnExit
elif item["title"] == "Update":
callback = self.btnUpdate
elif item["title"][0] == "T":
callback = self.btnTx
else:
print("WARNING: Unhespected button named: %s" % item["title"])
callback = self.btnUnknown
item['button_ctl'] = Button(self.root,
text=item["title"],
command=callback)
item['button_ctl'].grid(row=item["row"],
column=item["col"])
else:
print("WARNING: Skipping unknown type: '%s' for item '%s'"
% (item["type"], item["title"]))
self.update_T_button()
def gui_update(self):
for item in self.GUI_data:
if item["type"] == "text":
if item["name"] == "time remaining perc" or item["name"] == "ticket remaining perc":
perc = max(0, self.wtime_out.get(item["name"],""))
item["label_var"].set("%2d%%: " % perc)
elif item["name"] == "total time" and self.wtime_out.get("consume pause", None) is not None:
item["label_var"].set("Consuming pause: ")
item["value_var"].set(self.wtime_out["consume pause"])
else:
value = self.wtime_out.get(item["name"],"")
if value != "":
item["label_var"].set(item["title"] + " :")
item["value_var"].set(value)
else:
pass
elif item["type"] == "progress":
if item["name"] == "time progress":
item["progress_ctl"]["value"] = self.wtime_out["time remaining perc"]
elif item["name"] == "ticket progress":
item["progress_ctl"]["value"] = self.wtime_out["ticket remaining perc"]
else:
pass
elif item["type"] == "button":
pass
else:
print("WARNING: Skipping unknown type: '%s' for item '%s'"
% (item["type"], item["title"]))
def check_time_thread(self, *args):
gui = args[0]
if gui is None:
print("ERROR: No GUI passed")
time.sleep(.1)
try:
t = currentThread()
except NameError as e:
t = current_thread()
print("wtime updating thread started")
self.flag_thread_running = True
while self.flag_thread_running:
overtime = self.wtime_out.get("overtime", None)
if overtime is not None and self.flag_time_reached == False:
print("You've DONE!!!")
self.flag_time_reached = True
self.flag_ticket_reached = True
gui.show_message_box("You've DONE!!!")
elif overtime is not None and self.flag_time_reached == True:
self.TotalTimeText = "Overtime: "
self.gui_build()
elif self.wtime_out["ticket remaining"] == "reached" and self.flag_ticket_reached == False:
print("Ticket reached!!!")
self.flag_ticket_reached = True
gui.show_message_box("Ticket reached!!!")
self.check_time()
for i in range(1,10 * self.interval_thread_waitcycles):
if self.flag_thread_running:
time.sleep(.1)
else:
break
except Exception as e:
print("Exception: %s" % type(e).__name__ )
print("wtime updating thread not started: %s" % e)
print("wtime updating thread terminated")
if __name__ == "__main__":
gui = wtimeGUI()
|
main.py
|
# -*- coding: utf-8 -*-
"""
Created on Wed May 6 03:38:43 2020
@author: hp
"""
import cv2
import dlib
import numpy as np
import threading
from yolo_helper import YoloV3, load_darknet_weights, draw_outputs
from dlib_helper import (shape_to_np,
eye_on_mask,
contouring,
process_thresh,
print_eye_pos,
)
from define_mouth_distances import return_distances
yolo = YoloV3()
load_darknet_weights(yolo, 'yolov3.weights')
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('shape_68.dat')
d_outer, d_inner = return_distances(detector, predictor)
cap = cv2.VideoCapture(0)
_, frame_size = cap.read()
def eyes_mouth():
ret, img = cap.read()
thresh = img.copy()
w, h = img.shape[:2]  # note: shape[:2] is (height, width), so w holds the height and h the width
outer_points = [[49, 59], [50, 58], [51, 57], [52, 56], [53, 55]]
inner_points = [[61, 67], [62, 66], [63, 65]]
left = [36, 37, 38, 39, 40, 41]
right = [42, 43, 44, 45, 46, 47]
kernel = np.ones((9, 9), np.uint8)
cv2.namedWindow('image')
def nothing(x):
    # no-op callback required by cv2.createTrackbar
    pass
cv2.createTrackbar('threshold', 'image', 0, 255, nothing)
while(True):
ret, img = cap.read()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
rects = detector(gray, 1)
for rect in rects:
shape = predictor(gray, rect)
shape = shape_to_np(shape)
#mouth
cnt_outer = 0
cnt_inner = 0
for i, (p1, p2) in enumerate(outer_points):
if d_outer[i] + 5 < shape[p2][1] - shape[p1][1]:
cnt_outer += 1
for i, (p1, p2) in enumerate(inner_points):
if d_inner[i] + 3 < shape[p2][1] - shape[p1][1]:
cnt_inner += 1
if cnt_outer > 3 or cnt_inner > 2:
print('Mouth open')
for (x, y) in shape[48:]:
cv2.circle(img, (x, y), 2, (0, 0, 255), -1)
#eyes
mask = np.zeros((w, h), dtype=np.uint8)
mask, end_points_left = eye_on_mask(mask, left, shape)
mask, end_points_right = eye_on_mask(mask, right, shape)
mask = cv2.dilate(mask, kernel, iterations=5)
eyes = cv2.bitwise_and(img, img, mask=mask)
mask = (eyes == [0, 0, 0]).all(axis=2)
eyes[mask] = [255, 255, 255]
mid = (shape[42][0] + shape[39][0]) // 2
eyes_gray = cv2.cvtColor(eyes, cv2.COLOR_BGR2GRAY)
threshold = cv2.getTrackbarPos('threshold', 'image')
_, thresh = cv2.threshold(eyes_gray, threshold, 255, cv2.THRESH_BINARY)
thresh = process_thresh(thresh)
eyeball_pos_left = contouring(thresh[:, 0:mid], mid, img, end_points_left)
eyeball_pos_right = contouring(thresh[:, mid:], mid, img, end_points_right, True)
print_eye_pos(eyeball_pos_left, eyeball_pos_right)
cv2.imshow('result', img)
cv2.imshow("image", thresh)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
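# Editor's illustrative sketch (not part of the original script): the mouth-open test in
# eyes_mouth() compares the current vertical lip distances against the calibrated baselines
# d_outer / d_inner returned by return_distances(), plus a fixed margin. The same rule,
# factored into a pure function over an already-computed 68-point landmark array:
def is_mouth_open(shape, d_outer, d_inner, outer_margin=5, inner_margin=3):
    outer_points = [[49, 59], [50, 58], [51, 57], [52, 56], [53, 55]]
    inner_points = [[61, 67], [62, 66], [63, 65]]
    cnt_outer = sum(1 for i, (p1, p2) in enumerate(outer_points)
                    if d_outer[i] + outer_margin < shape[p2][1] - shape[p1][1])
    cnt_inner = sum(1 for i, (p1, p2) in enumerate(inner_points)
                    if d_inner[i] + inner_margin < shape[p2][1] - shape[p1][1])
    return cnt_outer > 3 or cnt_inner > 2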
def count_people():
while(True):
ret, image = cap.read()
frame = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
frame = cv2.resize(frame, (320, 320))
frame = frame.astype(np.float32)
frame = np.expand_dims(frame, 0)
frame = frame / 255
class_names = [c.strip() for c in open("classes.txt").readlines()]
boxes, scores, classes, nums = yolo(frame)
count=0
for i in range(nums[0]):
if int(classes[0][i] == 0):
count +=1
if count == 0:
print('No person detected')
elif count > 1:
print('More than one person detected')
image = draw_outputs(image, (boxes, scores, classes, nums), class_names)
cv2.imshow('Prediction', image)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
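# Editor's illustrative sketch (not part of the original script): the per-frame
# preprocessing done inside count_people(), factored into one helper -- BGR->RGB,
# resize to the 320x320 network input, scale to [0, 1] floats, and add a batch axis.
def preprocess_for_yolo(image, input_size=320):
    frame = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    frame = cv2.resize(frame, (input_size, input_size))
    frame = frame.astype(np.float32) / 255.0
    return np.expand_dims(frame, 0)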
t1 = threading.Thread(target=eyes_mouth)
t2 = threading.Thread(target=count_people)
t1.start()
t2.start()
t1.join()
t2.join()
cap.release()
cv2.destroyAllWindows()
|
servers.py
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import subprocess
import sys
import threading
import time
import debugpy
from debugpy import adapter
from debugpy.common import compat, fmt, json, log, messaging, sockets
from debugpy.adapter import components
access_token = None
"""Access token used to authenticate with the servers."""
_lock = threading.RLock()
_connections = []
All servers that are connected to this adapter, in the order in which they connected.
"""
_connections_changed = threading.Event()
class Connection(object):
"""A debug server that is connected to the adapter.
Servers that are not participating in a debug session are managed directly by the
corresponding Connection instance.
Servers that are participating in a debug session are managed by that session's
Server component instance, but the Connection object remains, and takes over again
once the session ends.
"""
def __init__(self, sock):
from debugpy.adapter import sessions
self.disconnected = False
self.server = None
"""The Server component, if this debug server belongs to Session.
"""
self.pid = None
stream = messaging.JsonIOStream.from_socket(sock, str(self))
self.channel = messaging.JsonMessageChannel(stream, self)
self.channel.start()
try:
self.authenticate()
info = self.channel.request("pydevdSystemInfo")
process_info = info("process", json.object())
self.pid = process_info("pid", int)
self.ppid = process_info("ppid", int, optional=True)
if self.ppid == ():
self.ppid = None
self.channel.name = stream.name = str(self)
debugpy_dir = os.path.dirname(os.path.dirname(debugpy.__file__))
# Note: we must check if 'debugpy' is not already in sys.modules because the
# evaluation of an import at the wrong time could deadlock Python due to
# its import lock.
#
# So, in general this evaluation shouldn't do anything. It's only
# important when pydevd attaches automatically to a subprocess. In this
# case, we have to make sure that debugpy is properly put back in the game
# for users to be able to use it.
#
# In this case (when the import is needed), this evaluation *must* be done
# before the configurationDone request is sent -- if this is not respected
# it's possible that pydevd already started secondary threads to handle
# commands, in which case it's very likely that this command would be
# evaluated at the wrong thread and the import could potentially deadlock
# the program.
#
# Note 2: the sys module is guaranteed to be in the frame globals and
# doesn't need to be imported.
inject_debugpy = """
if 'debugpy' not in sys.modules:
sys.path.insert(0, {debugpy_dir!r})
try:
import debugpy
finally:
del sys.path[0]
"""
inject_debugpy = fmt(inject_debugpy, debugpy_dir=debugpy_dir)
try:
self.channel.request("evaluate", {"expression": inject_debugpy})
except messaging.MessageHandlingError:
# Failure to inject is not a fatal error - such a subprocess can
# still be debugged, it just won't support "import debugpy" in user
# code - so don't terminate the session.
log.swallow_exception(
"Failed to inject debugpy into {0}:", self, level="warning"
)
with _lock:
# The server can disconnect concurrently before we get here, e.g. if
# it was force-killed. If the disconnect() handler has already run,
# don't register this server or report it, since there would be nothing to
# deregister it later.
if self.disconnected:
return
if any(conn.pid == self.pid for conn in _connections):
raise KeyError(
fmt("{0} is already connected to this adapter", self)
)
is_first_server = len(_connections) == 0
_connections.append(self)
_connections_changed.set()
except Exception:
log.swallow_exception("Failed to accept incoming server connection:")
self.channel.close()
# If this was the first server to connect, and the main thread is inside
# wait_until_disconnected(), we want to unblock it and allow it to exit.
dont_wait_for_first_connection()
# If we couldn't retrieve all the necessary info from the debug server,
# or there's a PID clash, we don't want to track this debuggee anymore,
# but we want to continue accepting connections.
return
parent_session = sessions.get(self.ppid)
if parent_session is None:
log.info("No active debug session for parent process of {0}.", self)
else:
try:
parent_session.client.notify_of_subprocess(self)
return
except Exception:
# This might fail if the client concurrently disconnects from the parent
# session. We still want to keep the connection around, in case the
# client reconnects later. If the parent session was "launch", it'll take
# care of closing the remaining server connections.
log.swallow_exception(
"Failed to notify parent session about {0}:", self
)
# If we got to this point, the subprocess notification was either not sent,
# or not delivered successfully. For the first server, this is expected, since
# it corresponds to the root process, and there is no other debug session to
# notify. But subsequent server connections represent subprocesses, and those
# will not start running user code until the client tells them to. Since there
# isn't going to be a client without the notification, such subprocesses have
# to be unblocked.
if is_first_server:
return
log.info("No clients to wait for - unblocking {0}.", self)
try:
self.channel.request("initialize", {"adapterID": "debugpy"})
self.channel.request("attach", {"subProcessId": self.pid})
self.channel.request("configurationDone")
self.channel.request("disconnect")
except Exception:
log.swallow_exception("Failed to unblock orphaned subprocess:")
self.channel.close()
def __str__(self):
return "Server" + fmt("[?]" if self.pid is None else "[pid={0}]", self.pid)
def authenticate(self):
if access_token is None and adapter.access_token is None:
return
auth = self.channel.request(
"pydevdAuthorize", {"debugServerAccessToken": access_token}
)
if auth["clientAccessToken"] != adapter.access_token:
self.channel.close()
raise RuntimeError('Mismatched "clientAccessToken"; server not authorized.')
def request(self, request):
raise request.isnt_valid(
"Requests from the debug server to the client are not allowed."
)
def event(self, event):
pass
def terminated_event(self, event):
self.channel.close()
def disconnect(self):
with _lock:
self.disconnected = True
if self.server is not None:
# If the disconnect happened while Server was being instantiated,
# we need to tell it, so that it can clean up via Session.finalize().
# It will also take care of deregistering the connection in that case.
self.server.disconnect()
elif self in _connections:
_connections.remove(self)
_connections_changed.set()
def attach_to_session(self, session):
"""Attaches this server to the specified Session as a Server component.
Raises ValueError if the server already belongs to some session.
"""
with _lock:
if self.server is not None:
raise ValueError
log.info("Attaching {0} to {1}", self, session)
self.server = Server(session, self)
class Server(components.Component):
"""Handles the debug server side of a debug session."""
message_handler = components.Component.message_handler
class Capabilities(components.Capabilities):
PROPERTIES = {
"supportsCompletionsRequest": False,
"supportsConditionalBreakpoints": False,
"supportsConfigurationDoneRequest": False,
"supportsDataBreakpoints": False,
"supportsDelayedStackTraceLoading": False,
"supportsDisassembleRequest": False,
"supportsEvaluateForHovers": False,
"supportsExceptionInfoRequest": False,
"supportsExceptionOptions": False,
"supportsFunctionBreakpoints": False,
"supportsGotoTargetsRequest": False,
"supportsHitConditionalBreakpoints": False,
"supportsLoadedSourcesRequest": False,
"supportsLogPoints": False,
"supportsModulesRequest": False,
"supportsReadMemoryRequest": False,
"supportsRestartFrame": False,
"supportsRestartRequest": False,
"supportsSetExpression": False,
"supportsSetVariable": False,
"supportsStepBack": False,
"supportsStepInTargetsRequest": False,
"supportsTerminateDebuggee": False,
"supportsTerminateRequest": False,
"supportsTerminateThreadsRequest": False,
"supportsValueFormattingOptions": False,
"exceptionBreakpointFilters": [],
"additionalModuleColumns": [],
"supportedChecksumAlgorithms": [],
}
def __init__(self, session, connection):
assert connection.server is None
with session:
assert not session.server
super(Server, self).__init__(session, channel=connection.channel)
self.connection = connection
assert self.session.pid is None
if self.session.launcher and self.session.launcher.pid != self.pid:
log.info(
"Launcher reported PID={0}, but server reported PID={1}",
self.session.launcher.pid,
self.pid,
)
self.session.pid = self.pid
session.server = self
@property
def pid(self):
"""Process ID of the debuggee process, as reported by the server."""
return self.connection.pid
@property
def ppid(self):
"""Parent process ID of the debuggee process, as reported by the server."""
return self.connection.ppid
def initialize(self, request):
assert request.is_request("initialize")
self.connection.authenticate()
request = self.channel.propagate(request)
request.wait_for_response()
self.capabilities = self.Capabilities(self, request.response)
# Generic request handler, used if there's no specific handler below.
@message_handler
def request(self, request):
# Do not delegate requests from the server by default. There is a security
# boundary between the server and the adapter, and we cannot trust arbitrary
# requests sent over that boundary, since they may contain arbitrary code
# that the client will execute - e.g. "runInTerminal". The adapter must only
# propagate requests that it knows are safe.
raise request.isnt_valid(
"Requests from the debug server to the client are not allowed."
)
# Generic event handler, used if there's no specific handler below.
@message_handler
def event(self, event):
self.client.propagate_after_start(event)
@message_handler
def initialized_event(self, event):
# pydevd doesn't send it, but the adapter will send its own in any case.
pass
@message_handler
def process_event(self, event):
# If there is a launcher, it's handling the process event.
if not self.launcher:
self.client.propagate_after_start(event)
@message_handler
def continued_event(self, event):
# https://github.com/microsoft/ptvsd/issues/1530
#
# DAP specification says that a step request implies that only the thread on
# which that step occurred is resumed for the duration of the step. However,
# for VS compatibility, pydevd can operate in a mode that resumes all threads
# instead. This is set according to the value of "steppingResumesAllThreads"
# in "launch" or "attach" request, which defaults to true. If explicitly set
# to false, pydevd will only resume the thread that was stepping.
#
# To ensure that the client is aware that other threads are getting resumed in
# that mode, pydevd sends a "continued" event with "allThreadsResumed": true.
# when responding to a step request. This ensures correct behavior in VSCode
# and other DAP-conformant clients.
#
# On the other hand, VS does not follow the DAP specification in this regard.
# When it requests a step, it assumes that all threads will be resumed, and
# does not expect to see "continued" events explicitly reflecting that fact.
# If such events are sent regardless, VS behaves erratically. Thus, we have
# to suppress them specifically for VS.
if self.client.client_id not in ("visualstudio", "vsformac"):
self.client.propagate_after_start(event)
@message_handler
def exited_event(self, event):
# If there is a launcher, it's handling the exit code.
if not self.launcher:
self.client.propagate_after_start(event)
@message_handler
def terminated_event(self, event):
# Do not propagate this, since we'll report our own.
self.channel.close()
def detach_from_session(self):
with _lock:
self.is_connected = False
self.channel.handlers = self.connection
self.channel.name = self.channel.stream.name = str(self.connection)
self.connection.server = None
def disconnect(self):
with _lock:
_connections.remove(self.connection)
_connections_changed.set()
super(Server, self).disconnect()
def serve(host="127.0.0.1", port=0):
global listener
listener = sockets.serve("Server", Connection, host, port)
return listener.getsockname()
def stop_serving():
try:
listener.close()
except Exception:
log.swallow_exception(level="warning")
def connections():
with _lock:
return list(_connections)
def wait_for_connection(session, predicate, timeout=None):
"""Waits until there is a server with the specified PID connected to this adapter,
and returns the corresponding Connection.
If there is more than one server connection already available, returns the oldest
one.
"""
def wait_for_timeout():
time.sleep(timeout)
wait_for_timeout.timed_out = True
with _lock:
_connections_changed.set()
wait_for_timeout.timed_out = timeout == 0
if timeout:
thread = threading.Thread(
target=wait_for_timeout, name="servers.wait_for_connection() timeout"
)
thread.daemon = True
thread.start()
if timeout != 0:
log.info("{0} waiting for connection from debug server...", session)
while True:
with _lock:
_connections_changed.clear()
conns = (conn for conn in _connections if predicate(conn))
conn = next(conns, None)
if conn is not None or wait_for_timeout.timed_out:
return conn
_connections_changed.wait()
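# Editor's illustrative sketch (not part of the original module): how a caller might use
# wait_for_connection() above. The names `session` and `target_pid` are assumptions made
# for demonstration only.
def _wait_for_pid_example(session, target_pid):
    conn = wait_for_connection(session, lambda conn: conn.pid == target_pid, timeout=5.0)
    if conn is None:
        # Either no matching server connected, or the 5-second timeout elapsed first.
        return None
    return conn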
def wait_until_disconnected():
"""Blocks until all debug servers disconnect from the adapter.
If there are no server connections, waits until at least one is established first,
before waiting for it to disconnect.
"""
while True:
_connections_changed.wait()
with _lock:
_connections_changed.clear()
if not len(_connections):
return
def dont_wait_for_first_connection():
"""Unblocks any pending wait_until_disconnected() call that is waiting on the
first server to connect.
"""
with _lock:
_connections_changed.set()
def inject(pid, debugpy_args):
host, port = listener.getsockname()
cmdline = [
sys.executable,
compat.filename(os.path.dirname(debugpy.__file__)),
"--connect",
host + ":" + str(port),
]
if adapter.access_token is not None:
cmdline += ["--adapter-db-token", adapter.access_token]
cmdline += debugpy_args
cmdline += ["--pid", str(pid)]
log.info("Spawning attach-to-PID debugger injector: {0!r}", cmdline)
try:
injector = subprocess.Popen(
cmdline,
bufsize=0,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
except Exception as exc:
log.swallow_exception(
"Failed to inject debug server into process with PID={0}", pid
)
raise messaging.MessageHandlingError(
fmt(
"Failed to inject debug server into process with PID={0}: {1}", pid, exc
)
)
# We need to capture the output of the injector - otherwise it can get blocked
# on a write() syscall when it tries to print something.
def capture_output():
while True:
line = injector.stdout.readline()
if not line:
break
log.info("Injector[PID={0}] output:\n{1}", pid, line.rstrip())
log.info("Injector[PID={0}] exited.", pid)
thread = threading.Thread(
target=capture_output, name=fmt("Injector[PID={0}] output", pid)
)
thread.daemon = True
thread.start()
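# Editor's illustrative sketch (not part of the original module): the same
# "drain the child's stdout on a daemon thread" pattern used by inject() above,
# shown standalone for an arbitrary subprocess.Popen object `proc` opened with
# stdout=subprocess.PIPE in binary mode.
def drain_output(proc, on_line):
    def pump():
        # Read until EOF so the child never blocks on a full stdout pipe.
        for line in iter(proc.stdout.readline, b""):
            on_line(line.rstrip())
    t = threading.Thread(target=pump, name="output pump")
    t.daemon = True
    t.start()
    return t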
|
record.py
|
import logging as log
import os
import errno
from threading import Thread
try:
from urllib import urlopen
from urlparse import urlparse
import unicodecsv as csv
except ImportError:
from urllib.request import urlopen
from urllib.parse import urlparse
import csv
class FolderCreationError(Exception):
pass
class BrokenImageError(Exception):
pass
def make_path(path):
"""Ensures all the folders in path exists.
Raises FolderCreationError if failed to create the required folders.
"""
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST or not os.path.isdir(path):
raise FolderCreationError('Failed to create folder <{}>'.format(path))
class Record(object):
def __init__(self, filename, schema):
self.filename = filename + '.csv'
# if there's a folder in the filename make sure it exists
if (os.path.dirname(self.filename)):
make_path(os.path.dirname(self.filename))
self.file = open(self.filename, 'w')
self.writer = csv.DictWriter(self.file, fieldnames=schema, quoting=csv.QUOTE_ALL, strict=True)
self.writer.writeheader()
log.info('Created a new record file at: %s', self.filename)
def __del__(self):
self.file.close()
def add_record(self, data):
self.writer.writerow(data)
class Album(object):
def __init__(self, name, descriptions=False):
make_path(name)
self.name = name
self.record = None
if descriptions:
self.record = Record(name, ['filename', 'description', 'permalink'])
def add_image(self, url):
        Thread(target=self._image_dl, args=(url,)).start()
def _image_dl(self, url):
try:
img_bin = urlopen(url).read()
if img_bin:
filename = urlparse(url).path.split('/')[-1]
fullpath = os.path.join(self.name, filename)
with open(fullpath, 'wb') as f:
f.write(img_bin)
return
except IOError:
# there was some issue with the connection
# raise the Broken Image Error
pass
raise BrokenImageError
def add_description(self, imgurl, desc, perma):
if self.record:
filename = urlparse(imgurl).path.split('/')[-1]
self.record.add_record({
'filename': filename,
'description': desc,
'permalink': perma,
})
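# Usage sketch (illustrative only; the album name, image URL, and permalink below
# are made-up values, and add_image performs a real network download):
if __name__ == '__main__':
    album = Album('sample_album', descriptions=True)
    album.add_image('https://example.com/images/photo1.jpg')
    album.add_description('https://example.com/images/photo1.jpg',
                          'A sample description', 'https://example.com/posts/1')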
|
oxcart.py
|
"""
This is the main script for running the experiment.
It contains the main control loop of the experiment.
@author: Mehrpad Monajem <mehrpad.monajem@fau.de>
TODO: Replace print statements with Log statements
"""
import time
import datetime
import h5py
import multiprocessing
from multiprocessing.queues import Queue
import threading
import numpy as np
# Serial ports and NI
import serial.tools.list_ports
import pyvisa as visa
import nidaqmx
# Local project scripts
import tdc
import tdc_new
import variables
from devices import email_send, tweet_send, initialize_devices, drs, signal_generator
def logging():
"""
The function is used to insantiate and configute logger object for logging.
The function use python native logging library.
Attributes:
Does not accept any arguments
Returns:
Returns the logger object which could be used log statements of following level:
1. INFO: "Useful information"
2. WARNING: "Something is not right"
3. DEBUG: "A debug message"
4. ERROR: "A Major error has happened."
5. CRITICAL "Fatal error. Cannot continue"
"""
import logging
# Gets or creates a logger
logger = logging.getLogger(__name__)
# set log level
logger.setLevel(logging.INFO)
# define file handler and set formatter
# Reads file path from imported "variables" file
file_handler = logging.FileHandler(variables.path + '\\logfile.log', mode='w')
formatter = logging.Formatter('%(asctime)s : %(levelname)s : %(name)s : %(message)s')
file_handler.setFormatter(formatter)
# add file handler to logger
logger.addHandler(file_handler)
return logger
class OXCART:
"""
OXCART class
"""
def __init__(self, queue_x, queue_y, queue_t, queue_dld_start_counter,
queue_channel, queue_time_data, queue_tdc_start_counter,
queue_ch0_time, queue_ch0_wave, queue_ch1_time, queue_ch1_wave,
queue_ch2_time, queue_ch2_wave, queue_ch3_time, queue_ch3_wave,
lock1, lock2):
        '''
        This constructor accepts several initialized queue objects corresponding to the
        parameters of the dld, TDC, and DRS groups. It also accepts lock objects used to
        guard these resources against concurrent access and dirty reads.
        '''
# Queues for sharing data between tdc and main process
# dld queues
self.queue_x = queue_x
self.queue_y = queue_y
self.queue_t = queue_t
self.queue_dld_start_counter = queue_dld_start_counter
self.lock1 = lock1
# TDC queues
self.queue_channel = queue_channel
self.queue_time_data = queue_time_data
self.queue_tdc_start_counter = queue_tdc_start_counter
self.lock2 = lock2
# DRS queues
self.queue_ch0_time = queue_ch0_time
self.queue_ch0_wave = queue_ch0_wave
self.queue_ch1_time = queue_ch1_time
self.queue_ch1_wave = queue_ch1_wave
self.queue_ch2_time = queue_ch2_time
self.queue_ch2_wave = queue_ch2_wave
self.queue_ch3_time = queue_ch3_time
self.queue_ch3_wave = queue_ch3_wave
def initialize_v_dc(self):
"""
This class method intializes the high volatge parameter: v_dc.
The fucntion utilizes the serial library to communicate over the
COM port serially and read the corresponding v_dc parameter.
It exits if it is not able to connect on the COM Port.
Attributes:
Accepts only the self (class object)
Returns:
Does not return anything
"""
# Setting the com port of V_dc
self.com_port_v_dc = serial.Serial(
port=initialize_devices.com_ports[variables.com_port_idx_V_dc].device, # chosen COM port
baudrate=115200, # 115200
bytesize=serial.EIGHTBITS, # 8
parity=serial.PARITY_NONE, # N
stopbits=serial.STOPBITS_ONE # 1
)
# configure the COM port to talk to. Default values: 115200,8,N,1
if self.com_port_v_dc.is_open:
self.com_port_v_dc.flushInput()
self.com_port_v_dc.flushOutput()
cmd_list = [">S1 3.0e-4", ">S0B 0", ">S0 %s" % variables.vdc_min, "F0", ">S0?", ">DON?",
">S0A?"]
            for cmd in cmd_list:
                self.command_v_dc(cmd)
else:
print("Couldn't open Port!")
exit()
def initialize_v_p(self):
"""
This class method intializes the Pulse parameter: v_p.
The fucntion utilizes the serial library to communicate over the
COM port serially and read the corresponding v_p parameter.
Attributes:
Accepts only the self (class object)
Returns:
Does not return anything
"""
# set the port for v_p
resources = visa.ResourceManager('@py')
self.com_port_v_p = resources.open_resource('ASRL4::INSTR')
try:
self.com_port_v_p.query('*RST')
except:
self.com_port_v_p.write('VOLT %s' % (variables.v_p_min * (1 / variables.pulse_amp_per_supply_voltage)))
def initialize_counter(self):
"""
This class method intializes the edge counter parameter.
It helps counting in edges of a particular signal.
The fucntion utilizes the nidaqmx library to communicate
through NI Instruments to count the edges.
NI-DAQmx can help you use National Instruments (NI) data acquisition and
signal conditioning hardware
Attributes:
Accepts only the self (class object)
Returns:
Returns the counted edges
"""
task_counter = nidaqmx.Task()
task_counter.ci_channels.add_ci_count_edges_chan("Dev1/ctr0")
# reference the terminal you want to use for the counter here
task_counter.ci_channels[0].ci_count_edges_term = "PFI0"
return task_counter
# apply command to the V_dc
def command_v_dc(self, cmd):
"""
This class method is used to send commands on the high volatge parameter: v_dc.
The fucntion utilizes the serial library to communicate over the
COM port serially and read the corresponding v_dc parameter.
Attributes:
Accepts only the self (class object)
Returns:
Returns the response code after executing the command.
"""
self.com_port_v_dc.write(
(cmd + '\r\n').encode()) # send cmd to device # might not work with older devices -> "LF" only needed!
time.sleep(0.005) # small sleep for response
        # Initialize the response to be returned as a string
response = ''
# Read the response code after execution(command write).
while self.com_port_v_dc.in_waiting > 0:
response = self.com_port_v_dc.readline() # all characters received, read line till '\r\n'
return response.decode("utf-8")
def reader_queue_dld(self):
"""
This class method runs in an infinite loop and listens and reads paramters
over the queues for the group: dld
This function is called continuously by a separate thread in the main function.
The values read from the queues are updates in imported "variables" file
Attributes:
Accepts only the self (class object)
Returns:
Does not return anything
"""
while True:
# Check if any value is present in queue to read from
while not self.queue_x.empty() or not self.queue_y.empty() or not self.queue_t.empty() or not self.queue_dld_start_counter.empty():
# Utilize locking mechanism to avoid concurrent use of resources and dirty reads
with self.lock1:
length = self.queue_x.get()
variables.x = np.append(variables.x, length)
variables.y = np.append(variables.y, self.queue_y.get())
variables.t = np.append(variables.t, self.queue_t.get())
variables.dld_start_counter = np.append(variables.dld_start_counter,
self.queue_dld_start_counter.get())
variables.main_v_dc_dld = np.append(variables.main_v_dc_dld, np.tile(variables.specimen_voltage, len(length)))
variables.main_v_p_dld = np.append(variables.main_v_p_dld, np.tile(variables.pulse_voltage, len(length)))
# If end of experiment flag is set break the while loop
if variables.end_experiment:
break
def reader_queue_drs(self):
"""
This class method runs in an infinite loop and listens and reads paramters
over the queues for the group: DRS
This function is called continuously by a separate thread in the main function.
The values read from the queues are updates in imported "variables" file.
Attributes:
Accepts only the self (class object)
Returns:
Does not return anything
"""
while True:
# Check if any value is present in queue to read from
while not self.queue_ch0_time.empty() or not self.queue_ch0_wave.empty() or not self.queue_ch1_time.empty() or not\
self.queue_ch1_wave.empty() or not self.queue_ch2_time.empty() or not\
self.queue_ch2_wave.empty() or not self.queue_ch3_time.empty() or not self.queue_ch3_wave.empty():
#Utilize locking mechanism to avoid concurrent use of resources and dirty reads
with self.lock1:
length = self.queue_ch0_time.get()
variables.ch0_time = np.append(variables.ch0_time, length)
variables.ch0_wave = np.append(variables.ch0_wave, self.queue_ch0_wave.get())
variables.ch1_time = np.append(variables.ch1_time, self.queue_ch1_time.get())
variables.ch1_wave = np.append(variables.ch1_wave, self.queue_ch1_wave.get())
variables.ch2_time = np.append(variables.ch2_time, self.queue_ch2_time.get())
variables.ch2_wave = np.append(variables.ch2_wave, self.queue_ch2_wave.get())
variables.ch3_time = np.append(variables.ch3_time, self.queue_ch3_time.get())
variables.ch3_wave = np.append(variables.ch3_wave, self.queue_ch3_wave.get())
variables.main_v_dc_drs = np.append(variables.main_v_dc_drs,
np.tile(variables.specimen_voltage, len(length)))
variables.main_v_p_drs = np.append(variables.main_v_p_drs,
np.tile(variables.pulse_voltage, len(length)))
# If end of experiment flag is set break the while loop
if variables.end_experiment:
break
def reader_queue_tdc(self):
"""
This class method runs in an infinite loop and listens and reads paramters
over the queues for the group: TDC
This function is called continuously by a separate thread in the main function.
The values read from the queues are updates in imported "variables" file.
Attributes:
Accepts only the self (class object)
Returns:
Does not return anything
"""
while True:
# Check if any value is present in queue to read from
while not self.queue_channel.empty() or not self.queue_time_data.empty() or not self.queue_tdc_start_counter.empty():
#Utilize locking mechanism to avoid concurrent use of resources and dirty reads
with self.lock2:
length = self.queue_channel.get()
variables.channel = np.append(variables.channel, length)
variables.time_data = np.append(variables.time_data, self.queue_time_data.get())
variables.tdc_start_counter = np.append(variables.tdc_start_counter,
self.queue_tdc_start_counter.get())
variables.main_v_dc_tdc = np.append(variables.main_v_dc_tdc, np.tile(variables.specimen_voltage, len(length)))
variables.main_v_p_tdc = np.append(variables.main_v_p_tdc, np.tile(variables.pulse_voltage, len(length)))
# If end of experiment flag is set break the while loop
if variables.end_experiment:
break
def main_ex_loop(self, task_counter, counts_target):
"""
This class method:
1. Read the number of detected Ions(in TDC or Counter mode)
2- Calculate the error of detection rate of desire rate
3- Regulate the high voltage and pulser
This function is called in each loop of main function.
Atrributes:
task_counter: Counter edges
counts_target: Calculated paramter(((detection_rate/100)* pulse_frequency)/pulse_frequency)
Returns:
Does not return anything
"""
if variables.counter_source == 'TDC':
variables.total_ions = len(variables.x)
elif variables.counter_source == 'TDC_Raw':
if len(variables.channel) > 0:
variables.total_ions = int(len(variables.channel)/4)
elif variables.counter_source == 'pulse_counter':
# reading detector MCP pulse counter and calculating pulses since last loop iteration
variables.total_ions = task_counter.read(number_of_samples_per_channel=1)[0]
elif variables.counter_source == 'DRS':
pass
variables.count_temp = variables.total_ions - variables.count_last
variables.count_last = variables.total_ions
# saving the values of high dc voltage, pulse, and current iteration ions
variables.main_v_dc = np.append(variables.main_v_dc, variables.specimen_voltage)
variables.main_v_p = np.append(variables.main_v_p, variables.pulse_voltage)
variables.main_counter = np.append(variables.main_counter, variables.count_temp)
        # averaging the count rate over the last cycle_avg iterations
variables.avg_n_count = variables.ex_freq * (
sum(variables.main_counter[-variables.cycle_avg:]) / variables.cycle_avg)
counts_measured = variables.avg_n_count / (1 + variables.pulse_frequency * 1000)
counts_error = counts_target - counts_measured # deviation from setpoint
# simple proportional control with averaging
rate = ((variables.avg_n_count * 100) / (1 + variables.pulse_frequency * 1000))
if rate < 0.01 and variables.specimen_voltage < 5000:
ramp_speed_factor = 2.5
else:
ramp_speed_factor = 1
if counts_error > 0:
voltage_step = counts_error * variables.vdc_step_up * ramp_speed_factor
elif counts_error <= 0:
voltage_step = counts_error * variables.vdc_step_down * ramp_speed_factor
# update v_dc
if variables.specimen_voltage < variables.vdc_max:
if variables.specimen_voltage >= variables.vdc_min:
specimen_voltage_temp = variables.specimen_voltage + voltage_step
if specimen_voltage_temp > variables.specimen_voltage:
variables.specimen_voltage = specimen_voltage_temp
# sending VDC via serial
self.command_v_dc(">S0 %s" % (variables.specimen_voltage))
# update pulse voltage v_p
new_vp = variables.specimen_voltage * variables.pulse_fraction * \
(1 / variables.pulse_amp_per_supply_voltage)
if new_vp < variables.pulse_voltage_max and new_vp > variables.pulse_voltage_min:
self.com_port_v_p.write('VOLT %s' % new_vp)
variables.pulse_voltage = new_vp * variables.pulse_amp_per_supply_voltage
variables.main_temperature = np.append(variables.main_temperature, variables.temperature)
variables.main_chamber_vacuum = np.append(variables.main_chamber_vacuum, float(variables.vacuum_main))
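    # Worked example of the control step above (illustrative numbers only):
    # with detection_rate = 1 %, counts_target = ((1 / 100) * f_pulse) / f_pulse = 0.01
    # counts per pulse. If pulse_frequency = 200 (kHz) and the averaged count rate
    # avg_n_count is 1500 counts/s, then counts_measured = 1500 / (1 + 200 * 1000) ≈ 0.0075,
    # so counts_error ≈ +0.0025 and the DC voltage is ramped up by
    # counts_error * vdc_step_up (times 2.5 while the detection rate is below 0.01 %
    # and the specimen voltage is under 5000 V).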
def clear_up(self, task_counter):
"""
This fucntion clears global variables and deinitialize high voltage and pulser function
Attributes:
Does not accept any arguments
Returns:
Does not return anything
"""
def cleanup_variables():
"""
Clear up all the global variables
"""
variables.stop_flag = False
variables.end_experiment = False
variables.start_flag = False
# variables.elapsed_time = 0.0
# variables.total_ions = 0
# variables.specimen_voltage = 0.0
# variables.total_count = 0
# variables.avg_n_count = 0
# variables.pulse_voltage = 0.0
variables.detection_rate = 0.0
variables.detection_rate_elapsed = 0.0
variables.count = 0
variables.count_temp = 0
variables.count_last = 0
variables.index_plot = 0
variables.index_save_image = 0
variables.index_wait_on_plot_start = 0
variables.index_plot_save = 0
variables.index_plot = 0
variables.x = np.zeros(0)
variables.y = np.zeros(0)
variables.t = np.zeros(0)
variables.dld_start_counter = np.zeros(0)
variables.channel = np.zeros(0)
variables.time_data = np.zeros(0)
variables.tdc_start_counter = np.zeros(0)
variables.ch0_time = np.zeros(0)
variables.ch0_wave = np.zeros(0)
variables.ch1_time = np.zeros(0)
variables.ch1_wave = np.zeros(0)
variables.ch2_time = np.zeros(0)
variables.ch2_wave = np.zeros(0)
variables.ch3_time = np.zeros(0)
variables.ch3_wave = np.zeros(0)
variables.main_v_dc = np.zeros(0)
variables.main_v_p = np.zeros(0)
variables.main_counter = np.zeros(0)
variables.main_temperature = np.zeros(0)
variables.main_chamber_vacuum = np.zeros(0)
variables.main_v_dc_dld = np.zeros(0)
variables.main_v_p_dld = np.zeros(0)
variables.main_v_dc_tdc = np.zeros(0)
variables.main_v_p_tdc = np.zeros(0)
print('starting to clean up')
# save the data to the HDF5
# Switch off the v_dc
self.command_v_dc('F0')
self.com_port_v_dc.close()
# Switch off the v_p
self.com_port_v_p.write('VOLT 0')
self.com_port_v_p.write('OUTPut OFF')
self.com_port_v_p.close()
# Interrupt the TDC
# device.interrupt_measurement()
if variables.counter_source == 'pulse_counter':
# Close the task of counter
task_counter.stop()
task_counter.close()
# Turn off the signal generator
signal_generator.turn_off_signal_generator()
# Zero variables
cleanup_variables()
print('Clean up is finished')
def main():
"""
Main function for doing experiments
1- Initialize all the devices (High voltage, pulser, TDC or Edge-Counter)
2- Create and start reader DLD and TDC thread
3- Create and start the TDC process if TDC is selected in GUI
4- Iterate over the main loop of experiments and control the experiment frequency
5- Stop the experiment if stop condition is achieved
6- Deinitialize devices
7- Save the data
8- Send email and tweet
"""
# Initialize logger
logger = logging()
logger.info('Experiment is starting')
variables.start_time = datetime.datetime.now().strftime("%d/%m/%Y %H:%M")
# Create and start the TDC process and related queues
if variables.counter_source == 'TDC' or variables.counter_source == 'TDC_Raw':
queue_x = Queue(maxsize=-1, ctx=multiprocessing.get_context())
queue_y = Queue(maxsize=-1, ctx=multiprocessing.get_context())
queue_t = Queue(maxsize=-1, ctx=multiprocessing.get_context())
queue_dld_start_counter = Queue(maxsize=-1, ctx=multiprocessing.get_context())
queue_channel = Queue(maxsize=-1, ctx=multiprocessing.get_context())
queue_time_data = Queue(maxsize=-1, ctx=multiprocessing.get_context())
queue_tdc_start_counter = Queue(maxsize=-1, ctx=multiprocessing.get_context())
queue_stop_measurement = Queue(maxsize=1, ctx=multiprocessing.get_context())
queue_ch0_time = None
queue_ch0_wave = None
queue_ch1_time = None
queue_ch1_wave = None
queue_ch2_time = None
queue_ch2_wave = None
queue_ch3_time = None
queue_ch3_wave = None
        # Initialize and start a process (see the imported 'tdc_new' module for the process function)
# Module used: multiprocessing
tdc_process = multiprocessing.Process(target=tdc_new.experiment_measure, args=(variables.raw_mode, queue_x,
queue_y, queue_t,
queue_dld_start_counter,
queue_channel,
queue_time_data,
queue_tdc_start_counter,
queue_stop_measurement))
tdc_process.daemon = True
tdc_process.start()
elif variables.counter_source == 'DRS':
queue_ch0_time = Queue(maxsize=-1, ctx=multiprocessing.get_context())
queue_ch0_wave = Queue(maxsize=-1, ctx=multiprocessing.get_context())
queue_ch1_time = Queue(maxsize=-1, ctx=multiprocessing.get_context())
queue_ch1_wave = Queue(maxsize=-1, ctx=multiprocessing.get_context())
queue_ch2_time = Queue(maxsize=-1, ctx=multiprocessing.get_context())
queue_ch2_wave = Queue(maxsize=-1, ctx=multiprocessing.get_context())
queue_ch3_time = Queue(maxsize=-1, ctx=multiprocessing.get_context())
queue_ch3_wave = Queue(maxsize=-1, ctx=multiprocessing.get_context())
queue_stop_measurement = Queue(maxsize=1, ctx=multiprocessing.get_context())
queue_x = None
queue_y = None
queue_t = None
queue_dld_start_counter = None
queue_channel = None
queue_time_data = None
queue_tdc_start_counter = None
        # Initialize and start a process (see the imported 'drs' module for the process function)
# Module used: multiprocessing
drs_process = multiprocessing.Process(target=drs.experiment_measure, args=(queue_ch0_time, queue_ch0_wave,
queue_ch1_time, queue_ch1_wave,
queue_ch2_time, queue_ch2_wave,
queue_ch3_time, queue_ch3_wave,
queue_stop_measurement))
drs_process.daemon = True
drs_process.start()
else:
queue_x = None
queue_y = None
queue_t = None
queue_dld_start_counter = None
queue_channel = None
queue_time_data = None
queue_tdc_start_counter = None
queue_ch0_time = None
queue_ch0_wave = None
queue_ch1_time = None
queue_ch1_wave = None
queue_ch2_time = None
queue_ch2_wave = None
queue_ch3_time = None
queue_ch3_wave = None
    # Initialize the locks that are used by the TDC and DLD reader threads
# Module used: threading
lock1 = threading.Lock()
lock2 = threading.Lock()
# Create the experiment object
experiment = OXCART(queue_x, queue_y, queue_t, queue_dld_start_counter,
queue_channel, queue_time_data, queue_tdc_start_counter,
queue_ch0_time, queue_ch0_wave, queue_ch1_time, queue_ch1_wave,
queue_ch2_time, queue_ch2_wave, queue_ch3_time, queue_ch3_wave,
lock1, lock2)
# Initialize the signal generator
signal_generator.initialize_signal_generator(variables.pulse_frequency)
# Initialize high voltage
experiment.initialize_v_dc()
logger.info('High voltage is initialized')
# Initialize pulser
experiment.initialize_v_p()
logger.info('Pulser is initialized')
if variables.counter_source == 'pulse_counter':
task_counter = experiment.initialize_counter()
logger.info('Edge counter is initialized')
else:
task_counter = None
# start the timer for main experiment
variables.specimen_voltage = variables.vdc_min
variables.pulse_voltage_min = variables.v_p_min * (1 / variables.pulse_amp_per_supply_voltage)
variables.pulse_voltage_max = variables.v_p_max * (1 / variables.pulse_amp_per_supply_voltage)
variables.pulse_voltage = variables.v_p_min
time_ex_s = np.zeros(0)
time_ex_m = np.zeros(0)
time_ex_h = np.zeros(0)
time_counter = np.zeros(0)
counts_target = ((variables.detection_rate / 100) * variables.pulse_frequency) / variables.pulse_frequency
logger.info('Starting the main loop')
    # Initialize the thread that will read from the queues of the dld group
if variables.counter_source == 'TDC':
read_dld_queue_thread = threading.Thread(target=experiment.reader_queue_dld)
read_dld_queue_thread.setDaemon(True)
read_dld_queue_thread.start()
    # Initialize the thread that will read from the queues of the tdc group
elif variables.counter_source == 'TDC_Raw':
read_tdc_queue_thread = threading.Thread(target=experiment.reader_queue_tdc)
read_tdc_queue_thread.setDaemon(True)
read_tdc_queue_thread.start()
    # Initialize the thread that will read from the queues of the drs group
elif variables.counter_source == 'DRS':
read_drs_queue_thread = threading.Thread(target=experiment.reader_queue_drs)
read_drs_queue_thread.setDaemon(True)
read_drs_queue_thread.start()
total_steps = variables.ex_time * variables.ex_freq
steps = 0
flag_achieved_high_voltage = 0
index_time = 0
ex_time_temp = variables.ex_time
# Main loop of experiment
while steps < total_steps:
        # Initialize everything only at the first iteration
if steps == 0:
# Turn on the v_dc and v_p
experiment.com_port_v_p.write('OUTPut ON')
time.sleep(0.5)
experiment.command_v_dc("F1")
time.sleep(0.5)
if variables.counter_source == 'pulse_counter':
# start the Counter
task_counter.start()
variables.start_flag = True
            # Wait 4 seconds for all devices to get ready
time.sleep(4)
# Total experiment time variable
start_main_ex = time.time()
print('Experiment is started')
logger.info('Experiment is started')
# Measure time
start = datetime.datetime.now()
# main loop function
experiment.main_ex_loop(task_counter, counts_target)
end = datetime.datetime.now()
        # If the main experiment loop finishes in less than one experiment period, wait for the remaining time
if (1000 / variables.ex_freq) > ((end - start).microseconds / 1000): # time in milliseconds
sleep_time = ((1000 / variables.ex_freq) - ((end - start).microseconds / 1000))
time.sleep(sleep_time / 1000)
else:
print(
f"{initialize_devices.bcolors.WARNING}Warning: Experiment loop takes longer than %s Millisecond{initialize_devices.bcolors.ENDC}" % (int(1000 / variables.ex_freq)))
logger.error('Experiment loop takes longer than %s Millisecond' % (int(1000 / variables.ex_freq)))
print('%s- The iteration time:' %index_time, ((end - start).microseconds / 1000))
index_time += 1
time_ex_s = np.append(time_ex_s, int(end.strftime("%S")))
time_ex_m = np.append(time_ex_m, int(end.strftime("%M")))
time_ex_h = np.append(time_ex_h, int(end.strftime("%H")))
end_main_ex_loop = time.time()
variables.elapsed_time = end_main_ex_loop - start_main_ex
# Counter of iteration
time_counter = np.append(time_counter, steps)
steps += 1
if variables.stop_flag:
print('Experiment is stopped by user')
logger.info('Experiment is stopped by user')
if variables.counter_source == 'TDC' or variables.counter_source == 'TDC_Raw':
queue_stop_measurement.put(True)
time.sleep(1)
break
if variables.criteria_ions:
if variables.max_ions <= variables.total_ions:
print('Total number of Ions is achieved')
logger.info('Total number of Ions is achieved')
if variables.counter_source == 'TDC'or variables.counter_source == 'TDC_Raw':
queue_stop_measurement.put(True)
time.sleep(1)
break
if variables.criteria_vdc:
if variables.vdc_max <= variables.specimen_voltage:
if flag_achieved_high_voltage > variables.ex_freq * 10:
print('High Voltage Max. is achieved')
logger.info('High Voltage Max. is achieved')
time.sleep(1)
break
flag_achieved_high_voltage += 1
if variables.ex_time != ex_time_temp:
total_steps = variables.ex_time * variables.ex_freq - steps
ex_time_temp = variables.ex_time
        # Because experiment time is not a stop criterion, increase total_steps
if not variables.criteria_time and steps+1==total_steps:
total_steps += 1
# Stop the TDC process
try:
if variables.counter_source == 'TDC'or variables.counter_source == 'TDC_Raw':
tdc_process.join(3)
if tdc_process.is_alive():
tdc_process.terminate()
tdc_process.join(1)
# Release all the resources of the TDC process
tdc_process.close()
elif variables.counter_source == 'DRS':
drs_process.join(3)
if drs_process.is_alive():
drs_process.terminate()
drs_process.join(1)
            # Release all the resources of the DRS process
drs_process.close()
except:
print(
f"{initialize_devices.bcolors.WARNING}Warning: The TDC or DRS process cannot be terminated properly{initialize_devices.bcolors.ENDC}")
variables.end_experiment = True
time.sleep(1)
# Stop the TDC and DLD thread
if variables.counter_source == 'TDC':
read_dld_queue_thread.join(1)
elif variables.counter_source == 'TDC_Raw':
read_tdc_queue_thread.join(1)
elif variables.counter_source == 'DRS':
read_drs_queue_thread.join(1)
if variables.counter_source == 'TDC':
variables.total_ions = len(variables.x)
elif variables.counter_source == 'TDC_Raw':
variables.total_ions = int(len(variables.channel) / 4)
elif variables.counter_source == 'DRS':
pass
time.sleep(1)
print('Experiment is finished')
logger.info('Experiment is finished')
    # Check that the lengths of the recorded arrays are equal
    if variables.counter_source == 'TDC':
        if not all(len(lst) == len(variables.x) for lst in [variables.x, variables.y,
                                                            variables.t, variables.dld_start_counter,
                                                            variables.main_v_dc_dld, variables.main_v_p_dld]):
            logger.warning('dld data do not have the same length')
    elif variables.counter_source == 'TDC_Raw':
        if not all(len(lst) == len(variables.channel) for lst in [variables.channel, variables.time_data,
                                                                  variables.tdc_start_counter,
                                                                  variables.main_v_dc_tdc, variables.main_v_p_tdc]):
            logger.warning('tdc data do not have the same length')
    elif variables.counter_source == 'DRS':
        if not all(len(lst) == len(variables.ch0_time) for lst in [variables.ch0_wave, variables.ch1_time,
                                                                   variables.ch1_wave, variables.ch2_time,
                                                                   variables.ch2_wave, variables.ch3_time,
                                                                   variables.ch3_wave,
                                                                   variables.main_v_dc_drs, variables.main_v_p_drs]):
            logger.warning('drs data do not have the same length')
# save hdf5 file
with h5py.File(variables.path + '\\%s_data.h5' % variables.hdf5_path, "w") as f:
f.create_dataset("oxcart/high_voltage", data=variables.main_v_dc, dtype='f')
f.create_dataset("oxcart/pulse_voltage", data=variables.main_v_p, dtype='f')
f.create_dataset("oxcart/num_events", data=variables.main_counter, dtype='i')
f.create_dataset('oxcart/temperature', data=variables.main_temperature, dtype='f')
f.create_dataset('oxcart/main_chamber_vacuum', data=variables.main_chamber_vacuum, dtype='f')
f.create_dataset("oxcart/time_counter", data=time_counter, dtype='i')
f.create_dataset("time/time_s", data=time_ex_s, dtype='i')
f.create_dataset("time/time_m", data=time_ex_m, dtype='i')
f.create_dataset("time/time_h", data=time_ex_h, dtype='i')
if variables.counter_source == 'TDC':
f.create_dataset("dld/x", data=variables.x, dtype='i')
f.create_dataset("dld/y", data=variables.y, dtype='i')
f.create_dataset("dld/t", data=variables.t, dtype='i')
f.create_dataset("dld/start_counter", data=variables.dld_start_counter, dtype='i')
f.create_dataset("dld/high_voltage", data=variables.main_v_dc_dld, dtype='f')
f.create_dataset("dld/pulse_voltage", data=variables.main_v_p_dld, dtype='f')
elif variables.counter_source == 'TDC_Raw':
f.create_dataset("tdc/start_counter", data=variables.tdc_start_counter, dtype='i')
f.create_dataset("tdc/channel", data=variables.channel, dtype='i')
f.create_dataset("tdc/time_data", data=variables.time_data, dtype='i')
f.create_dataset("tdc/high_voltage", data=variables.main_v_dc_tdc, dtype='f')
f.create_dataset("tdc/pulse_voltage", data=variables.main_v_p_tdc, dtype='f')
elif variables.counter_source == 'DRS':
f.create_dataset("drs/ch0_time", data=variables.ch0_time, dtype='f')
f.create_dataset("drs/ch0_wave", data=variables.ch0_wave, dtype='f')
f.create_dataset("drs/ch1_time", data=variables.ch1_time, dtype='f')
f.create_dataset("drs/ch1_wave", data=variables.ch1_wave, dtype='f')
f.create_dataset("drs/ch2_time", data=variables.ch2_time, dtype='f')
f.create_dataset("drs/ch2_wave", data=variables.ch2_wave, dtype='f')
f.create_dataset("drs/ch3_time", data=variables.ch3_time, dtype='f')
f.create_dataset("drs/ch3_wave", data=variables.ch3_wave, dtype='f')
f.create_dataset("drs/high_voltage", data=variables.main_v_dc_drs, dtype='f')
f.create_dataset("drs/pulse_voltage", data=variables.main_v_p_drs, dtype='f')
logger.info('HDF5 file is created')
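    # Read-back sketch for the file written above (illustrative; shown for the
    # 'TDC' counter source, whose datasets are created in the block above):
    #   with h5py.File(variables.path + '\\%s_data.h5' % variables.hdf5_path, 'r') as fr:
    #       x = fr['dld/x'][:]
    #       high_voltage = fr['dld/high_voltage'][:]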
variables.end_time = datetime.datetime.now().strftime("%d/%m/%Y %H:%M")
# Save new value of experiment counter
with open('./png/counter.txt', 'w') as f:
f.write(str(variables.counter + 1))
logger.info('Experiment counter is increased')
# Adding results of the experiment to the log file
logger.info('Total number of Ions is: %s' % variables.total_ions)
# send a Tweet
if variables.tweet:
message_tweet = 'The Experiment %s finished\n' \
'Total number of Ions is: %s' % (variables.hdf5_path,
variables.total_ions)
tweet_send.tweet_send(message_tweet)
logger.info('Tweet is sent')
# send an email
subject = 'Oxcart Experiment {} Report'.format(variables.hdf5_path)
elapsed_time_temp = float("{:.3f}".format(variables.elapsed_time))
message = 'The experiment was started at: {}\n' \
'The experiment was ended at: {}\n' \
'Experiment duration: {}\n' \
'Total number of ions: {}\n'.format(variables.start_time,
variables.end_time, elapsed_time_temp, variables.total_ions)
if len(variables.email) > 3:
logger.info('Email is sent')
email_send.send_email(variables.email, subject, message)
# save setup parameters and run statistics in a txt file
with open(variables.path + '\\parameters.txt', 'w') as f:
f.write('Username: ' + variables.user_name + '\r\n')
f.write('Experiment Name: ' + variables.hdf5_path + '\r\n')
f.write('Detection Rate ('+chr(37)+') : %s\r\n' % variables.detection_rate)
f.write('Maximum Number of Ions: %s\r\n' % variables.max_ions)
f.write('Counter source: %s\r\n' % variables.counter_source)
f.write('Control Refresh freq. (Hz): %s\r\n' % variables.ex_freq)
f.write('Time bins (Sec): %s\r\n' % (1/variables.ex_freq))
f.write('Cycle for Avg.: %s\r\n' % variables.cycle_avg)
f.write('K_p Upwards: %s\r\n' % variables.vdc_step_up)
f.write('K_p Downwards: %s\r\n' % variables.vdc_step_down)
f.write('Experiment Elapsed Time (Sec): %s\r\n' % "{:.3f}".format(variables.elapsed_time))
f.write('Experiment Total Ions: %s\r\n' % variables.total_ions)
f.write('Email: ' + variables.email + '\r\n')
f.write('Twitter: %s\r\n' % variables.tweet)
f.write('Specimen start Voltage (V): %s\r\n' % variables.vdc_min)
f.write('Specimen Stop Voltage (V): %s\r\n' % variables.vdc_max)
f.write('Specimen Max Achieved Voltage (V): %s\r\n' % "{:.3f}".format(variables.specimen_voltage))
f.write('Pulse start Voltage (V): %s\r\n' % variables.v_p_min)
f.write('Pulse Stop Voltage (V): %s\r\n' % variables.v_p_max)
f.write('Pulse Fraction ('+chr(37)+'): %s\r\n' % variables.pulse_fraction)
f.write('Specimen Max Achieved Pulse Voltage (V): %s\r\n' % "{:.3f}".format(variables.pulse_voltage))
# Clear up all the variables and deinitialize devices
experiment.clear_up(task_counter)
    logger.info('Variables and devices are cleared')
|
solver_wrapper.py
|
import logging
import multiprocessing
import os
import subprocess
from pysat.solvers import Solver
from dfainductor.logging_utils import log_info
class SolverWrapper:
def __init__(self, name):
logger_format = '%(asctime)s:%(threadName)s:%(message)s'
logging.basicConfig(format=logger_format, level=logging.INFO, datefmt="%H:%M:%S")
def nof_vars(self):
pass
def nof_clauses(self):
pass
def append_formula(self, formula):
pass
def add_clause(self, clause):
pass
def solve(self, assumptions):
pass
def get_model(self):
pass
def run_solver(solver):
"""
Run a single solver on a given formula.
"""
logging.info(f"starting {solver}")
res = solver.solve()
logging.info(f"finished {solver} -- {res} outcome")
class ParallelSolverPortfolio(SolverWrapper):
def __init__(self, name):
super().__init__(name)
self.solvers = []
def add_solver(self, solver):
self.solvers.append(Solver(solver))
def solve(self, assumptions):
logging.info("Parallel solving started")
logging.info("Creating tasks")
if __name__ == '__main__':
            threads = [multiprocessing.Process(target=run_solver, args=(solver,)) for solver in self.solvers]
for thread in threads:
thread.start()
for thread in threads:
thread.join() # waits for thread to complete its task
logging.info("Main Ended")
else:
logging.info("Name is not main")
def append_formula(self, formula):
for solver in self.solvers:
solver.append_formula(formula)
def add_clause(self, clause):
for solver in self.solvers:
solver.add_clause(clause)
def get_model(self):
for solver in self.solvers:
solver.get_model()
class ParallelSolverPathToFile(SolverWrapper):
def get_model(self):
log_info(self.result)
if self.result:
self.answer = self.answer[2:]
res = [int(x) for x in self.answer.split(' ')]
log_info(str(res))
return res
else:
print("No answer")
    def write_to_file(self):
        # Use a context manager so the CNF file is flushed and closed before the solver runs
        with open("dfainductor/parallel/inputDKA.cnf", "w+", encoding="utf8") as file:
            file.write("c A sample .cnf file\n")
            # log_info("c A sample .cnf file\n")
            file.write("p cnf " + str(self.amount_of_variables) + " " + str(len(self.list_of_clauses)) + "\n")
            # log_info("p cnf " + str(self.amount_of_variables) + " " + str(self.list_of_clauses) + "\n")
            for clause in self.list_of_clauses:
                file.write(str(len(clause)) + " " + " ".join(str(x) for x in clause) + " 0" + " \n")
# converted_list = [str(element) for clause in self.list_of_clauses for element in clause]
# converted_list_of_list = [str]
# file.write(",".join(converted_list) + "\n")
# log_info(",".join(converted_list) + "\n")
@staticmethod
def execute():
        # input - arguments for the command being executed
exit_code = subprocess.run(['./dfainductor/parallel/starexec_run_Version_1.sh', 'input33.cnf'], shell=True, capture_output=True, text=True)
# exit = subprocess.Popen(['./dfainductor/parallel/starexec_run_Version_1.sh', 'input33.cnf'], stdout=subprocess.PIPE)
# exit_code = exit.communicate()
# res = exit_code.stdout.decode('utf-8')
res = exit_code
result = res.stdout.split("\n")[-3]
# log_info(res.split("\n")[-3])
log_info(res.stdout.split("\n")[-2])
if result == "s SATISFIABLE":
print("yes")
return True, res.stdout.split("\n")[-2]
# return True, res.stdout.split("\n")[-2]
else:
print("no")
# print(exit_code.stdout.split("\n")[-3])
return False, None
def solve(self, assumptions=""):
self.write_to_file()
self.result, self.answer = self.execute()
log_info(str(self.result) + " should be")
return self.result
def __init__(self, name):
super().__init__(name)
self.name = name
self.list_of_clauses = []
self.amount_of_variables = 0
self.result = False
self.answer = None
    def add_clause(self, clause):
        # the DIMACS "p cnf" header needs the highest variable index, not the clause length
        self.amount_of_variables = max([self.amount_of_variables] + [abs(lit) for lit in clause])
        self.list_of_clauses.append(clause)
def append_formula(self, formula):
for clause in formula:
self.add_clause(clause)
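# Usage sketch (illustrative only; solving requires the solver script referenced
# in execute() to exist under dfainductor/parallel/):
if __name__ == '__main__':
    wrapper = ParallelSolverPathToFile('file-based portfolio')
    # encode (x1 or x2) and (not x1 or x2)
    wrapper.append_formula([[1, 2], [-1, 2]])
    if wrapper.solve():
        print(wrapper.get_model())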
|
Wallet.py
|
#!/usr/bin/env python3
##########################################
# Duino-Coin Tkinter GUI Wallet (v2.52)
# https://github.com/revoxhere/duino-coin
# Distributed under MIT license
# © Duino-Coin Community 2019-2021
##########################################
import os
import sys
from base64 import b64decode, b64encode
from configparser import ConfigParser
from datetime import datetime
from json import loads
from json import loads as jsonloads
from locale import getdefaultlocale
from os import _exit, execl, mkdir
from os import name as osname
from os import path, system
from pathlib import Path
from socket import socket
from sqlite3 import connect as sqlconn
import subprocess
from threading import Thread, Timer
from time import sleep, time
from tkinter import (BOTH, END, LEFT, RIGHT, Button, Checkbutton, E, Entry,
Frame, IntVar, Label, Listbox, N, PhotoImage, S,
Scrollbar, StringVar, Tk, Toplevel, W, messagebox, ttk)
from tkinter.font import Font
from urllib.request import urlopen, urlretrieve
from webbrowser import open_new_tab
from requests import get
# Version number
from EllipticCurves import EllipticCurves
VERSION = 2.52
# Colors
BACKGROUND_COLOR = "#121212"
FONT_COLOR = "#fffdee"
FOREGROUND_COLOR = "#ff9f43"
FOREGROUND_COLOR_SECONDARY = "#fdcb6e"
# Minimum transaction amount to be saved
MIN_TRANSACTION_VALUE = 0.00000000001
# Minimum transaction amount to show a notification
MIN_TRANSACTION_VALUE_NOTIFY = 0.5
# Resources folder location
resources = "Wallet_" + str(VERSION) + "_resources/"
ENCRYPTION_ITERATIONS = 100_000
config = ConfigParser()
wrong_passphrase = False
global_balance = 0
oldbalance = 0
balance = 0
unpaid_balance = 0
profitCheck = 0
curr_bal = 0
WS_URI = "wss://server.duinocoin.com:15808"
def install(package):
subprocess.check_call([sys.executable, "-m", "pip", "install", package])
execl(sys.executable, sys.executable, *sys.argv)
def get_duco_price():
global duco_fiat_value
jsonapi = get(
"https://raw.githubusercontent.com/"
+ "revoxhere/"
+ "duco-statistics/master/"
+ "api.json",
data=None)
if jsonapi.status_code == 200:
try:
content = jsonapi.content.decode()
contentjson = loads(content)
duco_fiat_value = round(float(contentjson["Duco price"]), 4)
except Exception:
duco_fiat_value = 0.003
else:
duco_fiat_value = 0.003
Timer(30, get_duco_price).start()
def title(title):
if osname == "nt":
system("title " + title)
else:
print("\33]0;" + title + "\a", end="")
sys.stdout.flush()
def _derive_key(
password: bytes,
salt: bytes,
iterations: int = ENCRYPTION_ITERATIONS) -> bytes:
kdf = PBKDF2HMAC(
algorithm=hashes.SHA256(),
length=32,
salt=salt,
        iterations=iterations,
backend=backend)
return b64e(kdf.derive(password))
def password_encrypt(
message: bytes,
password: str,
iterations: int = ENCRYPTION_ITERATIONS) -> bytes:
salt = secrets.token_bytes(16)
key = _derive_key(
password.encode(),
salt,
ENCRYPTION_ITERATIONS)
return b64e(
b"%b%b%b" % (
salt,
ENCRYPTION_ITERATIONS.to_bytes(4, "big"),
b64d(Fernet(key).encrypt(message))))
def password_decrypt(
token: bytes,
password: str) -> bytes:
decoded = b64d(token)
salt, ENCRYPTION_ITERATIONS, token = decoded[:16], decoded[16:20], b64e(
decoded[20:])
ENCRYPTION_ITERATIONS = int.from_bytes(ENCRYPTION_ITERATIONS, "big")
key = _derive_key(
password.encode(),
salt,
ENCRYPTION_ITERATIONS)
return Fernet(key).decrypt(token)
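# Round-trip sketch for the helpers above (illustrative; relies on the Fernet /
# PBKDF2HMAC / b64e / b64d primitives used in this file):
#   token = password_encrypt(b"wallet passphrase", "secret")
#   assert password_decrypt(token, "secret") == b"wallet passphrase"
# The token packs a 16-byte salt, a 4-byte big-endian iteration count, and the
# Fernet ciphertext, which password_decrypt unpacks before deriving the key.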
def get_string(string_name):
if string_name in lang_file[lang]:
return lang_file[lang][string_name]
elif string_name in lang_file["english"]:
return lang_file["english"][string_name]
else:
return "String not found: " + string_name
def openTos(handler):
open_new_tab("https://github.com/revoxhere/duino-coin#terms-of-usage")
def openGitHub(handler):
open_new_tab("https://github.com/revoxhere/duino-coin")
def openWebsite(handler):
open_new_tab("https://duinocoin.com")
def openExchange(handler):
open_new_tab("https://revoxhere.github.io/duco-exchange/")
def openDiscord(handler):
open_new_tab("https://discord.com/invite/kvBkccy")
def openTransaction(hashToOpen):
open_new_tab("https://explorer.duinocoin.com/?search="+str(hashToOpen))
class LoginFrame(Frame):
def __init__(self, master):
super().__init__(master)
master.title("Login")
master.resizable(False, False)
TEXT_FONT_BOLD = Font(size=12, weight="bold")
TEXT_FONT = Font(size=12, weight="normal")
self.duco = ImageTk.PhotoImage(Image.open(resources + "duco.png"))
self.duco.image = self.duco
self.ducoLabel = Label(
self, background=FOREGROUND_COLOR,
foreground=FONT_COLOR,
image=self.duco)
self.ducoLabel2 = Label(
self,
background=FOREGROUND_COLOR,
foreground=FONT_COLOR,
text=get_string("welcome_message"),
font=TEXT_FONT_BOLD)
self.spacer = Label(self)
self.label_username = Label(
self,
text=get_string("username"),
font=TEXT_FONT_BOLD,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
padx=5)
self.label_password = Label(
self,
text=get_string("passwd"),
font=TEXT_FONT_BOLD,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
padx=5)
self.entry_username = Entry(
self,
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FOREGROUND_COLOR_SECONDARY)
self.entry_password = Entry(
self,
show="*",
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FOREGROUND_COLOR_SECONDARY)
self.ducoLabel.grid(
row=0,
sticky="nswe",
pady=(5, 0),
padx=(5))
self.ducoLabel2.grid(
row=1,
sticky="nswe",
padx=(5))
self.label_username.grid(
row=4,
sticky=W,
pady=(5, 0))
self.entry_username.grid(
row=5,
sticky=N,
padx=(5))
self.label_password.grid(
row=6,
sticky=W)
self.entry_password.grid(
row=7,
sticky=N)
self.logbtn = Button(
self,
text=get_string("login"),
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
activebackground=BACKGROUND_COLOR,
command=self._login_btn_clicked,
font=TEXT_FONT_BOLD)
self.logbtn.grid(
columnspan=2,
sticky="nswe",
padx=(5),
pady=(5, 1))
self.regbtn = Button(
self,
text=get_string("register"),
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
activebackground=BACKGROUND_COLOR,
command=self._register_btn_clicked,
font=TEXT_FONT_BOLD)
self.regbtn.grid(
columnspan=2,
sticky="nswe",
padx=(5),
pady=(0, 5))
self.configure(background=BACKGROUND_COLOR)
self.master.bind(
"<Return>",
self._login_btn_clicked_bind)
self.pack()
def _login_btn_clicked_bind(self, event):
self._login_btn_clicked()
def _login_btn_clicked(self):
global username, password
username = self.entry_username.get()
password = self.entry_password.get()
if username and password:
soc = websocket.create_connection(WS_URI)
soc.recv()
soc.send(bytes(
"LOGI,"
+ str(username)
+ ","
+ str(password),
encoding="utf8"))
response = soc.recv().rstrip("\n")
response = response.split(",")
if response[0] == "OK":
passwordEnc = b64encode(bytes(password, encoding="utf8"))
with sqlconn(resources + "wallet.db") as con:
cur = con.cursor()
cur.execute(
"""INSERT INTO
UserData(username, password, useWrapper)
VALUES(?, ?, ?)""",
(username, passwordEnc, "False"))
con.commit()
root.destroy()
else:
messagebox.showerror(
title=get_string("login_error"),
message=response[1])
else:
messagebox.showerror(
title=get_string("login_error"),
message=get_string("fill_the_blanks_warning"))
def _registerprotocol(self):
emailS = email.get()
usernameS = username.get()
passwordS = password.get()
confpasswordS = confpassword.get()
if emailS and usernameS and passwordS and confpasswordS:
if passwordS == confpasswordS:
soc = websocket.create_connection(WS_URI)
soc.recv()
soc.send(
bytes(
"REGI,"
+ str(usernameS)
+ ","
+ str(passwordS)
+ ","
+ str(emailS),
encoding="utf8"))
response = soc.recv().rstrip("\n")
response = response.split(",")
if response[0] == "OK":
messagebox.showinfo(
title=get_string("registration_success"),
message=get_string("registration_success_msg"))
register.destroy()
execl(sys.executable, sys.executable, *sys.argv)
else:
messagebox.showerror(
title=get_string("register_error"),
message=response[1])
else:
messagebox.showerror(
title=get_string("register_error"),
message=get_string("error_passwd_dont_match"))
else:
messagebox.showerror(
title=get_string("register_error"),
message=get_string("fill_the_blanks_warning"))
def _register_btn_clicked(self):
global username, password, confpassword, email, register
root.destroy()
register = Tk()
register.title(get_string("register"))
register.resizable(False, False)
TEXT_FONT_BOLD = Font(
register,
size=12,
weight="bold")
TEXT_FONT = Font(
register,
size=12,
weight="normal")
tos_warning = get_string("register_tos_warning")
import textwrap
tos_warning = textwrap.dedent(tos_warning)
tos_warning = "\n".join(l for line in tos_warning.splitlines()
for l in textwrap.wrap(line, width=20))
duco = ImageTk.PhotoImage(Image.open(resources + "duco.png"))
duco.image = duco
ducoLabel = Label(
register,
background=FOREGROUND_COLOR,
foreground=FONT_COLOR,
image=duco)
ducoLabel.grid(
row=0,
padx=5,
pady=(5, 0),
sticky="nswe")
ducoLabel2 = Label(
register,
background=FOREGROUND_COLOR,
foreground=FONT_COLOR,
text=get_string("register_on_network"),
font=TEXT_FONT_BOLD)
ducoLabel2.grid(row=1,
padx=5,
sticky="nswe")
def colorLabelBlue(handler):
ducoLabel3.configure(foreground="#6c5ce7")
def colorLabelNormal(handler):
ducoLabel3.configure(foreground=FONT_COLOR)
ducoLabel3 = Label(
register,
background=FOREGROUND_COLOR,
foreground=FONT_COLOR,
text=tos_warning,
font=TEXT_FONT)
ducoLabel3.grid(
row=2,
padx=5,
sticky="nswe")
ducoLabel3.bind("<Button-1>", openTos)
ducoLabel3.bind("<Enter>", colorLabelBlue)
ducoLabel3.bind("<Leave>", colorLabelNormal)
Label(
register,
text=get_string("username").upper(),
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
font=TEXT_FONT_BOLD,
).grid(
row=3,
sticky=W,
padx=5,
pady=(5, 0))
username = Entry(
register,
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FOREGROUND_COLOR_SECONDARY)
username.grid(
row=4,
padx=5)
Label(
register,
text=get_string("passwd").upper(),
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
font=TEXT_FONT_BOLD,
).grid(
row=5,
sticky=W,
padx=5)
password = Entry(
register,
show="*",
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FOREGROUND_COLOR_SECONDARY)
password.grid(
row=6,
padx=5)
Label(
register,
text=get_string("confirm_passwd").upper(),
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
font=TEXT_FONT_BOLD,
).grid(
row=7,
sticky=W,
padx=5)
confpassword = Entry(
register,
show="*",
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FOREGROUND_COLOR_SECONDARY)
confpassword.grid(
row=8,
padx=5)
Label(
register,
text=get_string("email").upper(),
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
font=TEXT_FONT_BOLD,
).grid(
row=9,
sticky=W,
padx=5)
email = Entry(
register,
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FOREGROUND_COLOR_SECONDARY)
email.grid(
row=10,
padx=5)
self.logbtn = Button(
register,
text=get_string("register"),
activebackground=BACKGROUND_COLOR,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
command=self._registerprotocol,
font=TEXT_FONT_BOLD)
self.logbtn.grid(
columnspan=2,
sticky="nswe",
padx=(5, 5),
pady=(5, 5))
register.configure(background=BACKGROUND_COLOR)
def loading_window():
global loading, status
loading = Tk()
loading.resizable(False, False)
loading.configure(background=BACKGROUND_COLOR)
loading.title(get_string("loading"))
try:
loading.iconphoto(True,
PhotoImage(file=resources + "duco_color.png"))
except Exception:
pass
TEXT_FONT = Font(loading,
size=10,
weight="bold")
TEXT_FONT_BOLD = Font(loading,
size=14,
weight="bold")
original = Image.open(resources + "duco_color.png")
resized = original.resize((128, 128), Image.ANTIALIAS)
github = ImageTk.PhotoImage(resized)
github.image = github
githubLabel = Label(loading,
image=github,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
githubLabel.grid(row=0,
column=0,
sticky=N + S + E + W,
pady=(5, 0),
padx=(5))
Label(
loading,
text=get_string("duino_coin_wallet"),
font=TEXT_FONT_BOLD,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
).grid(
row=1,
column=0,
sticky=S + W,
pady=(5, 0),
padx=5)
loading.update()
status = Label(
loading,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
text=get_string("loading_database"),
font=TEXT_FONT)
status.grid(
row=2,
column=0,
sticky=S + W,
pady=(0, 5),
padx=5)
loading.update()
def transactions_window(handler):
transactionsWindow = Toplevel()
transactionsWindow.resizable(False, False)
transactionsWindow.title(get_string("wallet_transactions"))
transactionsWindow.transient([root])
transactionsWindow.configure(background=BACKGROUND_COLOR)
TEXT_FONT_BOLD_LARGE = Font(
transactionsWindow,
size=14,
weight="bold")
TEXT_FONT = Font(
transactionsWindow,
size=12,
weight="normal")
Label(
transactionsWindow,
text=get_string("transaction_list"),
font=TEXT_FONT_BOLD_LARGE,
background=BACKGROUND_COLOR,
foreground=FOREGROUND_COLOR,
).grid(row=0,
column=0,
columnspan=2,
sticky=S + W,
pady=(5, 0),
padx=5)
Label(
transactionsWindow,
text=get_string("transaction_list_notice"),
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(row=1,
column=0,
columnspan=2,
sticky=S + W,
pady=(5, 0),
padx=5)
listbox = Listbox(
transactionsWindow,
width="35",
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
listbox.grid(
row=2,
column=0,
sticky=S + W + N + E,
padx=(5, 0),
pady=(0, 5))
scrollbar = Scrollbar(transactionsWindow,
background=BACKGROUND_COLOR)
scrollbar.grid(
row=2,
column=1,
sticky=N + S,
padx=(0, 5),
pady=(0, 5))
for i in gtxl:
listbox.insert(END, gtxl[i]["Sender"] + " to " + gtxl[i]
["Recipient"] + ": " + str(gtxl[i]["Amount"]) + " DUCO")
def get_selection(event):
try:
selection = listbox.curselection()[0]
openTransaction(gtxl[str(selection)]["Hash"])
except IndexError:
pass
listbox.bind("<Button-1>", get_selection)
listbox.config(yscrollcommand=scrollbar.set, font=TEXT_FONT)
scrollbar.config(command=listbox.yview)
def currency_converter_calc():
fromcurrency = fromCurrencyInput.get(fromCurrencyInput.curselection())
tocurrency = toCurrencyInput.get(toCurrencyInput.curselection())
amount = amountInput.get()
# TODO
value = duco_fiat_value * float(amount)
result = get_string("result") + ": " + str(round(value, 6))
conversionresulttext.set(str(result))
calculatorWindow.update()
def currency_converter_window(handler):
global conversionresulttext
global fromCurrencyInput
global toCurrencyInput
global amountInput
global calculatorWindow
calculatorWindow = Toplevel()
calculatorWindow.resizable(False, False)
calculatorWindow.title(get_string("wallet_calculator"))
calculatorWindow.transient([root])
calculatorWindow.configure(background=BACKGROUND_COLOR)
TEXT_FONT_BOLD = Font(
calculatorWindow,
size=12,
weight="bold")
TEXT_FONT_BOLD_LARGE = Font(
calculatorWindow,
size=14,
weight="bold")
TEXT_FONT = Font(
calculatorWindow,
size=12,
weight="normal")
Label(
calculatorWindow,
text=get_string("currency_converter"),
font=TEXT_FONT_BOLD_LARGE,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
).grid(row=0,
columnspan=2,
column=0,
sticky=S + W,
pady=5,
padx=5)
Label(
calculatorWindow,
text=get_string("from"),
font=TEXT_FONT_BOLD,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
).grid(row=1,
column=0,
sticky=S + W,
padx=5)
fromCurrencyInput = Listbox(
calculatorWindow,
exportselection=False,
background=BACKGROUND_COLOR,
selectbackground=FOREGROUND_COLOR,
border="0",
font=TEXT_FONT,
foreground=FONT_COLOR,
width="20",
height="13",
)
fromCurrencyInput.grid(row=2,
column=0,
sticky=S + W,
padx=(5, 0))
fromCurrencyInput.insert(0, "DUCO")
vsb = Scrollbar(
calculatorWindow,
orient="vertical",
command=fromCurrencyInput.yview,
background=BACKGROUND_COLOR,
)
vsb.grid(row=2,
column=1,
sticky="ns",
padx=(0, 5))
fromCurrencyInput.configure(yscrollcommand=vsb.set)
fromCurrencyInput.select_set(0)
fromCurrencyInput.event_generate("<<ListboxSelect>>")
Label(
calculatorWindow,
text=get_string("to"),
font=TEXT_FONT_BOLD,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
).grid(row=1,
column=3,
columnspan=2,
sticky=S + W,
padx=5)
toCurrencyInput = Listbox(
calculatorWindow,
exportselection=False,
background=BACKGROUND_COLOR,
selectbackground=FOREGROUND_COLOR,
border="0",
foreground=FONT_COLOR,
font=TEXT_FONT,
width="20",
height="13")
toCurrencyInput.grid(
row=2,
column=3,
sticky=S + W,
padx=(5, 0))
toCurrencyInput.insert(0, "USD")
vsb2 = Scrollbar(
calculatorWindow,
orient="vertical",
command=toCurrencyInput.yview,
background=BACKGROUND_COLOR,)
vsb2.grid(
row=2,
column=4,
sticky="ns",
padx=(0, 5))
toCurrencyInput.configure(yscrollcommand=vsb2.set)
toCurrencyInput.select_set(0)
toCurrencyInput.event_generate("<<ListboxSelect>>")
Label(
calculatorWindow,
text=get_string("input_amount"),
font=TEXT_FONT_BOLD,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
).grid(row=3,
columnspan=2,
column=0,
sticky=S + W,
padx=5)
def clear_ccamount_placeholder(self):
amountInput.delete("0", "100")
amountInput = Entry(
calculatorWindow,
foreground=FOREGROUND_COLOR_SECONDARY,
border="0",
font=TEXT_FONT,
background=BACKGROUND_COLOR,)
amountInput.grid(
row=4,
column=0,
sticky=N + S + W + E,
padx=5,
columnspan=2,
pady=(0, 5))
amountInput.insert("0", str(global_balance))
amountInput.bind("<FocusIn>", clear_ccamount_placeholder)
Button(
calculatorWindow,
text=get_string("calculate"),
font=TEXT_FONT_BOLD,
foreground=FOREGROUND_COLOR,
activebackground=BACKGROUND_COLOR,
background=BACKGROUND_COLOR,
command=currency_converter_calc,
).grid(row=3,
columnspan=2,
column=2,
sticky=N + S + W + E,
pady=(5, 0),
padx=5)
conversionresulttext = StringVar(calculatorWindow)
conversionresulttext.set(get_string("result") + ": 0.0")
conversionresultLabel = Label(
calculatorWindow,
textvariable=conversionresulttext,
font=TEXT_FONT_BOLD,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,)
conversionresultLabel.grid(
row=4,
columnspan=2,
column=2,
pady=(0, 5))
calculatorWindow.mainloop()
def statistics_window(handler):
statsApi = get(
"https://server.duinocoin.com"
+ "/api.json",
data=None)
    if statsApi.status_code == 200:  # Check for response
statsApi = statsApi.json()
miner_api = get(
"https://server.duinocoin.com"
+ "/miners.json",
data=None)
    if miner_api.status_code == 200:  # Check for response
miner_api = miner_api.json()
statsWindow = Toplevel()
statsWindow.resizable(False, False)
statsWindow.title(get_string("statistics_title"))
statsWindow.transient([root])
statsWindow.configure(background=BACKGROUND_COLOR)
TEXT_FONT_BOLD_LARGE = Font(
statsWindow,
size=14,
weight="bold")
TEXT_FONT = Font(
statsWindow,
size=12,
weight="normal")
Active_workers_listbox = Listbox(
statsWindow,
exportselection=False,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
border="0",
font=TEXT_FONT,
width="65",
height="8",)
Active_workers_listbox.grid(
row=1,
columnspan=2,
sticky=N + E + S + W,
pady=(0, 5),
padx=5)
i = 0
totalHashrate = 0
for threadid in miner_api:
if username in miner_api[threadid]["User"]:
rigId = miner_api[threadid]["Identifier"]
if rigId == "None":
rigId = ""
else:
rigId += ": "
software = miner_api[threadid]["Software"]
hashrate = str(round(miner_api[threadid]["Hashrate"], 2))
totalHashrate += float(hashrate)
difficulty = str(miner_api[threadid]["Diff"])
shares = (
str(miner_api[threadid]["Accepted"])
+ "/"
+ str(
miner_api[threadid]["Accepted"]
+ miner_api[threadid]["Rejected"]))
Active_workers_listbox.insert(
i,
"#"
+ str(i + 1)
+ ": "
+ rigId
+ software
+ " "
+ str(round(float(hashrate) / 1000, 2))
+ " kH/s @ diff "
+ difficulty
+ ", "
+ shares)
i += 1
if i == 0:
Active_workers_listbox.insert(
i, get_string("statistics_miner_warning"))
totalHashrateString = str(int(totalHashrate)) + " H/s"
if totalHashrate > 1000000000:
totalHashrateString = str(
round(totalHashrate / 1000000000, 2)) + " GH/s"
elif totalHashrate > 1000000:
totalHashrateString = str(round(totalHashrate / 1000000, 2)) + " MH/s"
elif totalHashrate > 1000:
totalHashrateString = str(round(totalHashrate / 1000, 2)) + " kH/s"
Active_workers_listbox.configure(height=i)
Active_workers_listbox.select_set(32)
Active_workers_listbox.event_generate("<<ListboxSelect>>")
Label(
statsWindow,
text=get_string("your_miners") + " - " + totalHashrateString,
font=TEXT_FONT_BOLD_LARGE,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
).grid(
row=0,
column=0,
columnspan=2,
sticky=S + W,
pady=5,
padx=5)
Label(
statsWindow,
text=get_string("richlist"),
font=TEXT_FONT_BOLD_LARGE,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
).grid(
row=2,
column=0,
sticky=S + W,
pady=5,
padx=5)
Top_10_listbox = Listbox(
statsWindow,
exportselection=False,
border="0",
font=TEXT_FONT,
width="30",
height="10",
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
Top_10_listbox.grid(
row=3,
column=0,
rowspan=10,
sticky=N + E + S + W,
pady=(0, 5),
padx=5)
num = 0
for i in statsApi["Top 10 richest miners"]:
Top_10_listbox.insert(num, i)
num += 1
Top_10_listbox.select_set(32)
Top_10_listbox.event_generate("<<ListboxSelect>>")
Label(
statsWindow,
text=get_string("network_info"),
font=TEXT_FONT_BOLD_LARGE,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
).grid(row=2,
column=1,
sticky=S + W,
padx=5,
pady=5)
Label(
statsWindow,
text=get_string("difficulty")
+ ": "
+ str(statsApi["Current difficulty"]),
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(row=3,
column=1,
sticky=S + W,
padx=5)
Label(
statsWindow,
text=get_string("mined_blocks")
+ ": "
+ str(statsApi["Mined blocks"]),
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(row=4,
column=1,
sticky=S + W,
padx=5)
Label(
statsWindow,
text=get_string("network_hashrate")
+ ": "
+ str(statsApi["Pool hashrate"]),
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(row=5,
column=1,
sticky=S + W,
padx=5)
Label(
statsWindow,
text=get_string("active_miners")
+ ": "
+ str(len(statsApi["Miners"])),
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(row=6,
column=1,
sticky=S + W,
padx=5)
Label(
statsWindow,
text="1 DUCO "
+ get_string("estimated_price")
+ ": $"
+ str(statsApi["Duco price"]),
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(row=7,
column=1,
sticky=S + W,
padx=5)
Label(
statsWindow,
text=get_string("registered_users")
+ ": "
+ str(statsApi["Registered users"]),
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(row=8,
column=1,
sticky=S + W,
padx=5)
Label(
statsWindow,
text=get_string("mined_duco")
+ ": "
+ str(statsApi["All-time mined DUCO"])
+ " ᕲ",
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(row=9,
column=1,
sticky=S + W,
padx=5)
statsWindow.mainloop()
def wrapper_window(handler):
def Wrap():
amount = amountWrap.get()
print("Got amount:", amount)
print("pub key:", pub_key)
soc = websocket.create_connection(WS_URI)
soc.recv()
try:
float(amount)
except Exception:
pass
else:
soc.send(bytes(
"LOGI,"
+ str(username)
+ ","
+ str(password),
encoding="utf8"))
_ = soc.recv()
soc.send(
bytes(
"WRAP,"
+ str(amount)
+ ","
+ str(pub_key)
+ str(",placeholder"),
encoding="utf8"))
soc.close()
sleep(2)
wrapperWindow.quit()
try:
pubkeyfile = open(str(resources + "DUCOPubKey.pub"), "r")
except Exception:
messagebox.showerror(
title=get_string("wrapper_error_title"),
message=get_string("wrapper_error"))
else:
if TRONPY_ENABLED:
pub_key = pubkeyfile.read()
pubkeyfile.close()
wrapperWindow = Toplevel()
wrapperWindow.resizable(False, False)
wrapperWindow.title(get_string("wrapper_title"))
wrapperWindow.transient([root])
askWrapAmount = Label(
wrapperWindow,
text=get_string("wrapper_amount_to_wrap") + ":")
askWrapAmount.grid(row=0,
column=0,
sticky=N + W)
amountWrap = Entry(wrapperWindow,
border="0",
font=Font(size=15))
amountWrap.grid(row=1,
column=0,
sticky=N + W)
wrapButton = Button(wrapperWindow,
text="Wrap",
command=Wrap)
wrapButton.grid(row=2,
column=0,
sticky=N + W)
else:
messagebox.showerror(
title=get_string("wrapper_error_title"),
message=get_string("wrapper_error_tronpy"))
def unwrapper_window(handler):
def UnWrap():
pubkeyfile = open(str(resources + "DUCOPubKey.pub"), "r")
pub_key = pubkeyfile.read()
pubkeyfile.close()
passphrase = passphraseEntry.get()
privkeyfile = open(str(resources + "DUCOPrivKey.encrypt"), "r")
privKeyEnc = privkeyfile.read()
privkeyfile.close()
try:
priv_key = str(password_decrypt(privKeyEnc, passphrase))[2:66] # strip the b'...' repr wrapper and keep the 64 hex characters
use_wrapper = True
except InvalidToken:
print(get_string("invalid_passphrase"))
use_wrapper = False
amount = amountUnWrap.get()
print("Got amount:", amount)
soc = websocket.create_connection(WS_URI)
soc.recv()
try:
float(amount)
except Exception:
pass
else:
soc.send(bytes(
"LOGI,"
+ str(username)
+ ","
+ str(password), encoding="utf8"))
_ = soc.recv()
if use_wrapper:
pendingvalues = wduco.functions.pendingWithdrawals(
pub_key, username)
# no transaction has been initiated yet, but the flag must exist for the check below
txn_success = False
try:
amount = float(amount)
except ValueError:
print("Value should be numeric - aborting")
else:
if int(float(amount) * 10 ** 6) >= pendingvalues:
toInit = int(float(amount) * 10 ** 6) - pendingvalues
else:
toInit = amount * 10 ** 6
if toInit > 0:
txn = (
wduco.functions.initiateWithdraw(username, toInit)
.with_owner(pub_key)
.fee_limit(5_000_000)
.build()
.sign(PrivateKey(bytes.fromhex(priv_key))))
txn = txn.broadcast()
txnfeedback = txn.result()
if txnfeedback:
txn_success = True
else:
txn_success = False
if txn_success or amount <= pendingvalues:
soc.send(
bytes(
"UNWRAP,"
+ str(amount)
+ ","
+ str(pub_key)
+ str(",placeholder"),
encoding="utf8"))
soc.close()
sleep(2)
unWrapperWindow.quit()
try:
pubkeyfile = open(str(resources + "DUCOPubKey.pub"), "r")
pubkeyfile.read()
pubkeyfile.close()
except Exception:
messagebox.showerror(
title=get_string("wrapper_error_title"),
message=get_string("wrapper_error"))
else:
if TRONPY_ENABLED:
unWrapperWindow = Toplevel()
unWrapperWindow.resizable(False, False)
unWrapperWindow.title(get_string("unwrapper_title"))
unWrapperWindow.transient([root])
unWrapperWindow.configure()
askAmount = Label(
unWrapperWindow,
text=get_string("unwrap_amount"))
askAmount.grid(row=1,
column=0,
sticky=N + W)
amountUnWrap = Entry(
unWrapperWindow,
border="0",
font=Font(size=15))
amountUnWrap.grid(row=2,
column=0,
sticky=N + W)
askPassphrase = Label(
unWrapperWindow,
text=get_string("ask_passphrase"))
askPassphrase.grid(row=4,
column=0,
sticky=N + W)
passphraseEntry = Entry(
unWrapperWindow,
border="0",
font=Font(size=15))
passphraseEntry.grid(
row=5,
column=0,
sticky=N + W)
wrapButton = Button(
unWrapperWindow,
text=get_string("unwrap_duco"),
command=UnWrap)
wrapButton.grid(
row=7,
column=0,
sticky=N + W)
else:
messagebox.showerror(
title=get_string("wrapper_error"),
message=get_string("wrapper_error_tronpy"))
def settings_window(handler):
def _wrapperconf():
if TRONPY_ENABLED:
privkey_input = StringVar()
passphrase_input = StringVar()
wrapconfWindow = Toplevel()
wrapconfWindow.resizable(False, False)
wrapconfWindow.title(get_string("wrapper_title"))
wrapconfWindow.transient([root])
wrapconfWindow.configure()
def setwrapper():
if privkey_input and passphrase_input:
priv_key = privkey_entry.get()
print("Got priv key:", priv_key)
passphrase = passphrase_entry.get()
print("Got passphrase:", passphrase)
try:
pub_key = PrivateKey(
bytes.fromhex(priv_key)
).public_key.to_base58check_address()
except Exception:
pass
else:
print("Saving data")
privkeyfile = open(
str(resources + "DUCOPrivKey.encrypt"), "w")
privkeyfile.write(
str(password_encrypt(
priv_key.encode(), passphrase
).decode()))
privkeyfile.close()
pubkeyfile = open(
str(resources + "DUCOPubKey.pub"), "w")
pubkeyfile.write(pub_key)
pubkeyfile.close()
Label(wrapconfWindow, text=get_string(
"wrapper_success")).pack()
wrapconfWindow.quit()
title = Label(
wrapconfWindow,
text=get_string("wrapper_config_title"),
font=Font(size=20))
title.grid(row=0,
column=0,
sticky=N + W,
padx=5)
askprivkey = Label(
wrapconfWindow,
text=get_string("ask_private_key"))
askprivkey.grid(row=1,
column=0,
sticky=N + W)
privkey_entry = Entry(
wrapconfWindow,
font=TEXT_FONT,
textvariable=privkey_input)
privkey_entry.grid(row=2,
column=0,
sticky=N + W)
askpassphrase = Label(wrapconfWindow,
text=get_string("passphrase"))
askpassphrase.grid(row=3,
column=0,
sticky=N + W)
passphrase_entry = Entry(
wrapconfWindow,
font=TEXT_FONT,
textvariable=passphrase_input)
passphrase_entry.grid(row=4,
column=0,
sticky=N + W)
wrapConfigButton = Button(
wrapconfWindow,
text=get_string("configure_wrapper_lowercase"),
command=setwrapper)
wrapConfigButton.grid(row=5,
column=0,
sticky=N + W)
wrapconfWindow.mainloop()
else:
messagebox.showerror(
title=get_string("wrapper_error"),
message=get_string("wrapper_error_tronpy"))
def _logout():
try:
with sqlconn(resources + "wallet.db") as con:
cur = con.cursor()
cur.execute("DELETE FROM UserData")
con.commit()
except Exception as e:
print(e)
try:
execl(sys.executable, sys.executable, *sys.argv)
except Exception as e:
print(e)
def _cleartrs():
with sqlconn(resources + "wallet.db") as con:
cur = con.cursor()
cur.execute("DELETE FROM transactions")
con.commit()
def _chgpass():
def _changepassprotocol():
oldpasswordS = oldpassword.get()
newpasswordS = newpassword.get()
confpasswordS = confpassword.get()
if oldpasswordS != newpasswordS:
if oldpasswordS and newpasswordS and confpasswordS:
if newpasswordS == confpasswordS:
soc = websocket.create_connection(WS_URI)
soc.recv()
soc.send(
bytes(
"LOGI,"
+ str(username)
+ ","
+ str(password), encoding="utf8"))
soc.recv()
soc.send(
bytes(
"CHGP,"
+ str(oldpasswordS)
+ ","
+ str(newpasswordS),
encoding="utf8"))
response = soc.recv().rstrip("\n").split(",")
soc.close()
if not "OK" in response[0]:
messagebox.showerror(
title=get_string("change_passwd_error"),
message=response[1])
else:
messagebox.showinfo(
title=get_string("change_passwd_ok"),
message=response[1])
try:
try:
with sqlconn(
resources + "wallet.db"
) as con:
cur = con.cursor()
cur.execute("DELETE FROM UserData")
con.commit()
except Exception as e:
print(e)
except FileNotFoundError:
pass
execl(sys.executable, sys.executable, *sys.argv)
else:
messagebox.showerror(
title=get_string("change_passwd_error"),
message=get_string("error_passwd_dont_match"))
else:
messagebox.showerror(
title=get_string("change_passwd_error"),
message=get_string("fill_the_blanks_warning"))
else:
messagebox.showerror(
title=get_string("change_passwd_error"),
message=get_string("same_passwd_error"))
settingsWindow.destroy()
changepassWindow = Toplevel()
changepassWindow.title(get_string("change_passwd_lowercase"))
changepassWindow.resizable(False, False)
changepassWindow.transient([root])
changepassWindow.configure(background=BACKGROUND_COLOR)
TEXT_FONT_BOLD = Font(changepassWindow, size=12, weight="bold")
TEXT_FONT = Font(changepassWindow, size=12, weight="normal")
Label(
changepassWindow,
text=get_string("old_passwd"),
font=TEXT_FONT_BOLD,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(row=0,
sticky=W,
padx=5)
oldpassword = Entry(
changepassWindow,
show="*",
font=TEXT_FONT,
foreground=FOREGROUND_COLOR_SECONDARY,
background=BACKGROUND_COLOR)
oldpassword.grid(row=1,
sticky="nswe",
padx=5)
Label(
changepassWindow,
text=get_string("new_passwd"),
font=TEXT_FONT_BOLD,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(row=2,
sticky=W,
padx=5)
newpassword = Entry(
changepassWindow,
show="*",
font=TEXT_FONT,
foreground=FOREGROUND_COLOR_SECONDARY,
background=BACKGROUND_COLOR)
newpassword.grid(row=3,
sticky="nswe",
padx=5)
Label(
changepassWindow,
text=get_string("confirm_new_passwd"),
font=TEXT_FONT_BOLD,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(row=4,
sticky=W,
padx=5)
confpassword = Entry(
changepassWindow,
show="*",
font=TEXT_FONT,
foreground=FOREGROUND_COLOR_SECONDARY,
background=BACKGROUND_COLOR)
confpassword.grid(row=5,
sticky="nswe",
padx=5)
chgpbtn = Button(
changepassWindow,
text=get_string("change_passwd"),
command=_changepassprotocol,
foreground=FOREGROUND_COLOR,
font=TEXT_FONT_BOLD,
background=BACKGROUND_COLOR,
activebackground=BACKGROUND_COLOR)
chgpbtn.grid(columnspan=2,
sticky="nswe",
pady=5,
padx=5)
settingsWindow = Toplevel()
settingsWindow.resizable(False, False)
settingsWindow.title(get_string("settings_title"))
settingsWindow.transient([root])
settingsWindow.configure(background=BACKGROUND_COLOR)
TEXT_FONT = Font(
settingsWindow,
size=12,
weight="normal")
TEXT_FONT_BOLD_LARGE = Font(
settingsWindow,
size=12,
weight="bold")
Label(
settingsWindow,
text=get_string("uppercase_settings"),
font=TEXT_FONT_BOLD_LARGE,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
).grid(row=0,
column=0,
columnspan=4,
sticky=S + W,
pady=(5, 5),
padx=(5, 0))
logoutbtn = Button(
settingsWindow,
text=get_string("logout"),
command=_logout,
font=TEXT_FONT,
background=BACKGROUND_COLOR,
activebackground=BACKGROUND_COLOR,
foreground=FONT_COLOR)
logoutbtn.grid(row=1,
column=0,
columnspan=4,
sticky="nswe",
padx=5)
chgpassbtn = Button(
settingsWindow,
text=get_string("change_passwd"),
command=_chgpass,
font=TEXT_FONT,
background=BACKGROUND_COLOR,
activebackground=BACKGROUND_COLOR,
foreground=FONT_COLOR)
chgpassbtn.grid(row=2,
column=0,
columnspan=4,
sticky="nswe",
padx=5)
wrapperconfbtn = Button(
settingsWindow,
text=get_string("configure_wrapper"),
command=_wrapperconf,
font=TEXT_FONT,
background=BACKGROUND_COLOR,
activebackground=BACKGROUND_COLOR,
foreground=FONT_COLOR)
wrapperconfbtn.grid(row=3,
column=0,
columnspan=4,
sticky="nswe",
padx=5)
cleartransbtn = Button(
settingsWindow,
text=get_string("clear_transactions"),
command=_cleartrs,
font=TEXT_FONT,
background=BACKGROUND_COLOR,
activebackground=BACKGROUND_COLOR,
foreground=FONT_COLOR)
cleartransbtn.grid(row=4,
column=0,
columnspan=4,
sticky="nswe",
padx=5)
separator = ttk.Separator(settingsWindow, orient="horizontal")
separator.grid(
row=5,
column=0,
columnspan=4,
sticky=N + S + E + W,
padx=(5, 5),
pady=5)
Label(
settingsWindow,
text=get_string("logged_in_as")
+ ": "
+ str(username),
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(
row=6,
column=0,
columnspan=4,
padx=5,
sticky=S + W)
Label(
settingsWindow,
text=get_string("wallet_version")
+ ": "
+ str(VERSION),
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(
row=7,
column=0,
columnspan=4,
padx=5,
sticky=S + W)
Label(
settingsWindow,
text=get_string("translation_author_message")
+ " "
+ get_string("translation_author"),
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(
row=8,
column=0,
columnspan=4,
padx=5,
sticky=S + W)
Label(
settingsWindow,
text=get_string("config_dev_warning"),
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(
row=9,
column=0,
columnspan=4,
padx=5,
sticky=S + W)
separator = ttk.Separator(settingsWindow, orient="horizontal")
separator.grid(
row=10,
column=0,
columnspan=4,
sticky=N + S + E + W,
padx=(5, 5),
pady=5)
original = Image.open(resources + "duco.png")
resized = original.resize((48, 48), Image.ANTIALIAS)
website = ImageTk.PhotoImage(resized)
website.image = website
websiteLabel = Label(
settingsWindow,
image=website,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
websiteLabel.grid(
row=11,
column=0,
sticky=N + S + E + W,
padx=(5, 0),
pady=(0, 5))
websiteLabel.bind("<Button-1>", openWebsite)
original = Image.open(resources + "github.png")
resized = original.resize((48, 48), Image.ANTIALIAS)
github = ImageTk.PhotoImage(resized)
github.image = github
githubLabel = Label(
settingsWindow,
image=github,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
githubLabel.grid(
row=11,
column=1,
sticky=N + S + E + W,
pady=(0, 5))
githubLabel.bind("<Button-1>", openGitHub)
original = Image.open(resources + "exchange.png")
resized = original.resize((48, 48), Image.ANTIALIAS)
exchange = ImageTk.PhotoImage(resized)
exchange.image = exchange
exchangeLabel = Label(
settingsWindow,
image=exchange,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
exchangeLabel.grid(
row=11,
column=2,
sticky=N + S + E + W,
pady=(0, 5))
exchangeLabel.bind("<Button-1>", openExchange)
original = Image.open(resources + "discord.png")
resized = original.resize((48, 48), Image.ANTIALIAS)
discord = ImageTk.PhotoImage(resized)
discord.image = discord
discordLabel = Label(
settingsWindow,
image=discord,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
discordLabel.grid(
row=11,
column=3,
sticky=N + S + E + W,
padx=(0, 5),
pady=(0, 5))
discordLabel.bind("<Button-1>", openDiscord)
def get_balance():
global oldbalance
global balance
global unpaid_balance
global global_balance
global gtxl
try:
soc = websocket.create_connection(WS_URI)
soc.recv()
soc.send(bytes(
"LOGI,"
+ str(username)
+ ","
+ str(password), encoding="utf8"))
_ = soc.recv()
soc.send(bytes(
"BALA",
encoding="utf8"))
oldbalance = balance
balance = float(soc.recv().rstrip("\n"))
global_balance = round(float(balance), 8)
try:
gtxl = {}
soc.send(bytes(
"GTXL," + str(username) + ",7",
encoding="utf8"))
gtxl = str(soc.recv().rstrip(
"\n").replace("\'", "\""))
gtxl = jsonloads(gtxl)
except Exception as e:
print("Error getting transaction list: " + str(e))
if oldbalance != balance:
difference = float(balance) - float(oldbalance)
dif_with_unpaid = (
float(balance) - float(oldbalance)) + unpaid_balance
if float(balance) != float(difference):
if (dif_with_unpaid >= MIN_TRANSACTION_VALUE
or dif_with_unpaid < 0
):
now = datetime.now()
difference = round(dif_with_unpaid, 8)
if ((difference >= MIN_TRANSACTION_VALUE_NOTIFY
or difference < 0)
and notificationsEnabled
):
notification = Notify()
notification.title = get_string("duino_coin_wallet")
notification.message = (
get_string("notification_new_transaction")
+ "\n"
+ now.strftime("%d.%m.%Y %H:%M:%S\n")
+ str(round(difference, 6))
+ " DUCO")
notification.icon = resources + "duco_color.png"
notification.send(block=False)
with sqlconn(resources + "wallet.db") as con:
cur = con.cursor()
cur.execute(
"""INSERT INTO Transactions(Date, amount)
VALUES(?, ?)""", (
now.strftime("%d.%m.%Y %H:%M:%S"),
round(difference, 8)))
con.commit()
unpaid_balance = 0
else:
unpaid_balance += float(balance) - float(oldbalance)
except Exception as e:
print("Retrying in 3s. (" + str(e) + ")")
Timer(3, get_balance).start()
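# Illustrative sketch (not part of the wallet): the LOGI/BALA exchange that
# get_balance() above performs, reduced to a self-contained helper. The server
# URI and credentials are placeholders supplied by the caller.
def _example_fetch_balance(ws_uri, username, password):
    """Hedged sketch of the login + balance request, assuming websocket-client."""
    import websocket  # pip install websocket-client
    soc = websocket.create_connection(ws_uri)
    soc.recv()  # initial server message, read and discarded as in get_balance()
    soc.send(bytes("LOGI," + str(username) + "," + str(password), encoding="utf8"))
    soc.recv()  # login acknowledgement
    soc.send(bytes("BALA", encoding="utf8"))
    balance = float(soc.recv().rstrip("\n"))
    soc.close()
    return balance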
def get_wbalance():
if TRONPY_ENABLED:
try:
pubkeyfile = open(str(resources + "DUCOPubKey.pub"), "r")
pub_key = pubkeyfile.read()
pubkeyfile.close()
wBalance = float(wduco.functions.balanceOf(pub_key)) / (10 ** 6)
return wBalance
except Exception:
return 0.0
else:
return 0.0
def update_balance_labels():
global profit_array, profitCheck
try:
balancetext.set(str(round(global_balance, 7)) + " ᕲ")
wbalancetext.set(str(get_wbalance()) + " wᕲ")
balanceusdtext.set(
"$" + str(round(global_balance * duco_fiat_value, 4)))
with sqlconn(resources + "wallet.db") as con:
cur = con.cursor()
cur.execute("SELECT rowid,* FROM Transactions ORDER BY rowid DESC")
Transactions = cur.fetchall()
transactionstext_format = ""
for i, row in enumerate(Transactions, start=1):
transactionstext_format += str(row[1]) + \
" " + str(row[2]) + " DUCO\n"
if i == 6:
transactionstext_format = transactionstext_format.rstrip("\n")
break
transactionstext.set(transactionstext_format)
if profit_array[2] != 0:
sessionprofittext.set(
get_string("session") + ": "
+ str(profit_array[0]) + " ᕲ")
minuteprofittext.set(
"≈" + str(profit_array[1]) + " ᕲ/"
+ get_string("minute"))
hourlyprofittext.set(
"≈" + str(profit_array[2]) + " ᕲ/"
+ get_string("hour"))
dailyprofittext.set(
"≈"
+ str(profit_array[3])
+ " ᕲ/"
+ get_string("day")
+ " ($"
+ str(round(profit_array[3] * duco_fiat_value, 4))
+ ")")
else:
if profitCheck > 10:
sessionprofittext.set(get_string("sessionprofit_unavailable1"))
minuteprofittext.set(get_string("sessionprofit_unavailable2"))
hourlyprofittext.set("")
dailyprofittext.set("")
profitCheck += 1
except Exception:
_exit(0)
Timer(1, update_balance_labels).start()
def profit_calculator(start_bal):
try: # Thanks Bilaboz for the code! (a tiny worked example of this scaling follows the function)
global curr_bal, profit_array
prev_bal = curr_bal
curr_bal = global_balance
session = curr_bal - start_bal
tensec = curr_bal - prev_bal
minute = tensec * 6
hourly = minute * 60
daily = hourly * 24
if tensec >= 0:
profit_array = [
round(session, 8),
round(minute, 6),
round(hourly, 4),
round(daily, 2)]
except Exception:
_exit(0)
Timer(10, profit_calculator, [start_bal]).start()
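# Tiny worked example of the scaling above (illustrative numbers only): a gain
# of 0.05 DUCO over the 10 s polling interval extrapolates to 0.3 DUCO/min,
# 18 DUCO/h and 432 DUCO/day.
def _example_profit_extrapolation(tensec_gain=0.05):
    """Hedged sketch of the 10 s -> minute -> hour -> day extrapolation."""
    minute = tensec_gain * 6   # 0.3 for the example input
    hourly = minute * 60       # 18.0
    daily = hourly * 24        # 432.0
    return minute, hourly, daily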
def send_funds_protocol(handler):
recipientStr = recipient.get()
amountStr = amount.get()
MsgBox = messagebox.askquestion(
get_string("warning"),
get_string("send_funds_warning")
+ " "
+ str(amountStr)
+ " DUCO "
+ get_string("send_funds_to")
+ " "
+ str(recipientStr)
+ "?",
icon="warning",)
if MsgBox == "yes":
soc = websocket.create_connection(WS_URI)
soc.recv()
soc.send(bytes(
"LOGI,"
+ str(username)
+ ","
+ str(password),
encoding="utf8"))
response = soc.recv()
import EllipticCurves
curve = EllipticCurves.EllipticCurves()
message = str(recipientStr) + ":" + str(amountStr)
signature = curve.sign_transaction(message) # (r, s, message hash) tuple; the server uses it to validate the transaction (see the generic sketch after this function)
soc.send(
bytes(
"SEND,"
+ "-"
+ ","
+ str(recipientStr)
+ ","
+ str(amountStr)
+ ","
+ str(signature),
encoding="utf8"))
response = soc.recv().rstrip("\n").split(",")
soc.close()
if "OK" in str(response[0]):
MsgBox = messagebox.showinfo(response[0],
response[1]
+ "\nTXID:"
+ response[2])
else:
MsgBox = messagebox.showwarning(response[0], response[1])
root.update()
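# Generic illustration of the sign-then-verify flow mentioned in the comment
# inside send_funds_protocol(). The wallet's own EllipticCurves module is not
# shown in this file, so this sketch uses the third-party `ecdsa` package
# instead; names and parameters here are assumptions, not the wallet's actual
# implementation.
def _example_sign_and_verify(message):
    """Hedged sketch: sign a transaction message, then verify it as a server might."""
    import hashlib
    from ecdsa import SigningKey, SECP256k1, BadSignatureError  # pip install ecdsa
    sk = SigningKey.generate(curve=SECP256k1)  # client-side private key
    vk = sk.get_verifying_key()                # public key known to the server
    digest = hashlib.sha256(message.encode()).digest()
    signature = sk.sign_digest(digest)
    try:
        vk.verify_digest(signature, digest)    # the server-side validation step
        return True
    except BadSignatureError:
        return False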
def init_rich_presence():
global RPC
try:
RPC = Presence(806985845320056884)
RPC.connect()
except Exception: # Discord not launched
pass
def update_rich_presence():
startTime = int(time())
while True:
try:
balance = round(global_balance, 4)
RPC.update(
details=str(balance)
+ " ᕲ ($"
+ str(round(duco_fiat_value * balance, 2))
+ ")",
start=startTime,
large_image="duco",
large_text="Duino-Coin, "
+ "a coin that can be mined with almost everything, "
+ "including AVR boards",
buttons=[
{"label": "Learn more",
"url": "https://duinocoin.com"},
{"label": "Discord Server",
"url": "https://discord.gg/k48Ht5y"}])
except Exception: # Discord not launched
pass
sleep(15)
class Wallet:
def __init__(self, master):
global recipient
global amount
global balancetext
global wbalancetext
global sessionprofittext
global minuteprofittext
global hourlyprofittext
global dailyprofittext
global balanceusdtext
global transactionstext
global curr_bal
global profit_array
try:
loading.destroy()
except Exception:
pass
textFont4 = Font(
size=14,
weight="bold")
TEXT_FONT_BOLD_LARGE = Font(
size=12,
weight="bold")
TEXT_FONT_BOLD = Font(
size=18,
weight="bold")
TEXT_FONT = Font(
size=12,
weight="normal")
self.master = master
master.resizable(False, False)
master.configure(background=BACKGROUND_COLOR)
master.title(get_string("duino_coin_wallet"))
Label(
master,
text=get_string("uppercase_duino_coin_wallet")
+ ": "
+ str(username),
font=TEXT_FONT_BOLD_LARGE,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
).grid(
row=0,
column=0,
sticky=S + W,
columnspan=4,
pady=(5, 0),
padx=(5, 0))
balancetext = StringVar()
wbalancetext = StringVar()
balancetext.set(get_string("please_wait"))
if TRONPY_ENABLED:
wbalancetext.set(get_string("please_wait"))
else:
wbalancetext.set("0.00")
balanceLabel = Label(
master,
textvariable=balancetext,
font=TEXT_FONT_BOLD,
foreground=FOREGROUND_COLOR_SECONDARY,
background=BACKGROUND_COLOR)
balanceLabel.grid(row=1,
column=0,
columnspan=3,
sticky=S + W,
padx=(5, 0))
wbalanceLabel = Label(
master,
textvariable=wbalancetext,
font=textFont4,
foreground=FOREGROUND_COLOR_SECONDARY,
background=BACKGROUND_COLOR)
wbalanceLabel.grid(row=2,
column=0,
columnspan=3,
sticky=S + W,
padx=(5, 0))
balanceusdtext = StringVar()
balanceusdtext.set(get_string("please_wait"))
Label(
master,
textvariable=balanceusdtext,
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(row=1,
column=3,
sticky=S + E,
pady=(0, 1.5),
padx=(0, 5))
separator = ttk.Separator(master, orient="horizontal")
separator.grid(
row=4,
column=0,
sticky=N + S + E + W,
columnspan=4,
padx=(5, 5),
pady=(0, 5))
def clear_recipient_placeholder(self):
recipient.delete("0", "100")
def clear_amount_placeholder(self):
amount.delete("0", "100")
Label(
master,
text=get_string("recipient"),
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(row=5,
column=0,
sticky=W + S,
padx=(5, 0))
recipient = Entry(
master,
border="0",
font=TEXT_FONT,
foreground=FOREGROUND_COLOR_SECONDARY,
background=BACKGROUND_COLOR)
recipient.grid(row=5,
column=1,
sticky=N + W + S + E,
columnspan=3,
padx=(0, 5))
recipient.insert("0", "revox")
recipient.bind("<FocusIn>", clear_recipient_placeholder)
Label(
master,
text=get_string("amount"),
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(row=6,
column=0,
sticky=W + S,
padx=(5, 0))
amount = Entry(
master,
border="0",
font=TEXT_FONT,
foreground=FOREGROUND_COLOR_SECONDARY,
background=BACKGROUND_COLOR)
amount.grid(row=6,
column=1,
sticky=N + W + S + E,
columnspan=3,
padx=(0, 5))
amount.insert("0", str(VERSION))
amount.bind("<FocusIn>", clear_amount_placeholder)
sendLabel = Button(
master,
text=get_string("send_funds"),
font=TEXT_FONT_BOLD_LARGE,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
activebackground=BACKGROUND_COLOR)
sendLabel.grid(
row=8,
column=0,
sticky=N + S + E + W,
columnspan=4,
padx=(5),
pady=(1, 2))
sendLabel.bind("<Button-1>", send_funds_protocol)
wrapLabel = Button(
master,
text=get_string("wrap_duco"),
font=TEXT_FONT_BOLD_LARGE,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
activebackground=BACKGROUND_COLOR)
wrapLabel.grid(
row=9,
column=0,
sticky=N + S + E + W,
columnspan=2,
padx=(5, 1),
pady=(1, 5))
wrapLabel.bind("<Button-1>", wrapper_window)
wrapLabel = Button(
master,
text=get_string("unwrap_duco"),
font=TEXT_FONT_BOLD_LARGE,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
activebackground=BACKGROUND_COLOR)
wrapLabel.grid(
row=9,
column=2,
sticky=N + S + E + W,
columnspan=2,
padx=(1, 5),
pady=(1, 5))
wrapLabel.bind("<Button-1>", unwrapper_window)
separator = ttk.Separator(master, orient="horizontal")
separator.grid(
row=10,
column=0,
sticky=N + S + E + W,
columnspan=4,
padx=(5, 5))
Label(
master,
text=get_string("estimated_profit"),
font=TEXT_FONT_BOLD_LARGE,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
).grid(row=11,
column=0,
sticky=S + W,
columnspan=4,
pady=(5, 0),
padx=(5, 0))
sessionprofittext = StringVar()
sessionprofittext.set(get_string("please_wait_calculating"))
sessionProfitLabel = Label(
master,
textvariable=sessionprofittext,
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
sessionProfitLabel.grid(
row=12,
column=0,
sticky=W,
columnspan=4,
padx=5)
minuteprofittext = StringVar()
minuteProfitLabel = Label(
master,
textvariable=minuteprofittext,
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
minuteProfitLabel.grid(
row=13,
column=0,
sticky=W,
columnspan=4,
padx=5)
hourlyprofittext = StringVar()
hourlyProfitLabel = Label(
master,
textvariable=hourlyprofittext,
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
hourlyProfitLabel.grid(
row=14,
column=0,
sticky=W,
columnspan=4,
padx=5)
dailyprofittext = StringVar()
dailyprofittext.set("")
dailyProfitLabel = Label(
master,
textvariable=dailyprofittext,
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
dailyProfitLabel.grid(
row=15,
column=0,
sticky=W,
columnspan=4,
padx=5)
separator = ttk.Separator(master, orient="horizontal")
separator.grid(
row=16,
column=0,
sticky=N + S + E + W,
columnspan=4,
padx=5)
Label(
master,
text=get_string("local_transactions"),
font=TEXT_FONT_BOLD_LARGE,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
).grid(row=17,
column=0,
sticky=S + W,
columnspan=4,
pady=(5, 0),
padx=(5, 0))
transactionstext = StringVar()
transactionstext.set("")
transactionstextLabel = Label(
master,
textvariable=transactionstext,
font=TEXT_FONT,
justify=LEFT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
transactionstextLabel.grid(
row=18,
column=0,
sticky=W,
columnspan=4,
padx=5,
pady=(0, 5))
separator = ttk.Separator(master,
orient="horizontal")
separator.grid(
row=19,
column=0,
sticky=N + S + E + W,
columnspan=4,
padx=5,
pady=(0, 10))
original = Image.open(resources + "transactions.png")
resized = original.resize((58, 58), Image.ANTIALIAS)
transactions = ImageTk.PhotoImage(resized)
transactions.image = transactions
transactionsLabel = Label(
master,
image=transactions,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
transactionsLabel.grid(
row=20,
column=0,
sticky=N + S + W + E,
pady=(0, 5))
transactionsLabel.bind("<Button>", transactions_window)
original = Image.open(resources + "calculator.png")
resized = original.resize((58, 58), Image.ANTIALIAS)
calculator = ImageTk.PhotoImage(resized)
calculator.image = calculator
calculatorLabel = Label(
master,
image=calculator,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
calculatorLabel.grid(
row=20,
column=1,
sticky=N + S + W + E,
padx=(0, 5),
pady=(0, 5))
calculatorLabel.bind("<Button>", currency_converter_window)
original = Image.open(resources + "stats.png")
resized = original.resize((58, 58), Image.ANTIALIAS)
stats = ImageTk.PhotoImage(resized)
stats.image = stats
statsLabel = Label(
master,
image=stats,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
statsLabel.grid(
row=20,
column=2,
sticky=N + S + W + E,
padx=(0, 5),
pady=(0, 5))
statsLabel.bind("<Button>", statistics_window)
original = Image.open(resources + "settings.png")
resized = original.resize((58, 58), Image.ANTIALIAS)
settings = ImageTk.PhotoImage(resized)
settings.image = settings
settingsLabel = Label(
master,
image=settings,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
settingsLabel.grid(
row=20,
column=3,
sticky=N + S + W + E,
padx=(0, 10),
pady=(0, 5))
settingsLabel.bind("<Button>", settings_window)
root.iconphoto(True, PhotoImage(file=resources + "duco_color.png"))
start_balance = global_balance
curr_bal = start_balance
profit_calculator(start_balance)
update_balance_labels()
root.mainloop()
try:
from pypresence import Presence
except ModuleNotFoundError:
print("Pypresence is not installed."
+ "Wallet will try to install it. "
+ "If it fails, please manually install \"pypresence\".")
install("pypresence")
try:
from PIL import Image, ImageTk
except ModuleNotFoundError:
print("Pillow is not installed. "
+ "Wallet will try to install it. "
+ "If it fails, please manually install \"Pillow\".")
install("Pillow")
try:
from notifypy import Notify
except ModuleNotFoundError:
print("Notify-py is not installed. "
+ "Continuing without notification system.")
notificationsEnabled = False
else:
notificationsEnabled = True
try:
from cryptography.fernet import Fernet, InvalidToken
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
backend = default_backend()
except ModuleNotFoundError:
print("Cryptography is not installed. "
+ "Please manually install \"cryptography\"."
+ "\nExiting in 15s.")
sleep(15)
_exit(1)
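# The cryptography imports above are what the wallet's password_encrypt() /
# password_decrypt() helpers (defined earlier in this file) rely on. The helper
# below is only a minimal, self-contained sketch of the usual PBKDF2 -> Fernet
# pattern; the salt handling and iteration count are assumptions, not the
# wallet's exact parameters.
def _example_passphrase_encrypt(data, passphrase):
    """Hedged sketch: derive a Fernet key from a passphrase and encrypt data."""
    from os import urandom
    from base64 import urlsafe_b64encode
    from cryptography.fernet import Fernet
    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives import hashes
    from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
    salt = urandom(16)  # a real implementation must store the salt next to the token
    kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=32, salt=salt,
                     iterations=100_000, backend=default_backend())
    key = urlsafe_b64encode(kdf.derive(passphrase.encode()))
    return Fernet(key).encrypt(data)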
try:
import secrets
except ModuleNotFoundError:
print("Secrets is not installed. "
+ "Please manually install \"secrets\"."
+ "\nExiting in 15s.")
sleep(15)
_exit(1)
try:
from base64 import urlsafe_b64decode as b64d
from base64 import urlsafe_b64encode as b64e
except ModuleNotFoundError:
print("Base64 is not installed. "
+ "Please manually install \"base64\""
+ "\nExiting in 15s.")
sleep(15)
_exit(1)
try:
import websocket
except ModuleNotFoundError:
print("websocket-client is not installed. "
+ "Wallet will try to install it. "
+ "If it fails, please manually install \"websocket-client\".")
install("websocket-client")
try:
import tronpy
from tronpy.keys import PrivateKey
TRONPY_ENABLED = True
except ModuleNotFoundError:
TRONPY_ENABLED = False
print("Tronpy is not installed. "
+ "Please manually install \"tronpy\" "
+ "if you intend on using wDUCO wrapper.")
else:
try:
tron = tronpy.Tron()
wduco = tron.get_contract("TWYaXdxA12JywrUdou3PFD1fvx2PWjqK9U")
except Exception:
TRONPY_ENABLED = False
print("Tron-side error, disabling wrapper for this session")
if not path.exists(resources):
mkdir(resources)
with sqlconn(resources + "/wallet.db") as con:
cur = con.cursor()
cur.execute(
"""CREATE TABLE IF NOT EXISTS
Transactions(Date TEXT, amount REAL)""")
cur.execute(
"""CREATE TABLE IF NOT EXISTS
UserData(username TEXT, password TEXT, useWrapper TEXT)""")
con.commit()
if not Path(resources + "duco.png").is_file():
urlretrieve("https://i.imgur.com/9JzxR0B.png", resources + "duco.png")
if not Path(resources + "duco_color.png").is_file():
urlretrieve(
"https://github.com/"
+ "revoxhere/"
+ "duino-coin/blob/master/"
+ "Resources/duco.png?raw=true",
resources + "duco_color.png")
if not Path(resources + "calculator.png").is_file():
urlretrieve("https://i.imgur.com/iqE28Ej.png",
resources + "calculator.png")
if not Path(resources + "exchange.png").is_file():
urlretrieve("https://i.imgur.com/0qMtoZ7.png",
resources + "exchange.png")
if not Path(resources + "discord.png").is_file():
urlretrieve("https://i.imgur.com/LoctALa.png",
resources + "discord.png")
if not Path(resources + "github.png").is_file():
urlretrieve("https://i.imgur.com/PHEfWbl.png",
resources + "github.png")
if not Path(resources + "settings.png").is_file():
urlretrieve("https://i.imgur.com/NNEI4WL.png",
resources + "settings.png")
if not Path(resources + "transactions.png").is_file():
urlretrieve("https://i.imgur.com/nbVPlKk.png",
resources + "transactions.png")
if not Path(resources + "stats.png").is_file():
urlretrieve("https://i.imgur.com/KRfHZUM.png",
resources + "stats.png")
if not Path(resources + "langs.json").is_file():
urlretrieve(
"https://raw.githubusercontent.com/"
+ "revoxhere/"
+ "duino-coin/master/Resources/"
+ "Wallet_langs.json",
resources + "langs.json")
# Load language strings depending on system locale
with open(resources + "langs.json", "r", encoding="utf-8") as lang_file:
lang_file = jsonloads(lang_file.read())
try:
locale = getdefaultlocale()[0]
if locale.startswith("es"):
lang = "spanish"
elif locale.startswith("pl"):
lang = "polish"
elif locale.startswith("fr"):
lang = "french"
elif locale.startswith("bg"):
lang = "bulgarian"
elif locale.startswith("nl"):
lang = "dutch"
elif locale.startswith("ru"):
lang = "russian"
elif locale.startswith("de"):
lang = "german"
elif locale.startswith("tr"):
lang = "turkish"
elif locale.startswith("it"):
lang = "italian"
elif locale.startswith("zh"):
lang = "chinese_simplified"
elif locale.startswith("th"):
lang = "thai"
else:
lang = "english"
except (AttributeError, IndexError):
lang = "english"
if __name__ == "__main__":
with sqlconn(resources + "wallet.db") as con:
cur = con.cursor()
cur.execute("SELECT COUNT(username) FROM UserData")
userdata_count = cur.fetchall()[0][0]
if userdata_count < 1:
root = Tk()
lf = LoginFrame(root)
root.mainloop()
cur = con.cursor()
cur.execute("SELECT COUNT(username) FROM UserData")
userdata_count = cur.fetchall()[0][0]
if userdata_count >= 1:
loading_window()
cur = con.cursor()
cur.execute("SELECT * FROM UserData")
userdata_query = cur.fetchone()
username = userdata_query[0]
passwordEnc = (userdata_query[1]).decode("utf-8")
password = b64decode(passwordEnc).decode("utf8")
status.config(text=get_string("preparing_wallet_window"))
loading.update()
try:
# Start duco price updater
get_duco_price()
get_balance()
init_rich_presence()
Thread(target=update_rich_presence).start()
try:
# Destroy loading dialog and start the main wallet window
loading.destroy()
except Exception:
pass
root = Tk()
my_gui = Wallet(root)
except Exception as e:
print(e)
_exit(0)
|
DialogPackageManager.py
|
'''
Created on Oct 6, 2013 (from DialogPluginManager.py)
@author: Mark V Systems Limited
(c) Copyright 2013 Mark V Systems Limited, All rights reserved.
'''
from tkinter import simpledialog, Toplevel, font, messagebox, VERTICAL, HORIZONTAL, N, S, E, W
from tkinter.constants import DISABLED, ACTIVE
try:
from tkinter.ttk import Treeview, Scrollbar, Frame, Label, Button
except ImportError:
from ttk import Treeview, Scrollbar, Frame, Label, Button
from arelle import PackageManager, DialogURL, DialogOpenArchive
from arelle.CntlrWinTooltip import ToolTip
import os, time, json
try:
import regex as re
except ImportError:
import re
def dialogPackageManager(mainWin):
# check for updates in background
import threading
thread = threading.Thread(target=lambda cntlr=mainWin: backgroundCheckForUpdates(cntlr))
thread.daemon = True
thread.start()
def backgroundCheckForUpdates(cntlr):
cntlr.showStatus(_("Checking for updates to packages")) # clear web loading status
packageNamesWithNewerFileDates = PackageManager.packageNamesWithNewerFileDates()
if packageNamesWithNewerFileDates:
cntlr.showStatus(_("Updates are available for these packages: {0}")
.format(', '.join(packageNamesWithNewerFileDates)), clearAfter=5000)
else:
cntlr.showStatus(_("No updates found for packages."), clearAfter=5000)
time.sleep(0.1) # Mac locks up without this; may be needed for an empty UI queue? (a generic sketch of this hand-off follows this function)
cntlr.uiThreadQueue.put((DialogPackageManager, [cntlr, packageNamesWithNewerFileDates]))
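# Generic, self-contained sketch of the worker-thread -> UI-thread hand-off used
# above. Arelle's cntlr.uiThreadQueue is not available here, so this uses a plain
# queue.Queue polled with Tk's after(); the widget and message are illustrative only.
def _example_ui_thread_handoff():
    """Hedged sketch: a worker thread posts a callable; the Tk main loop runs it."""
    import queue
    import threading
    import tkinter as tk

    root = tk.Tk()
    ui_queue = queue.Queue()

    def worker():
        result = "updates checked"                 # long-running work happens off the UI thread
        ui_queue.put(lambda: root.title(result))   # only a callable crosses back over

    def poll_queue():
        try:
            while True:
                ui_queue.get_nowait()()            # run whatever the worker queued
        except queue.Empty:
            pass
        root.after(100, poll_queue)                # keep polling from the UI thread

    threading.Thread(target=worker, daemon=True).start()
    poll_queue()
    root.mainloop()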
class DialogPackageManager(Toplevel):
def __init__(self, mainWin, packageNamesWithNewerFileDates):
super(DialogPackageManager, self).__init__(mainWin.parent)
self.ENABLE = _("Enable")
self.DISABLE = _("Disable")
self.parent = mainWin.parent
self.cntlr = mainWin
self.webCache = mainWin.webCache
# copy plugins for temporary display
self.packagesConfig = PackageManager.packagesConfig
self.packagesConfigChanged = False
self.packageNamesWithNewerFileDates = packageNamesWithNewerFileDates
parentGeometry = re.match(r"(\d+)x(\d+)[+]?([-]?\d+)[+]?([-]?\d+)", self.parent.geometry())
dialogX = int(parentGeometry.group(3))
dialogY = int(parentGeometry.group(4))
self.title(_("Taxonomy Packages Manager"))
frame = Frame(self)
# left button frame
buttonFrame = Frame(frame, width=40)
buttonFrame.columnconfigure(0, weight=1)
addLabel = Label(buttonFrame, text=_("Find taxonomy packages:"), wraplength=64, justify="center")
if not self.webCache.workOffline:
addSelectFromRegistryButton = Button(buttonFrame, text=_("Select"), command=self.selectFromRegistry)
ToolTip(addSelectFromRegistryButton, text=_("Select package from the XBRL Package Registry."), wraplength=240)
addLocalButton = Button(buttonFrame, text=_("Locally"), command=self.findLocally)
ToolTip(addLocalButton, text=_("File chooser allows selecting taxonomy packages to add (or reload), from the local file system. "
"Select either a PWD or prior taxonomy package zip file, or a taxonomy manifest (.taxonomyPackage.xml) within an unzipped taxonomy package. "), wraplength=360)
addWebButton = Button(buttonFrame, text=_("On Web"), command=self.findOnWeb)
ToolTip(addWebButton, text=_("Dialog to enter URL full path to load (or reload) package, from the web or local file system. "
"URL may be either a PWD or prior taxonomy package zip file, or a taxonomy manifest (.taxonomyPackage.xml) within an unzipped taxonomy package. "), wraplength=360)
manifestNameButton = Button(buttonFrame, text=_("Manifest"), command=self.manifestName)
ToolTip(manifestNameButton, text=_("Provide pre-PWD non-standard archive manifest file name pattern (e.g., *taxonomyPackage.xml). "
"Uses unix file name pattern matching. "
"Multiple manifest files are supported in pre-PWD archives (such as oasis catalogs). "
"(Replaces pre-PWD search for either .taxonomyPackage.xml or catalog.xml). "), wraplength=480)
self.manifestNamePattern = ""
addLabel.grid(row=0, column=0, pady=4)
selBtnRow = 1
if not self.webCache.workOffline:
addSelectFromRegistryButton.grid(row=selBtnRow, column=0, pady=4)
selBtnRow += 1
addLocalButton.grid(row=selBtnRow, column=0, pady=4)
selBtnRow += 1
addWebButton.grid(row=selBtnRow, column=0, pady=4)
selBtnRow += 1
manifestNameButton.grid(row=selBtnRow, column=0, pady=4)
selBtnRow += 1
buttonFrame.grid(row=0, column=0, rowspan=3, sticky=(N, S, W), padx=3, pady=3)
# right tree frame (packages already known to arelle)
packagesFrame = Frame(frame, width=700)
vScrollbar = Scrollbar(packagesFrame, orient=VERTICAL)
hScrollbar = Scrollbar(packagesFrame, orient=HORIZONTAL)
self.packagesView = Treeview(packagesFrame, xscrollcommand=hScrollbar.set, yscrollcommand=vScrollbar.set, height=7)
self.packagesView.grid(row=0, column=0, sticky=(N, S, E, W))
self.packagesView.bind('<<TreeviewSelect>>', self.packageSelect)
hScrollbar["command"] = self.packagesView.xview
hScrollbar.grid(row=1, column=0, sticky=(E,W))
vScrollbar["command"] = self.packagesView.yview
vScrollbar.grid(row=0, column=1, sticky=(N,S))
packagesFrame.columnconfigure(0, weight=1)
packagesFrame.rowconfigure(0, weight=1)
packagesFrame.grid(row=0, column=1, columnspan=4, sticky=(N, S, E, W), padx=3, pady=3)
self.packagesView.focus_set()
self.packagesView.column("#0", width=190, anchor="w")
self.packagesView.heading("#0", text=_("Name"))
self.packagesView["columns"] = ("ver", "status", "date", "update", "descr")
self.packagesView.column("ver", width=80, anchor="w", stretch=False)
self.packagesView.heading("ver", text=_("Version"))
self.packagesView.column("status", width=50, anchor="w", stretch=False)
self.packagesView.heading("status", text=_("Status"))
self.packagesView.column("date", width=170, anchor="w", stretch=False)
self.packagesView.heading("date", text=_("File Date"))
self.packagesView.column("update", width=50, anchor="w", stretch=False)
self.packagesView.heading("update", text=_("Update"))
self.packagesView.column("descr", width=200, anchor="w", stretch=False)
self.packagesView.heading("descr", text=_("Description"))
remappingsFrame = Frame(frame)
vScrollbar = Scrollbar(remappingsFrame, orient=VERTICAL)
hScrollbar = Scrollbar(remappingsFrame, orient=HORIZONTAL)
self.remappingsView = Treeview(remappingsFrame, xscrollcommand=hScrollbar.set, yscrollcommand=vScrollbar.set, height=5)
self.remappingsView.grid(row=0, column=0, sticky=(N, S, E, W))
hScrollbar["command"] = self.remappingsView.xview
hScrollbar.grid(row=1, column=0, sticky=(E,W))
vScrollbar["command"] = self.remappingsView.yview
vScrollbar.grid(row=0, column=1, sticky=(N,S))
remappingsFrame.columnconfigure(0, weight=1)
remappingsFrame.rowconfigure(0, weight=1)
remappingsFrame.grid(row=1, column=1, columnspan=4, sticky=(N, S, E, W), padx=3, pady=3)
self.remappingsView.focus_set()
self.remappingsView.column("#0", width=200, anchor="w")
self.remappingsView.heading("#0", text=_("Prefix"))
self.remappingsView["columns"] = ("remapping")
self.remappingsView.column("remapping", width=500, anchor="w", stretch=False)
self.remappingsView.heading("remapping", text=_("Remapping"))
# bottom frame package info details
packageInfoFrame = Frame(frame, width=700)
packageInfoFrame.columnconfigure(1, weight=1)
self.packageNameLabel = Label(packageInfoFrame, wraplength=600, justify="left",
font=font.Font(family='Helvetica', size=12, weight='bold'))
self.packageNameLabel.grid(row=0, column=0, columnspan=6, sticky=W)
self.packageVersionHdr = Label(packageInfoFrame, text=_("version:"), state=DISABLED)
self.packageVersionHdr.grid(row=1, column=0, sticky=W)
self.packageVersionLabel = Label(packageInfoFrame, wraplength=600, justify="left")
self.packageVersionLabel.grid(row=1, column=1, columnspan=5, sticky=W)
self.packageLicenseHdr = Label(packageInfoFrame, text=_("license:"), state=DISABLED)
self.packageLicenseHdr.grid(row=2, column=0, sticky=W)
self.packageLicenseLabel = Label(packageInfoFrame, wraplength=600, justify="left")
self.packageLicenseLabel.grid(row=2, column=1, columnspan=5, sticky=W)
self.packageDescrHdr = Label(packageInfoFrame, text=_("description:"), state=DISABLED)
self.packageDescrHdr.grid(row=3, column=0, sticky=W)
self.packageDescrLabel = Label(packageInfoFrame, wraplength=600, justify="left")
self.packageDescrLabel.grid(row=3, column=1, columnspan=5, sticky=W)
self.packagePrefixesHdr = Label(packageInfoFrame, text=_("prefixes:"), state=DISABLED)
self.packagePrefixesHdr.grid(row=4, column=0, sticky=W)
self.packagePrefixesLabel = Label(packageInfoFrame, wraplength=600, justify="left")
self.packagePrefixesLabel.grid(row=4, column=1, columnspan=5, sticky=W)
ToolTip(self.packagePrefixesLabel, text=_("List of prefixes that this package remaps."), wraplength=240)
self.packageUrlHdr = Label(packageInfoFrame, text=_("URL:"), state=DISABLED)
self.packageUrlHdr.grid(row=5, column=0, sticky=W)
self.packageUrlLabel = Label(packageInfoFrame, wraplength=600, justify="left")
self.packageUrlLabel.grid(row=5, column=1, columnspan=5, sticky=W)
ToolTip(self.packageUrlLabel, text=_("URL of taxonomy package (local file path or web loaded file)."), wraplength=240)
self.packageDateHdr = Label(packageInfoFrame, text=_("date:"), state=DISABLED)
self.packageDateHdr.grid(row=6, column=0, sticky=W)
self.packageDateLabel = Label(packageInfoFrame, wraplength=600, justify="left")
self.packageDateLabel.grid(row=6, column=1, columnspan=5, sticky=W)
ToolTip(self.packageDateLabel, text=_("Filesystem date of currently loaded package file (with parenthetical node when an update is available)."), wraplength=240)
self.publisherHdr = Label(packageInfoFrame, text=_("publisher:"), state=DISABLED)
self.publisherHdr.grid(row=7, column=0, sticky=W)
self.publisherLabel = Label(packageInfoFrame, wraplength=600, justify="left")
self.publisherLabel.grid(row=7, column=1, columnspan=5, sticky=W)
ToolTip(self.publisherLabel, text=_("Publisher of currently loaded package file."), wraplength=240)
self.publicationDateHdr = Label(packageInfoFrame, text=_("publication date:"), state=DISABLED)
self.publicationDateHdr.grid(row=8, column=0, sticky=W)
self.publicationDateLabel = Label(packageInfoFrame, wraplength=600, justify="left")
self.publicationDateLabel.grid(row=8, column=1, columnspan=5, sticky=W)
ToolTip(self.publicationDateLabel, text=_("Publication date"), wraplength=240)
self.packageEnableButton = Button(packageInfoFrame, text=self.ENABLE, state=DISABLED, command=self.packageEnable)
ToolTip(self.packageEnableButton, text=_("Enable/disable package."), wraplength=240)
self.packageEnableButton.grid(row=9, column=1, sticky=E)
self.packageMoveUpButton = Button(packageInfoFrame, text=_("Move Up"), state=DISABLED, command=self.packageMoveUp)
ToolTip(self.packageMoveUpButton, text=_("Move package up (above other remappings)."), wraplength=240)
self.packageMoveUpButton.grid(row=9, column=2, sticky=E)
self.packageMoveDownButton = Button(packageInfoFrame, text=_("Move Down"), state=DISABLED, command=self.packageMoveDown)
ToolTip(self.packageMoveDownButton, text=_("Move package down (below other remappings)."), wraplength=240)
self.packageMoveDownButton.grid(row=9, column=3, sticky=E)
self.packageReloadButton = Button(packageInfoFrame, text=_("Reload"), state=DISABLED, command=self.packageReload)
ToolTip(self.packageReloadButton, text=_("Reload/update package."), wraplength=240)
self.packageReloadButton.grid(row=9, column=4, sticky=E)
self.packageRemoveButton = Button(packageInfoFrame, text=_("Remove"), state=DISABLED, command=self.packageRemove)
ToolTip(self.packageRemoveButton, text=_("Remove package from packages table (does not erase the package file)."), wraplength=240)
self.packageRemoveButton.grid(row=9, column=5, sticky=E)
packageInfoFrame.grid(row=2, column=0, columnspan=5, sticky=(N, S, E, W), padx=3, pady=3)
packageInfoFrame.config(borderwidth=4, relief="groove")
okButton = Button(frame, text=_("Close"), command=self.ok)
ToolTip(okButton, text=_("Accept changes (if any) and close dialog."), wraplength=240)
cancelButton = Button(frame, text=_("Cancel"), command=self.close)
ToolTip(cancelButton, text=_("Cancel changes (if any) and close dialog."), wraplength=240)
okButton.grid(row=3, column=3, sticky=(S,E), pady=3)
cancelButton.grid(row=3, column=4, sticky=(S,E), pady=3, padx=3)
enableDisableFrame = Frame(frame)
enableDisableFrame.grid(row=3, column=1, sticky=(S,W), pady=3)
enableAllButton = Button(enableDisableFrame, text=_("Enable All"), command=self.enableAll)
ToolTip(enableAllButton, text=_("Enable all packages."), wraplength=240)
disableAllButton = Button(enableDisableFrame, text=_("Disable All"), command=self.disableAll)
ToolTip(disableAllButton, text=_("Disable all packages."), wraplength=240)
enableAllButton.grid(row=1, column=1)
disableAllButton.grid(row=1, column=2)
self.loadTreeViews()
self.geometry("+{0}+{1}".format(dialogX+50,dialogY+100))
frame.grid(row=0, column=0, sticky=(N,S,E,W))
frame.columnconfigure(0, weight=0)
frame.columnconfigure(1, weight=1)
frame.rowconfigure(0, weight=1)
window = self.winfo_toplevel()
window.columnconfigure(0, weight=1)
window.rowconfigure(0, weight=1)
self.bind("<Return>", self.ok)
self.bind("<Escape>", self.close)
self.protocol("WM_DELETE_WINDOW", self.close)
self.grab_set()
self.wait_window(self)
def loadTreeViews(self):
self.selectedModule = None
# clear previous treeview entries
for previousNode in self.packagesView.get_children(""):
self.packagesView.delete(previousNode)
for i, packageInfo in enumerate(self.packagesConfig.get("packages", [])):
name = packageInfo.get("name", "package{}".format(i))
node = self.packagesView.insert("", "end", "_{}".format(i), text=name)
self.packagesView.set(node, "ver", packageInfo.get("version"))
self.packagesView.set(node, "status", packageInfo.get("status"))
self.packagesView.set(node, "date", packageInfo.get("fileDate"))
if name in self.packageNamesWithNewerFileDates:
self.packagesView.set(node, "update", _("available"))
self.packagesView.set(node, "descr", packageInfo.get("description"))
# clear previous treeview entries
for previousNode in self.remappingsView.get_children(""):
self.remappingsView.delete(previousNode)
for i, remappingItem in enumerate(sorted(self.packagesConfig.get("remappings", {}).items())):
prefix, remapping = remappingItem
node = self.remappingsView.insert("", "end", prefix, text=prefix)
self.remappingsView.set(node, "remapping", remapping)
self.packageSelect() # clear out prior selection
def ok(self, event=None):
if self.packagesConfigChanged:
PackageManager.packagesConfig = self.packagesConfig
PackageManager.packagesConfigChanged = True
self.cntlr.onPackageEnablementChanged()
self.close()
def close(self, event=None):
self.parent.focus_set()
self.destroy()
def packageSelect(self, *args):
node = (self.packagesView.selection() or (None,))[0]
try:
nodeIndex = int(node[1:])
except (ValueError, TypeError):
nodeIndex = -1
if 0 <= nodeIndex < len(self.packagesConfig["packages"]):
packageInfo = self.packagesConfig["packages"][nodeIndex]
self.selectedPackageIndex = nodeIndex
name = packageInfo["name"]
self.packageNameLabel.config(text=name)
self.packageVersionHdr.config(state=ACTIVE)
self.packageVersionLabel.config(text=packageInfo["version"])
self.packageLicenseHdr.config(state=ACTIVE)
self.packageLicenseLabel.config(text=packageInfo.get("license"))
self.packageDescrHdr.config(state=ACTIVE)
self.packageDescrLabel.config(text=packageInfo["description"])
self.packagePrefixesHdr.config(state=ACTIVE)
self.packagePrefixesLabel.config(text=', '.join(packageInfo["remappings"].keys()))
self.packageUrlHdr.config(state=ACTIVE)
self.packageUrlLabel.config(text=packageInfo["URL"])
self.packageDateHdr.config(state=ACTIVE)
self.packageDateLabel.config(text=packageInfo["fileDate"] + " " +
(_("(an update is available)") if name in self.packageNamesWithNewerFileDates else ""))
self.publisherHdr.config(state=ACTIVE)
_publisher = ''
if packageInfo.get("publisher"):
_publisher += packageInfo["publisher"]
if packageInfo.get("publisherCountry"):
_publisher += ", " + packageInfo["publisherCountry"]
if packageInfo.get("publisherURL"):
_publisher += ". " + packageInfo["publisherURL"]
self.publisherLabel.config(text=_publisher)
self.publicationDateHdr.config(state=ACTIVE)
self.publicationDateLabel.config(text=packageInfo.get("publicationDate",''))
self.packageEnableButton.config(state=ACTIVE,
text={"enabled":self.DISABLE,
"disabled":self.ENABLE}[packageInfo["status"]])
self.packageMoveUpButton.config(state=ACTIVE if 0 < nodeIndex else DISABLED)
self.packageMoveDownButton.config(state=ACTIVE if nodeIndex < (len(self.packagesConfig["packages"]) - 1) else DISABLED)
self.packageReloadButton.config(state=ACTIVE)
self.packageRemoveButton.config(state=ACTIVE)
else:
self.selectedPackageIndex = -1
self.packageNameLabel.config(text="")
self.packageVersionHdr.config(state=DISABLED)
self.packageVersionLabel.config(text="")
self.packageLicenseHdr.config(state=DISABLED)
self.packageLicenseLabel.config(text="")
self.packageDescrHdr.config(state=DISABLED)
self.packageDescrLabel.config(text="")
self.packagePrefixesHdr.config(state=DISABLED)
self.packagePrefixesLabel.config(text="")
self.packageUrlHdr.config(state=DISABLED)
self.packageUrlLabel.config(text="")
self.packageDateHdr.config(state=DISABLED)
self.packageDateLabel.config(text="")
self.publisherHdr.config(state=DISABLED)
self.publisherLabel.config(text="")
self.publicationDateHdr.config(state=DISABLED)
self.publicationDateLabel.config(text="")
self.packageEnableButton.config(state=DISABLED, text=self.ENABLE)
self.packageMoveUpButton.config(state=DISABLED)
self.packageMoveDownButton.config(state=DISABLED)
self.packageReloadButton.config(state=DISABLED)
self.packageRemoveButton.config(state=DISABLED)
def selectFromRegistry(self):
choices = [] # list of tuple of (file name, description)
uiLang = (self.cntlr.config.get("userInterfaceLangOverride") or self.cntlr.modelManager.defaultLang or "en")[:2]
def langLabel(labels):
for _lang in uiLang, "en":
for label in labels:
if label["Language"].startswith(_lang):
return label["Label"]
for label in labels:
return label["Label"]
return ""
with open(self.webCache.getfilename("https://taxonomies.xbrl.org/api/v0/taxonomy", reload=True), 'r', errors='replace') as fh:
regPkgs = json.load(fh) # always reload
for pkgTxmy in regPkgs.get("taxonomies", []):
_name = langLabel(pkgTxmy["Name"])
_description = langLabel(pkgTxmy.get("Description"))
_version = pkgTxmy.get("Version")
_license = pkgTxmy.get("License",{}).get("Name")
_url = pkgTxmy.get("Links",{}).get("AuthoritativeURL")
choices.append((_name,
"name: {}\ndescription: {}\nversion: {}\nlicense: {}".format(
_name, _description, _version, _license),
_url, _version, _description, _license))
self.loadPackageUrl(DialogOpenArchive.selectPackage(self, choices))
def findLocally(self):
initialdir = self.cntlr.pluginDir # default plugin directory
if not self.cntlr.isMac: # can't navigate within app easily, always start in default directory
initialdir = self.cntlr.config.setdefault("packageOpenDir", initialdir)
filename = self.cntlr.uiFileDialog("open",
parent=self,
title=_("Choose taxonomy package file"),
initialdir=initialdir,
filetypes=[(_("Taxonomy package files (*.zip)"), "*.zip"),
(_("PWD Manifest (taxonomyPackage.xml)"), "taxonomyPackage.xml"),
(_("pre-PWD Manifest (*.taxonomyPackage.xml)"), "*.taxonomyPackage.xml"),
(_("pre-PWD Oasis Catalog (*catalog.xml)"), "*catalog.xml")],
defaultextension=".zip")
if filename:
# remember the chosen directory for the next package selection
self.cntlr.config["packageOpenDir"] = os.path.dirname(filename)
packageInfo = PackageManager.packageInfo(self.cntlr, filename, packageManifestName=self.manifestNamePattern)
self.loadFoundPackageInfo(packageInfo, filename)
def findOnWeb(self):
self.loadPackageUrl(DialogURL.askURL(self))
def loadPackageUrl(self, url):
if url: # url is the in-cache or local file
packageInfo = PackageManager.packageInfo(self.cntlr, url, packageManifestName=self.manifestNamePattern)
self.cntlr.showStatus("") # clear web loading status
self.loadFoundPackageInfo(packageInfo, url)
def manifestName(self):
self.manifestNamePattern = simpledialog.askstring(_("Archive manifest file name pattern"),
_("Provide non-standard archive manifest file name pattern (e.g., *taxonomyPackage.xml). \n"
"Uses unix file name pattern matching. \n"
"Multiple manifest files are supported in archive (such as oasis catalogs). \n"
"(If blank, search for either .taxonomyPackage.xml or catalog.xml). "),
initialvalue=self.manifestNamePattern,
parent=self)
def loadFoundPackageInfo(self, packageInfo, url):
if packageInfo and packageInfo.get("name"):
self.addPackageInfo(packageInfo)
self.loadTreeViews()
else:
messagebox.showwarning(_("Package is not itself a taxonomy package. "),
_("File does not itself contain a manifest file: \n\n{0}\n\n "
"If opening an archive file, the manifest file search pattern currently is \"\", please press \"Manifest\" to change manifest file name pattern, e.g.,, \"*.taxonomyPackage.xml\", if needed. ")
.format(url),
parent=self)
def removePackageInfo(self, name, version):
# find package entry
packagesList = self.packagesConfig["packages"]
j = -1
for i, packageInfo in enumerate(packagesList):
if packageInfo['name'] == name and packageInfo['version'] == version:
j = i
break
if 0 <= j < len(packagesList):
del packagesList[j]
self.packagesConfigChanged = True
def addPackageInfo(self, packageInfo):
name = packageInfo["name"]
version = packageInfo["version"]
self.removePackageInfo(name, version) # remove any prior entry for this package
self.packageNamesWithNewerFileDates.discard(name) # no longer has an update available
self.packagesConfig["packages"].append(packageInfo)
PackageManager.rebuildRemappings(self.cntlr)
self.packagesConfigChanged = True
def packageEnable(self):
if 0 <= self.selectedPackageIndex < len(self.packagesConfig["packages"]):
packageInfo = self.packagesConfig["packages"][self.selectedPackageIndex]
if self.packageEnableButton['text'] == self.ENABLE:
packageInfo["status"] = "enabled"
self.packageEnableButton['text'] = self.DISABLE
elif self.packageEnableButton['text'] == self.DISABLE:
packageInfo["status"] = "disabled"
self.packageEnableButton['text'] = self.ENABLE
self.packagesConfigChanged = True
PackageManager.rebuildRemappings(self.cntlr)
self.loadTreeViews()
def packageMoveUp(self):
if 1 <= self.selectedPackageIndex < len(self.packagesConfig["packages"]):
packages = self.packagesConfig["packages"]
packageInfo = packages[self.selectedPackageIndex]
del packages[self.selectedPackageIndex]
packages.insert(self.selectedPackageIndex -1, packageInfo)
self.packagesConfigChanged = True
PackageManager.rebuildRemappings(self.cntlr)
self.loadTreeViews()
def packageMoveDown(self):
if 0 <= self.selectedPackageIndex < len(self.packagesConfig["packages"]) - 1:
packages = self.packagesConfig["packages"]
packageInfo = packages[self.selectedPackageIndex]
del packages[self.selectedPackageIndex]
packages.insert(self.selectedPackageIndex + 1, packageInfo)
self.packagesConfigChanged = True
PackageManager.rebuildRemappings(self.cntlr)
self.loadTreeViews()
def packageReload(self):
if 0 <= self.selectedPackageIndex < len(self.packagesConfig["packages"]):
packageInfo = self.packagesConfig["packages"][self.selectedPackageIndex]
url = packageInfo.get("URL")
if url:
packageInfo = PackageManager.packageInfo(self.cntlr, url, reload=True, packageManifestName=packageInfo.get("manifestName"))
if packageInfo:
self.addPackageInfo(packageInfo)
PackageManager.rebuildRemappings(self.cntlr)
self.loadTreeViews()
self.cntlr.showStatus(_("{0} reloaded").format(packageInfo.get("name")), clearAfter=5000)
else:
messagebox.showwarning(_("Package error"),
_("File or package cannot be reloaded: \n\n{0}")
.format(url),
parent=self)
def packageRemove(self):
if 0 <= self.selectedPackageIndex < len(self.packagesConfig["packages"]):
packageInfo = self.packagesConfig["packages"][self.selectedPackageIndex]
self.removePackageInfo(packageInfo["name"], packageInfo["version"])
self.packagesConfigChanged = True
PackageManager.rebuildRemappings(self.cntlr)
self.loadTreeViews()
def enableAll(self):
self.enableDisableAll(True)
def disableAll(self):
self.enableDisableAll(False)
def enableDisableAll(self, doEnable):
for iPkg in range(len(self.packagesConfig["packages"])):
packageInfo = self.packagesConfig["packages"][iPkg]
if doEnable:
packageInfo["status"] = "enabled"
self.packageEnableButton['text'] = self.DISABLE
else:
packageInfo["status"] = "disabled"
self.packageEnableButton['text'] = self.ENABLE
self.packagesConfigChanged = True
PackageManager.rebuildRemappings(self.cntlr)
self.loadTreeViews()
|
livereload_tests.py
|
#!/usr/bin/env python
import contextlib
import email
import io
import os
import sys
import threading
import time
import unittest
from pathlib import Path
from unittest import mock
from mkdocs.livereload import LiveReloadServer
from mkdocs.tests.base import tempdir
class FakeRequest:
def __init__(self, content):
self.in_file = io.BytesIO(content.encode())
self.out_file = io.BytesIO()
self.out_file.close = lambda: None
def makefile(self, *args, **kwargs):
return self.in_file
def sendall(self, data):
self.out_file.write(data)
@contextlib.contextmanager
def testing_server(root, builder=lambda: None, mount_path="/"):
"""Create the server and start most of its parts, but don't listen on a socket."""
with mock.patch("socket.socket"):
server = LiveReloadServer(
builder,
host="localhost",
port=0,
root=root,
mount_path=mount_path,
build_delay=0.1,
bind_and_activate=False,
)
server.setup_environ()
server.observer.start()
thread = threading.Thread(target=server._build_loop, daemon=True)
thread.start()
yield server
server.shutdown()
thread.join()
def do_request(server, content):
request = FakeRequest(content + " HTTP/1.1")
server.RequestHandlerClass(request, ("127.0.0.1", 0), server)
response = request.out_file.getvalue()
headers, _, content = response.partition(b"\r\n\r\n")
status, _, headers = headers.partition(b"\r\n")
status = status.split(None, 1)[1].decode()
headers = email.message_from_bytes(headers)
headers["_status"] = status
return headers, content.decode()
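# Usage sketch (illustrative, mirroring the tests below): the two helpers are combined as
#   with testing_server(site_dir) as server:
#       headers, body = do_request(server, "GET /test.css")
#       assert headers["_status"] == "200 OK"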
SCRIPT_REGEX = (
r'<script src="/js/livereload.js"></script><script>livereload\([0-9]+, [0-9]+\);</script>'
)
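# The live-reload server injects a <script> snippet into the HTML it serves; SCRIPT_REGEX matches
# that injected snippet so the assertions below can pin down exactly where it was inserted.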
class BuildTests(unittest.TestCase):
@tempdir({"test.css": "div { color: red; }"})
def test_serves_normal_file(self, site_dir):
with testing_server(site_dir) as server:
headers, output = do_request(server, "GET /test.css")
self.assertEqual(output, "div { color: red; }")
self.assertEqual(headers["_status"], "200 OK")
self.assertEqual(headers.get("content-length"), str(len(output)))
@tempdir({"docs/foo.docs": "docs1", "mkdocs.yml": "yml1"})
@tempdir({"foo.site": "original"})
def test_basic_rebuild(self, site_dir, origin_dir):
docs_dir = Path(origin_dir, "docs")
started_building = threading.Event()
def rebuild():
started_building.set()
Path(site_dir, "foo.site").write_text(
Path(docs_dir, "foo.docs").read_text() + Path(origin_dir, "mkdocs.yml").read_text()
)
with testing_server(site_dir, rebuild) as server:
server.watch(docs_dir, rebuild)
server.watch(Path(origin_dir, "mkdocs.yml"), rebuild)
time.sleep(0.01)
_, output = do_request(server, "GET /foo.site")
self.assertEqual(output, "original")
Path(docs_dir, "foo.docs").write_text("docs2")
self.assertTrue(started_building.wait(timeout=10))
started_building.clear()
_, output = do_request(server, "GET /foo.site")
self.assertEqual(output, "docs2yml1")
Path(origin_dir, "mkdocs.yml").write_text("yml2")
self.assertTrue(started_building.wait(timeout=10))
started_building.clear()
_, output = do_request(server, "GET /foo.site")
self.assertEqual(output, "docs2yml2")
@tempdir({"foo.docs": "a"})
@tempdir({"foo.site": "original"})
def test_rebuild_after_delete(self, site_dir, docs_dir):
started_building = threading.Event()
def rebuild():
started_building.set()
Path(site_dir, "foo.site").unlink()
with testing_server(site_dir, rebuild) as server:
server.watch(docs_dir, rebuild)
time.sleep(0.01)
Path(docs_dir, "foo.docs").write_text("b")
self.assertTrue(started_building.wait(timeout=10))
with self.assertLogs("mkdocs.livereload"):
_, output = do_request(server, "GET /foo.site")
self.assertIn("404", output)
@tempdir({"aaa": "something"})
def test_rebuild_after_rename(self, site_dir):
started_building = threading.Event()
with testing_server(site_dir, started_building.set) as server:
server.watch(site_dir)
time.sleep(0.01)
Path(site_dir, "aaa").rename(Path(site_dir, "bbb"))
self.assertTrue(started_building.wait(timeout=10))
@tempdir()
def test_no_rebuild_on_edit(self, site_dir):
started_building = threading.Event()
with open(Path(site_dir, "test"), "wb") as f:
time.sleep(0.01)
with testing_server(site_dir, started_building.set) as server:
server.watch(site_dir)
time.sleep(0.01)
f.write(b"hi\n")
f.flush()
self.assertFalse(started_building.wait(timeout=0.2))
@tempdir({"foo.docs": "a"})
@tempdir({"foo.site": "original"})
def test_custom_action_warns(self, site_dir, docs_dir):
started_building = threading.Event()
def rebuild():
started_building.set()
content = Path(docs_dir, "foo.docs").read_text()
Path(site_dir, "foo.site").write_text(content * 5)
with testing_server(site_dir) as server:
with self.assertWarnsRegex(DeprecationWarning, "func") as cm:
server.watch(docs_dir, rebuild)
time.sleep(0.01)
self.assertIn("livereload_tests.py", cm.filename)
Path(docs_dir, "foo.docs").write_text("b")
self.assertTrue(started_building.wait(timeout=10))
_, output = do_request(server, "GET /foo.site")
self.assertEqual(output, "bbbbb")
@tempdir({"foo.docs": "docs1"})
@tempdir({"foo.extra": "extra1"})
@tempdir({"foo.site": "original"})
def test_multiple_dirs_can_cause_rebuild(self, site_dir, extra_dir, docs_dir):
started_building = threading.Barrier(2)
def rebuild():
started_building.wait(timeout=10)
content1 = Path(docs_dir, "foo.docs").read_text()
content2 = Path(extra_dir, "foo.extra").read_text()
Path(site_dir, "foo.site").write_text(content1 + content2)
with testing_server(site_dir, rebuild) as server:
server.watch(docs_dir)
server.watch(extra_dir)
time.sleep(0.01)
Path(docs_dir, "foo.docs").write_text("docs2")
started_building.wait(timeout=10)
_, output = do_request(server, "GET /foo.site")
self.assertEqual(output, "docs2extra1")
Path(extra_dir, "foo.extra").write_text("extra2")
started_building.wait(timeout=10)
_, output = do_request(server, "GET /foo.site")
self.assertEqual(output, "docs2extra2")
@tempdir({"foo.docs": "docs1"})
@tempdir({"foo.extra": "extra1"})
@tempdir({"foo.site": "original"})
def test_multiple_dirs_changes_rebuild_only_once(self, site_dir, extra_dir, docs_dir):
started_building = threading.Event()
def rebuild():
self.assertFalse(started_building.is_set())
started_building.set()
content1 = Path(docs_dir, "foo.docs").read_text()
content2 = Path(extra_dir, "foo.extra").read_text()
Path(site_dir, "foo.site").write_text(content1 + content2)
with testing_server(site_dir, rebuild) as server:
server.watch(docs_dir)
server.watch(extra_dir)
time.sleep(0.01)
_, output = do_request(server, "GET /foo.site")
Path(docs_dir, "foo.docs").write_text("docs2")
Path(extra_dir, "foo.extra").write_text("extra2")
self.assertTrue(started_building.wait(timeout=10))
_, output = do_request(server, "GET /foo.site")
self.assertEqual(output, "docs2extra2")
@tempdir({"foo.docs": "a"})
@tempdir({"foo.site": "original"})
def test_change_is_detected_while_building(self, site_dir, docs_dir):
before_finished_building = threading.Barrier(2)
can_finish_building = threading.Event()
def rebuild():
content = Path(docs_dir, "foo.docs").read_text()
Path(site_dir, "foo.site").write_text(content * 5)
before_finished_building.wait(timeout=10)
self.assertTrue(can_finish_building.wait(timeout=10))
with testing_server(site_dir, rebuild) as server:
server.watch(docs_dir)
time.sleep(0.01)
Path(docs_dir, "foo.docs").write_text("b")
before_finished_building.wait(timeout=10)
Path(docs_dir, "foo.docs").write_text("c")
can_finish_building.set()
_, output = do_request(server, "GET /foo.site")
self.assertEqual(output, "bbbbb")
before_finished_building.wait(timeout=10)
_, output = do_request(server, "GET /foo.site")
self.assertEqual(output, "ccccc")
@tempdir(
{
"normal.html": "<html><body>hello</body></html>",
"no_body.html": "<p>hi",
"empty.html": "",
"multi_body.html": "<body>foo</body><body>bar</body>",
}
)
def test_serves_modified_html(self, site_dir):
with testing_server(site_dir) as server:
headers, output = do_request(server, "GET /normal.html")
self.assertRegex(output, fr"^<html><body>hello{SCRIPT_REGEX}</body></html>$")
self.assertEqual(headers.get("content-type"), "text/html")
self.assertEqual(headers.get("content-length"), str(len(output)))
_, output = do_request(server, "GET /no_body.html")
self.assertRegex(output, fr"^<p>hi{SCRIPT_REGEX}$")
headers, output = do_request(server, "GET /empty.html")
self.assertRegex(output, fr"^{SCRIPT_REGEX}$")
self.assertEqual(headers.get("content-length"), str(len(output)))
_, output = do_request(server, "GET /multi_body.html")
self.assertRegex(output, fr"^<body>foo</body><body>bar{SCRIPT_REGEX}</body>$")
@tempdir({"index.html": "<body>aaa</body>", "foo/index.html": "<body>bbb</body>"})
def test_serves_modified_index(self, site_dir):
with testing_server(site_dir) as server:
headers, output = do_request(server, "GET /")
self.assertRegex(output, fr"^<body>aaa{SCRIPT_REGEX}</body>$")
self.assertEqual(headers["_status"], "200 OK")
self.assertEqual(headers.get("content-type"), "text/html")
self.assertEqual(headers.get("content-length"), str(len(output)))
_, output = do_request(server, "GET /foo/")
self.assertRegex(output, fr"^<body>bbb{SCRIPT_REGEX}</body>$")
@tempdir()
def test_serves_js(self, site_dir):
with testing_server(site_dir) as server:
for mount_path in "/", "/sub/":
server.mount_path = mount_path
headers, output = do_request(server, "GET /js/livereload.js")
self.assertIn("function livereload", output)
self.assertEqual(headers["_status"], "200 OK")
self.assertEqual(headers.get("content-type"), "application/javascript")
@tempdir()
def test_serves_polling_instantly(self, site_dir):
with testing_server(site_dir) as server:
_, output = do_request(server, "GET /livereload/0/0")
self.assertTrue(output.isdigit())
@tempdir()
@tempdir()
def test_serves_polling_after_event(self, site_dir, docs_dir):
with testing_server(site_dir) as server:
initial_epoch = server._visible_epoch
server.watch(docs_dir)
time.sleep(0.01)
Path(docs_dir, "foo.docs").write_text("b")
_, output = do_request(server, f"GET /livereload/{initial_epoch}/0")
self.assertNotEqual(server._visible_epoch, initial_epoch)
self.assertEqual(output, str(server._visible_epoch))
@tempdir()
def test_serves_polling_with_timeout(self, site_dir):
with testing_server(site_dir) as server:
server.poll_response_timeout = 0.2
initial_epoch = server._visible_epoch
start_time = time.monotonic()
_, output = do_request(server, f"GET /livereload/{initial_epoch}/0")
self.assertGreaterEqual(time.monotonic(), start_time + 0.2)
self.assertEqual(output, str(initial_epoch))
@tempdir()
def test_error_handler(self, site_dir):
with testing_server(site_dir) as server:
server.error_handler = lambda code: b"[%d]" % code
with self.assertLogs("mkdocs.livereload") as cm:
headers, output = do_request(server, "GET /missing")
self.assertEqual(headers["_status"], "404 Not Found")
self.assertEqual(output, "[404]")
self.assertRegex(
"\n".join(cm.output),
r'^WARNING:mkdocs.livereload:.*"GET /missing HTTP/1.1" code 404',
)
@tempdir()
def test_bad_error_handler(self, site_dir):
self.maxDiff = None
with testing_server(site_dir) as server:
server.error_handler = lambda code: 0 / 0
with self.assertLogs("mkdocs.livereload") as cm:
headers, output = do_request(server, "GET /missing")
self.assertEqual(headers["_status"], "404 Not Found")
self.assertIn("404", output)
self.assertRegex(
"\n".join(cm.output), r"Failed to render an error message[\s\S]+/missing.+code 404"
)
@tempdir(
{
"test.html": "<!DOCTYPE html>\nhi",
"test.xml": '<?xml version="1.0" encoding="UTF-8"?>\n<foo></foo>',
"test.css": "div { color: red; }",
"test.js": "use strict;",
"test.json": '{"a": "b"}',
}
)
def test_mime_types(self, site_dir):
with testing_server(site_dir) as server:
headers, _ = do_request(server, "GET /test.html")
self.assertEqual(headers.get("content-type"), "text/html")
headers, _ = do_request(server, "GET /test.xml")
self.assertIn(headers.get("content-type"), ["text/xml", "application/xml"])
headers, _ = do_request(server, "GET /test.css")
self.assertEqual(headers.get("content-type"), "text/css")
headers, _ = do_request(server, "GET /test.js")
self.assertEqual(headers.get("content-type"), "application/javascript")
headers, _ = do_request(server, "GET /test.json")
self.assertEqual(headers.get("content-type"), "application/json")
@tempdir({"index.html": "<body>aaa</body>", "sub/sub.html": "<body>bbb</body>"})
def test_serves_from_mount_path(self, site_dir):
with testing_server(site_dir, mount_path="/sub") as server:
headers, output = do_request(server, "GET /sub/")
self.assertRegex(output, fr"^<body>aaa{SCRIPT_REGEX}</body>$")
self.assertEqual(headers.get("content-type"), "text/html")
_, output = do_request(server, "GET /sub/sub/sub.html")
self.assertRegex(output, fr"^<body>bbb{SCRIPT_REGEX}</body>$")
with self.assertLogs("mkdocs.livereload"):
headers, _ = do_request(server, "GET /sub/sub.html")
self.assertEqual(headers["_status"], "404 Not Found")
@tempdir()
def test_redirects_to_mount_path(self, site_dir):
with testing_server(site_dir, mount_path="/mount/path") as server:
with self.assertLogs("mkdocs.livereload"):
headers, _ = do_request(server, "GET /")
self.assertEqual(headers["_status"], "302 Found")
self.assertEqual(headers.get("location"), "/mount/path/")
@tempdir({"mkdocs.yml": "original", "mkdocs2.yml": "original"}, prefix="tmp_dir")
@tempdir(prefix="origin_dir")
@tempdir({"subdir/foo.md": "original"}, prefix="dest_docs_dir")
def test_watches_direct_symlinks(self, dest_docs_dir, origin_dir, tmp_dir):
try:
Path(origin_dir, "docs").symlink_to(dest_docs_dir, target_is_directory=True)
Path(origin_dir, "mkdocs.yml").symlink_to(Path(tmp_dir, "mkdocs.yml"))
except NotImplementedError: # PyPy on Windows
self.skipTest("Creating symlinks not supported")
started_building = threading.Event()
def wait_for_build():
result = started_building.wait(timeout=10)
started_building.clear()
with self.assertLogs("mkdocs.livereload"):
do_request(server, "GET /")
return result
with testing_server(tmp_dir, started_building.set) as server:
server.watch(Path(origin_dir, "docs"))
server.watch(Path(origin_dir, "mkdocs.yml"))
time.sleep(0.01)
Path(tmp_dir, "mkdocs.yml").write_text("edited")
self.assertTrue(wait_for_build())
Path(dest_docs_dir, "subdir", "foo.md").write_text("edited")
self.assertTrue(wait_for_build())
Path(origin_dir, "unrelated.md").write_text("foo")
self.assertFalse(started_building.wait(timeout=0.2))
@tempdir(["file_dest_1.md", "file_dest_2.md", "file_dest_unused.md"], prefix="tmp_dir")
@tempdir(["file_under.md"], prefix="dir_to_link_to")
@tempdir()
def test_watches_through_symlinks(self, docs_dir, dir_to_link_to, tmp_dir):
try:
Path(docs_dir, "link1.md").symlink_to(Path(tmp_dir, "file_dest_1.md"))
Path(docs_dir, "linked_dir").symlink_to(dir_to_link_to, target_is_directory=True)
Path(dir_to_link_to, "sublink.md").symlink_to(Path(tmp_dir, "file_dest_2.md"))
except NotImplementedError: # PyPy on Windows
self.skipTest("Creating symlinks not supported")
started_building = threading.Event()
def wait_for_build():
result = started_building.wait(timeout=10)
started_building.clear()
with self.assertLogs("mkdocs.livereload"):
do_request(server, "GET /")
return result
with testing_server(docs_dir, started_building.set) as server:
server.watch(docs_dir)
time.sleep(0.01)
Path(tmp_dir, "file_dest_1.md").write_text("edited")
self.assertTrue(wait_for_build())
Path(dir_to_link_to, "file_under.md").write_text("edited")
self.assertTrue(wait_for_build())
Path(tmp_dir, "file_dest_2.md").write_text("edited")
self.assertTrue(wait_for_build())
Path(docs_dir, "link1.md").unlink()
self.assertTrue(wait_for_build())
Path(tmp_dir, "file_dest_unused.md").write_text("edited")
self.assertFalse(started_building.wait(timeout=0.2))
@tempdir(prefix="site_dir")
@tempdir(["docs/unused.md", "README.md"], prefix="origin_dir")
def test_watches_through_relative_symlinks(self, origin_dir, site_dir):
docs_dir = Path(origin_dir, "docs")
old_cwd = os.getcwd()
os.chdir(docs_dir)
try:
Path(docs_dir, "README.md").symlink_to(Path("..", "README.md"))
except NotImplementedError: # PyPy on Windows
self.skipTest("Creating symlinks not supported")
finally:
os.chdir(old_cwd)
started_building = threading.Event()
with testing_server(docs_dir, started_building.set) as server:
server.watch(docs_dir)
time.sleep(0.01)
Path(origin_dir, "README.md").write_text("edited")
self.assertTrue(started_building.wait(timeout=10))
@tempdir()
def test_watch_with_broken_symlinks(self, docs_dir):
Path(docs_dir, "subdir").mkdir()
try:
if sys.platform != "win32":
Path(docs_dir, "subdir", "circular").symlink_to(Path(docs_dir))
Path(docs_dir, "broken_1").symlink_to(Path(docs_dir, "oh no"))
Path(docs_dir, "broken_2").symlink_to(Path(docs_dir, "oh no"), target_is_directory=True)
Path(docs_dir, "broken_3").symlink_to(Path(docs_dir, "broken_2"))
except NotImplementedError: # PyPy on Windows
self.skipTest("Creating symlinks not supported")
started_building = threading.Event()
with testing_server(docs_dir, started_building.set) as server:
server.watch(docs_dir)
time.sleep(0.01)
Path(docs_dir, "subdir", "test").write_text("test")
self.assertTrue(started_building.wait(timeout=10))
|
app.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import six
import threading
import json
from PIL import Image
from BaseHTTPServer import HTTPServer
import SocketServer as socketserver
from CGIHTTPServer import CGIHTTPRequestHandler
from Queue import Queue
sys.path.append('./cgi-bin')
# Switch LED control class
# (Linux:Real, Otherwise:Virtual)
if (os.name == 'posix'):
posix = True
import LEDcontrol as LEDcontrol
else:
posix = False
import LEDcontrol_test as LEDcontrol
# LED data
import LEDdata
type_LEDs = LEDdata.get_type_LEDs()
dest_LEDs = LEDdata.get_dest_LEDs()
# LED parameters
rows = 32
chain_length = 4
brightness = 50
# Showing parameters
background_color = (0, 0, 0)
interval = 0.1
time_chg = 3.0
xpos = {}
xpos['type'] = 0
xpos['dest'] = 48
def get_image_array(type_idx, dest_idx):
"""Get an image array from type and destination indices.
Destination images change according to the line of the type (utl or ssl).
Args:
type_idx: A type index starting from 1 (sent by a browser).
dest_idx: A destination index starting from 1 (sent by a browser).
Returns:
A dictionary storing images to show.
Its keys are "type" and "dest", which correspond to keys in xpos.
dict['type'] = (RGBTypeImage,)
dict['dest'] = (RGBDestinationImage, RGBLineImage)
"""
images = {}
# Load type image and line
if (type_idx > 0):
type_LED = type_LEDs[type_idx - 1]
images['type'] = (LEDdata.load_image(type_LED[0]),)
line = type_LED[2]
line_text = type_LED[1]
else:
line = 'utl'
line_text = ''
# Load destination images related to line
if (dest_idx > 0):
dest_LED = dest_LEDs[dest_idx - 1]
images['dest'] = (LEDdata.load_image(dest_LED[0][line][0]), LEDdata.load_image(dest_LED[0][line][1]))
dest_text = dest_LED[1]
else:
dest_text = ''
print('%s | %s' % (line_text, dest_text))
return images
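# Illustrative return shape (assuming both indices are valid):
#   get_image_array(1, 1) -> {'type': (<type image>,), 'dest': (<destination image>, <line image>)}
# The keys mirror xpos so get_LED_image() can paste each image at its x offset.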
def get_LED_image(images, indexes):
"""Get image to show in LED.
Args:
images: An image dictionary given by get_image_array.
indexes: An index dictionary to choose an image from the array to show.
Returns:
A composed image.
"""
im = Image.new('RGB', (rows * chain_length, rows), background_color)
for k in indexes:
if (not k in xpos):
continue
if (not k in images):
continue
else:
idx = indexes[k]
im.paste(images[k][idx], (xpos[k], 0))
return im
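# With the parameters above the composed canvas is rows * chain_length = 32 * 4 = 128 px wide and
# 32 px tall; 'type' images are pasted at x = 0 and 'dest' images at x = 48 (see xpos).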
class cgi_http_handler(CGIHTTPRequestHandler):
"""CGI handler to process a command sent from a browser."""
def __init__(self, request, client_address, server):
"""Constructor."""
CGIHTTPRequestHandler.__init__(self, request, client_address, server)
def do_POST(self):
"""Process a command."""
# Receive new command
content_len = int(self.headers.get('content-length'))
requestBody = self.rfile.read(content_len).decode('UTF-8')
jsonData = json.loads(requestBody)
# Add command and queue
self.server.q.put(jsonData)
# Send acknowledge of command
self.send_response(100)
self.send_header('Content-type', 'text/html')
self.end_headers()
return
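# Commands are JSON objects of the form {'type': <index>, 'dest': <index>} (see the initial queue
# entry in __main__); do_POST only queues them, and the main loop below consumes and displays them.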
class threaded_http_server(socketserver.ThreadingMixIn, HTTPServer):
pass
if __name__ == '__main__':
# Initialize queue
q = Queue()
q.put({'type':1, 'dest':1})
# Initialize LED control thread
LED_controller_thread = LEDcontrol.controller(rows, chain_length, brightness)
LED_controller_thread.background_color = background_color
LED_controller_thread.start()
# Initialize HTTP server
server = threaded_http_server(('', 8000), cgi_http_handler)
server.q = q
ip, port = server.server_address
server_thread = threading.Thread(target = server.serve_forever)
server_thread.setDaemon(True)
server_thread.start()
# Main loop
try:
print('Press Ctrl-C to exit')
while (True):
# Receive new command
if (not q.empty()):
data = q.get()
images = get_image_array(data['type'], data['dest'])
timer = 0
idx = {}
for k in images.keys():
idx[k] = 0
LED_controller_thread.img = get_LED_image(images, idx)
LED_controller_thread.update_event.set()
# Show and switch images
timer += 1
if (timer >= (time_chg / interval)):
timer = 0
for k in idx.keys():
idx[k] += 1
if (idx[k] == len(images[k])):
idx[k] = 0
LED_controller_thread.img = get_LED_image(images, idx)
LED_controller_thread.update_event.set()
LEDcontrol.wait(interval)
except KeyboardInterrupt:
pass
finally:
LED_controller_thread.stop_event.set()
server.shutdown()
# Terminate
LED_controller_thread.join()
server_thread.join()
|
dothread.py
|
import time, threading
def loop():
print('thread %s is running...' % threading.current_thread().name)
n = 0
while n < 5:
n = n + 1
print('thread %s >>> %s' % (threading.current_thread().name, n))
time.sleep(1)
print('thread %s ended' % threading.current_thread().name)
print('thread %s is running...' % threading.current_thread().name)
t = threading.Thread(target=loop, name='LoopThread')
t.start()
t.join()
print('thread %s ended' % threading.current_thread().name)
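# Expected output (illustrative): the main thread prints its "running" line, LoopThread counts
# 1 through 5 one second apart and prints "thread LoopThread ended", then the main thread
# prints "thread MainThread ended".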
|
bot.py
|
#!/usr/bin/env python3
"""
bot.py - Phenny IRC Bot
Copyright 2008, Sean B. Palmer, inamidst.com
Licensed under the Eiffel Forum License 2.
http://inamidst.com/phenny/
"""
import importlib
import irc
import logging
import os
import re
import sys
import threading
import traceback
import tools
logger = logging.getLogger('phenny')
home = os.getcwd()
def decode(bytes):
if type(bytes) == str:
return bytes
try:
text = bytes.decode('utf-8')
except UnicodeDecodeError:
try:
text = bytes.decode('iso-8859-1')
except UnicodeDecodeError:
text = bytes.decode('cp1252')
except AttributeError:
return bytes
return text
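# Example (illustrative): decode(b'caf\xc3\xa9') == 'café'; byte strings that are not valid UTF-8
# fall back to iso-8859-1 and then cp1252, and non-bytes values are returned unchanged.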
class Phenny(irc.Bot):
def __init__(self, config):
args = (config.nick, config.name, config.channels, config.password)
irc.Bot.__init__(self, *args)
self.config = config
self.doc = {}
self.stats = {}
self.setup()
def setup(self):
self.variables = {}
filenames = []
if not hasattr(self.config, 'enable'):
for fn in os.listdir(os.path.join(home, 'modules')):
if fn.endswith('.py') and not fn.startswith('_'):
filenames.append(os.path.join(home, 'modules', fn))
else:
for fn in self.config.enable:
filenames.append(os.path.join(home, 'modules', fn + '.py'))
if hasattr(self.config, 'extra'):
for fn in self.config.extra:
if os.path.isfile(fn):
filenames.append(fn)
elif os.path.isdir(fn):
for n in os.listdir(fn):
if n.endswith('.py') and not n.startswith('_'):
filenames.append(os.path.join(fn, n))
tools.setup(self)
modules = []
excluded_modules = getattr(self.config, 'exclude', [])
for filename in filenames:
name = os.path.basename(filename)[:-3]
if name in excluded_modules: continue
# if name in sys.modules:
# del sys.modules[name]
try:
module_loader = importlib.machinery.SourceFileLoader(name, filename)
module = module_loader.load_module()
if hasattr(module, 'setup'):
module.setup(self)
except Exception as e:
trace = traceback.format_exc()
logger.error("Error loading %s module:\n%s" % (name, trace))
else:
self.register(module)
modules.append(name)
if modules:
logger.info('Registered modules: ' + ', '.join(modules))
else:
logger.warning("Couldn't find any modules")
self.bind_commands()
def register(self, module):
# This is used by reload.py, hence it being methodised
if module.__name__ not in self.variables:
self.variables[module.__name__] = {}
for name, obj in vars(module).items():
if hasattr(obj, 'commands') or hasattr(obj, 'rule'):
self.variables[module.__name__][name] = obj
def bind(self, module, name, func, regexp):
# register documentation
if not hasattr(func, 'name'):
func.name = func.__name__
if func.__doc__:
if hasattr(func, 'example'):
example = func.example
example = example.replace('$nickname', self.nick)
else: example = None
self.doc[func.name] = (func.__doc__, example)
self.commands[func.priority].setdefault(regexp, []).append(func)
def bind_command(self, module, name, func):
logger.debug("Binding module '{:}' command '{:}'".format(module, name))
if not hasattr(func, 'priority'):
func.priority = 'medium'
if not hasattr(func, 'thread'):
func.thread = True
if not hasattr(func, 'event'):
func.event = 'PRIVMSG'
else:
func.event = func.event.upper()
def sub(pattern, self=self):
# These replacements have significant order
pattern = pattern.replace('$nickname', re.escape(self.nick))
return pattern.replace('$nick', r'%s[,:] +' % re.escape(self.nick))
if hasattr(func, 'rule'):
if isinstance(func.rule, str):
pattern = sub(func.rule)
regexp = re.compile(pattern)
self.bind(module, name, func, regexp)
if isinstance(func.rule, tuple):
# 1) e.g. ('$nick', '(.*)')
if len(func.rule) == 2 and isinstance(func.rule[0], str):
prefix, pattern = func.rule
prefix = sub(prefix)
regexp = re.compile(prefix + pattern)
self.bind(module, name, func, regexp)
# 2) e.g. (['p', 'q'], '(.*)')
elif len(func.rule) == 2 and isinstance(func.rule[0], list):
prefix = self.config.prefix
commands, pattern = func.rule
for command in commands:
command = r'(%s)\b(?: +(?:%s))?' % (command, pattern)
regexp = re.compile(prefix + command)
self.bind(module, name, func, regexp)
# 3) e.g. ('$nick', ['p', 'q'], '(.*)')
elif len(func.rule) == 3:
prefix, commands, pattern = func.rule
prefix = sub(prefix)
for command in commands:
command = r'(%s) +' % command
regexp = re.compile(prefix + command + pattern)
self.bind(module, name, func, regexp)
if hasattr(func, 'commands'):
for command in func.commands:
template = r'^%s(%s)(?: +(.*))?$'
pattern = template % (self.config.prefix, command)
regexp = re.compile(pattern)
self.bind(module, name, func, regexp)
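# Rule shapes handled above (illustrative examples, not taken from any particular module):
#   func.rule = r'hello'                          plain regex
#   func.rule = ('$nick', r'(.*)')                nick prefix + pattern
#   func.rule = (['p', 'q'], r'(.*)')             command list, prefixed by config.prefix
#   func.rule = ('$nick', ['p', 'q'], r'(.*)')    nick prefix + command list + pattern
#   func.commands = ['tell']                      classic prefix-command binding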
def bind_commands(self):
self.commands = {'high': {}, 'medium': {}, 'low': {}}
for module, functions in self.variables.items():
for name, func in functions.items():
self.bind_command(module, name, func)
def wrapped(self, origin, text, match):
class PhennyWrapper(object):
def __init__(self, phenny):
self.bot = phenny
def __getattr__(self, attr):
sender = origin.sender or text
if attr == 'reply':
return (lambda msg:
self.bot.msg(sender, origin.nick + ': ' + msg))
elif attr == 'say':
return lambda msg: self.bot.msg(sender, msg)
elif attr == 'do':
return lambda msg: self.bot.action(sender, msg)
return getattr(self.bot, attr)
return PhennyWrapper(self)
def input(self, origin, text, bytes, match, event, args):
class CommandInput(str):
def __new__(cls, text, origin, bytes, match, event, args):
s = str.__new__(cls, text)
s.sender = decode(origin.sender)
s.nick = decode(origin.nick)
s.event = event
s.bytes = bytes
s.match = match
s.group = match.group
s.groups = match.groups
s.args = args
s.admin = s.nick in self.config.admins
s.owner = s.nick == self.config.owner
s.chans = self.config.channels
#s.bot = self.bot
return s
return CommandInput(text, origin, bytes, match, event, args)
def call(self, func, origin, phenny, input):
try: func(phenny, input)
except tools.GrumbleError as e:
self.msg(origin.sender, str(e))
except Exception as e:
self.error(origin)
def limit(self, origin, func):
if origin.sender and origin.sender.startswith('#'):
if hasattr(self.config, 'limit'):
limits = self.config.limit.get(origin.sender)
if limits and (func.__module__ not in limits):
return True
return False
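# Hedged example of the optional config.limit mapping used above:
#   config.limit = {'#channel': ['module_a', 'module_b']}   # only these modules may answer there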
def dispatch(self, origin, args):
bytes, event = args[0], args[1]
text = decode(bytes)
event = decode(event)
if origin.nick in self.config.ignore:
return
for priority in ('high', 'medium', 'low'):
items = list(self.commands[priority].items())
for regexp, funcs in items:
for func in funcs:
if event != func.event and func.event != '*': continue
match = regexp.match(text)
if match:
if self.limit(origin, func): continue
phenny = self.wrapped(origin, text, match)
input = self.input(origin, text, bytes, match, event, args)
if func.thread:
targs = (func, origin, phenny, input)
t = threading.Thread(target=self.call, args=targs, name=func.name)
t.start()
else: self.call(func, origin, phenny, input)
for source in [decode(origin.sender), decode(origin.nick)]:
try: self.stats[(func.name, source)] += 1
except KeyError:
self.stats[(func.name, source)] = 1
if __name__ == '__main__':
print(__doc__)
|
main.py
|
#!/bin/env python
"""
The MIT License
Copyright (c) 2010 The Chicago Tribune & Contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import zip
from . import bees
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
from optparse import OptionParser, OptionGroup, Values
import threading
import time
import sys
def parse_options():
"""
Handle the command line arguments for spinning up bees
"""
parser = OptionParser(usage="""
bees COMMAND [options]
Bees with Machine Guns
A utility for arming (creating) many bees (small EC2 instances) to attack
(load test) targets (web applications).
commands:
up Start a batch of load testing servers.
attack Begin the attack on a specific url.
down Shutdown and deactivate the load testing servers.
report Report the status of the load testing servers.
""")
up_group = OptionGroup(parser, "up",
"""In order to spin up new servers you will need to specify at least the -k command, which is the name of the EC2 keypair to use for creating and connecting to the new servers. The bees will expect to find a .pem file with this name in ~/.ssh/. Alternatively, bees can use SSH Agent for the key.""")
# Required
up_group.add_option('-k', '--key', metavar="KEY", nargs=1,
action='store', dest='key', type='string',
help="The ssh key pair name to use to connect to the new servers.")
up_group.add_option('-s', '--servers', metavar="SERVERS", nargs=1,
action='store', dest='servers', type='int', default=5,
help="The number of servers to start (default: 5).")
up_group.add_option('-g', '--group', metavar="GROUP", nargs=1,
action='store', dest='group', type='string', default='default',
help="The security group(s) to run the instances under (default: default).")
up_group.add_option('-z', '--zone', metavar="ZONE", nargs=1,
action='store', dest='zone', type='string', default='us-east-1d',
help="The availability zone to start the instances in (default: us-east-1d).")
up_group.add_option('-i', '--instance', metavar="INSTANCE", nargs=1,
action='store', dest='instance', type='string', default='ami-ff17fb96',
help="The instance-id to use for each server from (default: ami-ff17fb96).")
up_group.add_option('-t', '--type', metavar="TYPE", nargs=1,
action='store', dest='type', type='string', default='t1.micro',
help="The instance-type to use for each server (default: t1.micro).")
up_group.add_option('-l', '--login', metavar="LOGIN", nargs=1,
action='store', dest='login', type='string', default='newsapps',
help="The ssh username name to use to connect to the new servers (default: newsapps).")
up_group.add_option('-v', '--subnet', metavar="SUBNET", nargs=1,
action='store', dest='subnet', type='string', default=None,
help="The vpc subnet id in which the instances should be launched. (default: None).")
up_group.add_option('-b', '--bid', metavar="BID", nargs=1,
action='store', dest='bid', type='float', default=None,
help="The maximum bid price per spot instance (default: None).")
parser.add_option_group(up_group)
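# Illustrative invocation for this group (the key pair name is a placeholder):
#   bees up -s 2 -g default -z us-east-1d -k my-keypair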
attack_group = OptionGroup(parser, "attack",
"""Beginning an attack requires only that you specify the -u option with the URL you wish to target.""")
# Required
attack_group.add_option('-u', '--url', metavar="URL", nargs=1,
action='store', dest='url', type='string',
help="URL of the target to attack.")
attack_group.add_option('-K', '--keepalive', metavar="KEEP_ALIVE", nargs=0,
action='store', dest='keep_alive', type='string', default=False,
help="Keep-Alive connection.")
attack_group.add_option('-p', '--post-file', metavar="POST_FILE", nargs=1,
action='store', dest='post_file', type='string', default=False,
help="The POST file to deliver with the bee's payload.")
attack_group.add_option('-m', '--mime-type', metavar="MIME_TYPE", nargs=1,
action='store', dest='mime_type', type='string', default='text/plain',
help="The MIME type to send with the request.")
attack_group.add_option('-n', '--number', metavar="NUMBER", nargs=1,
action='store', dest='number', type='int', default=1000,
help="The number of total connections to make to the target (default: 1000).")
attack_group.add_option('-C', '--cookies', metavar="COOKIES", nargs=1, action='store', dest='cookies',
type='string', default='',
help='Cookies to send during http requests. The cookies should be passed using standard cookie formatting, separated by semi-colons and assigned with equals signs.')
attack_group.add_option('-c', '--concurrent', metavar="CONCURRENT", nargs=1,
action='store', dest='concurrent', type='int', default=100,
help="The number of concurrent connections to make to the target (default: 100).")
attack_group.add_option('-H', '--headers', metavar="HEADERS", nargs=1,
action='store', dest='headers', type='string', default='',
help="HTTP headers to send to the target to attack. Multiple headers should be separated by semi-colons, e.g header1:value1;header2:value2")
attack_group.add_option('-e', '--csv', metavar="FILENAME", nargs=1,
action='store', dest='csv_filename', type='string', default='',
help="Store the distribution of results in a csv file for all completed bees (default: '').")
attack_group.add_option('-P', '--contenttype', metavar="CONTENTTYPE", nargs=1,
action='store', dest='contenttype', type='string', default='text/plain',
help="ContentType header to send to the target of the attack.")
attack_group.add_option('-I', '--sting', metavar="sting", nargs=1,
action='store', dest='sting', type='int', default=1,
help="The flag to sting (ping to cache) url before attack (default: 1). 0: no sting, 1: sting sequentially, 2: sting in parallel")
attack_group.add_option('-S', '--seconds', metavar="SECONDS", nargs=1,
action='store', dest='seconds', type='int', default=60,
help= "hurl only: The number of total seconds to attack the target (default: 60).")
attack_group.add_option('-X', '--verb', metavar="VERB", nargs=1,
action='store', dest='verb', type='string', default='',
help= "hurl only: Request command -HTTP verb to use -GET/PUT/etc. Default GET")
attack_group.add_option('-M', '--rate', metavar="RATE", nargs=1,
action='store', dest='rate', type='int',
help= "hurl only: Max Request Rate.")
attack_group.add_option('-a', '--threads', metavar="THREADS", nargs=1,
action='store', dest='threads', type='int', default=1,
help= "hurl only: Number of parallel threads. Default: 1")
attack_group.add_option('-f', '--fetches', metavar="FETCHES", nargs=1,
action='store', dest='fetches', type='int',
help= "hurl only: Num fetches per instance.")
attack_group.add_option('-d', '--timeout', metavar="TIMEOUT", nargs=1,
action='store', dest='timeout', type='int',
help= "hurl only: Timeout (seconds).")
attack_group.add_option('-E', '--send_buffer', metavar="SEND_BUFFER", nargs=1,
action='store', dest='send_buffer', type='int',
help= "hurl only: Socket send buffer size.")
attack_group.add_option('-F', '--recv_buffer', metavar="RECV_BUFFER", nargs=1,
action='store', dest='recv_buffer', type='int',
help= "hurl only: Socket receive buffer size.")
# Optional
attack_group.add_option('-T', '--tpr', metavar='TPR', nargs=1, action='store', dest='tpr', default=None, type='float',
help='The upper bounds for time per request. If this option is passed and the target is below the value a 1 will be returned with the report details (default: None).')
attack_group.add_option('-R', '--rps', metavar='RPS', nargs=1, action='store', dest='rps', default=None, type='float',
help='The lower bounds for request per second. If this option is passed and the target is above the value a 1 will be returned with the report details (default: None).')
attack_group.add_option('-A', '--basic_auth', metavar='basic_auth', nargs=1, action='store', dest='basic_auth', default='', type='string',
help='BASIC authentication credentials, format auth-username:password (default: None).')
attack_group.add_option('-j', '--hurl', metavar="HURL_COMMANDS",
action='store_true', dest='hurl',
help="use hurl")
attack_group.add_option('-o', '--long_output', metavar="LONG_OUTPUT",
action='store_true', dest='long_output',
help="display hurl output")
attack_group.add_option('-L', '--responses_per', metavar="RESPONSE_PER",
action='store_true', dest='responses_per',
help="hurl only: Display http(s) response codes per interval instead of request statistics")
parser.add_option_group(attack_group)
(options, args) = parser.parse_args()
if len(args) <= 0:
parser.error('Please enter a command.')
command = args[0]
#set time for in between threads
delay = 0.2
if command == 'up':
if not options.key:
parser.error('To spin up new instances you need to specify a key-pair name with -k')
if options.group == 'default':
print('New bees will use the "default" EC2 security group. Please note that port 22 (SSH) is not normally open on this group. You will need to use the EC2 tools to open it before you will be able to attack.')
zone_len = options.zone.split(',')
if len(zone_len) > 1:
if len(options.instance.split(',')) != len(zone_len):
print("Your instance count does not match zone count")
sys.exit(1)
else:
ami_list = [a for a in options.instance.split(',')]
zone_list = [z for z in zone_len]
# for each ami and zone set zone and instance
for tup_val in zip(ami_list, zone_list):
options.instance, options.zone = tup_val
threading.Thread(target=bees.up, args=(options.servers, options.group,
options.zone, options.instance,
options.type,options.login,
options.key, options.subnet,
options.bid)).start()
#time allowed between threads
time.sleep(delay)
else:
bees.up(options.servers, options.group, options.zone, options.instance, options.type, options.login, options.key, options.subnet, options.bid)
elif command == 'attack':
if not options.url:
parser.error('To run an attack you need to specify a url with -u')
regions_list = []
for region in bees._get_existing_regions():
regions_list.append(region)
# urlparse needs a scheme in the url. ab doesn't, so add one just for the sake of parsing.
# urlparse('google.com').path == 'google.com' and urlparse('google.com').netloc == '' -> True
parsed = urlparse(options.url) if '://' in options.url else urlparse('http://'+options.url)
if parsed.path == '':
options.url += '/'
additional_options = dict(
cookies=options.cookies,
headers=options.headers,
post_file=options.post_file,
keep_alive=options.keep_alive,
mime_type=options.mime_type,
csv_filename=options.csv_filename,
tpr=options.tpr,
rps=options.rps,
basic_auth=options.basic_auth,
contenttype=options.contenttype,
sting=options.sting,
hurl=options.hurl,
seconds=options.seconds,
rate=options.rate,
long_output=options.long_output,
responses_per=options.responses_per,
verb=options.verb,
threads=options.threads,
fetches=options.fetches,
timeout=options.timeout,
send_buffer=options.send_buffer,
recv_buffer=options.recv_buffer
)
if options.hurl:
for region in regions_list:
additional_options['zone'] = region
threading.Thread(target=bees.hurl_attack, args=(options.url, options.number, options.concurrent),
kwargs=additional_options).start()
#time allowed between threads
time.sleep(delay)
else:
for region in regions_list:
additional_options['zone'] = region
threading.Thread(target=bees.attack, args=(options.url, options.number,
options.concurrent), kwargs=additional_options).start()
#time allowed between threads
time.sleep(delay)
elif command == 'down':
bees.down()
elif command == 'report':
bees.report()
def main():
parse_options()
|
service.py
|
# Author: asciidisco
# Module: service
# Created on: 13.01.2017
# License: MIT https://goo.gl/5bMj3H
import threading
import SocketServer
import socket
import xbmc
from xbmc import Monitor
from xbmcaddon import Addon
from resources.lib.WidevineHTTPRequestHandler import WidevineHTTPRequestHandler
import util
# helper function to select an unused port on the host machine
def select_unused_port():
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('127.0.0.1', 0))
addr, port = sock.getsockname()
sock.close()
return port
except Exception as ex:
return 8000
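# Binding to port 0 lets the OS pick an unused ephemeral port; getsockname() reads it back and the
# socket is closed so the proxy below can bind it, with 8000 as the fallback if anything fails.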
def log(msg):
xbmc.log(msg=msg.encode('utf-8'), level=xbmc.LOGDEBUG)
if __name__ == '__main__':
if util.use_drm_proxy():
# pick & store a port for the proxy service
wv_proxy_port = select_unused_port()
Addon().setSetting('wv_proxy_port', str(wv_proxy_port))
log('Port {0} selected'.format(str(wv_proxy_port)))
# server defaults
SocketServer.TCPServer.allow_reuse_address = True
# configure the proxy server
wv_proxy = SocketServer.TCPServer(('127.0.0.1', wv_proxy_port), WidevineHTTPRequestHandler)
wv_proxy.server_activate()
wv_proxy.timeout = 1
# start thread for proxy server
proxy_thread = threading.Thread(target=wv_proxy.serve_forever)
proxy_thread.daemon = True
proxy_thread.start()
monitor = Monitor()
# kill the services if kodi monitor tells us to
while not monitor.abortRequested():
# xbmc.sleep(100)
if monitor.waitForAbort(1):
break
# wv-proxy service shutdown sequence
wv_proxy.shutdown()
wv_proxy.server_close()
wv_proxy.socket.close()
log('wv-proxy stopped')
|
main1.py
|
# run on your system
# new file
#test
import socket
import requests
import threading
import json
import datetime
import time
import netifaces as ni
import random
import pymongo
import hashlib
from blockchain import Blockchain
import sys
import _thread
ip = "http://192.168.43.168:5000"
page = "/ul"
login_p = '/logi'
logout_p = '/logout'
data = {
'num' : '1'
}
sport = 0
ssockets = []
chain_set=[]
lap = [12340,12341,12342,12344,12345,12346,12347]
user_count = len(lap)
message_queue=[]
# Login
def login(user):
d = {
'uname' : user
}
r = requests.post(url = ip+login_p, data = d)
return r.text
def logout():
print(threading.get_ident())
r = requests.post(url = ip+logout_p,data={'luname':myuname})
print('Successfully Logged out from server')
cclose()
print('Successfully Closed all sockets')
try:
_thread.interrupt_main()
except KeyboardInterrupt:
try:
_thread.interrupt_main()
except KeyboardInterrupt:
pass
pass
_thread.interrupt_main()
print('returning')
def get_active_users():
r = requests.post(url = ip+page, data = data)
user_list = r.text.split()
return user_list
def handle_transaction(msg):
send_all(blockchain.new_transaction(msg['sender'],msg['receiver'],msg['message'],msg['id'])[1])
def handle_randnum(msg):
blockchain.update_transactions(msg)
def handle_blockchain_request(blockchain_request):
# mybl=mydb.test.find({})
# bllt=[]
# for el in mybl:
# bllt.append(el)
bllt=blockchain.get_blockchain()
print(bllt)
a={'msg-type':'blockchain','blockchain':bllt}
send_msg(a,blockchain_request['sip'])
def handle_blockchain(received_blockchain):
global chain_set
received_blockchain=received_blockchain['blockchain']
chain_set.append(received_blockchain)
def handle_msg(msg):
print(threading.get_ident())
try:
if(msg['msg-type']=='transaction'):
handle_transaction(msg)
elif(msg['msg-type']=='random_number'):
handle_randnum(msg)
elif(msg['msg-type']=='blockchain_request'):
handle_blockchain_request(msg)
elif(msg['msg-type']=='blockchain'):
handle_blockchain(msg)
except Exception as e:
print(e)
def dl():
print('dl is created')
port=5001
sdl = socket.socket()
sdl.bind(('',port))
sdl.listen(5)
while(True):
c,addr = sdl.accept()
hval='hey'
hval=json.dumps(hval).encode('utf-8')
c.send(hval)
nt = json.loads(c.recv(1024).decode('utf-8'))
if 'logout' in nt.keys():
logout()
c.close()
_thread.interrupt_main()
return
else:
print(threading.get_ident())
print('received transaction from html')
temp=blockchain.new_transaction(nt['sender'],nt['receiver'],nt['message'])
send_all(temp[0])
send_all(temp[1])
c.close()
def socket_listen(soc, port):
print('listening on')
print(port)
soc.bind(('', port))
soc.listen()
while True:
c, addr = soc.accept()
val='connected'
val=json.dumps(val).encode('utf-8')
c.send(val)
msg = c.recv(1024)
msg=json.loads(msg.decode('utf-8'))
print('received')
print(msg)
val='received'
val=json.dumps(val).encode('utf-8')
c.send(val)
handle_msg(msg)
c.close()
def init():
global sport,me,myuname
myuname=sys.argv[1]
sport=int(login(myuname))
global ssockets
ssockets = [socket.socket(socket.AF_INET, socket.SOCK_STREAM) for _ in range(user_count)]
me = str(ni.ifaddresses('wlan0')[ni.AF_INET][0]['addr'])
print(me)
print('sport')
print(sport)
c1 = -1
for soc in ssockets:
c1 += 1
if(lap[c1] == sport):
continue
threading.Thread(target = socket_listen,args = (soc, lap[c1])).start()
threading.Thread(target=dl).start()
threading.Thread(target=b_send_msg).start()
global blockchain
blockchain = Blockchain(sys.argv[1])
threading.Thread(target=chek).start()
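# init() wires up this peer: one listener thread per port in lap (skipping the port assigned to
# this node by the server), the dl() bridge on port 5001 for the local HTML client, the queued
# sender loop b_send_msg(), and the chek() mining loop.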
def send_msg(msg,sip):
global message_queue
message_queue.append([msg,sip])
def b_send_msg():
global message_queue
while(True):
if(len(message_queue)!=0):
m1=message_queue.pop(0)
a_send_msg(m1[0],m1[1])
def a_send_msg(msg,sip):
# if(msg=='close'):
# cclose()
# if(msg == 'logout'):
# logout()
soc = socket.socket()
# print('portszz')
# print(sip)
# print(sport)
soc.connect((sip,sport))
s1=json.loads(soc.recv(1024).decode('utf-8'))
msg=json.dumps(msg).encode('utf-8')
print('sending')
print(msg)
soc.send(msg)
rs=json.loads(soc.recv(1024).decode('utf-8'))
# print(rs)
soc.close()
return rs
def send_all(msg):
ul1=get_active_users()
rsl=[]
for us in ul1:
if(us != me):
print(us,me)
rsl.append(send_msg(msg,us))
return rsl
def cclose():
for s in ssockets:
s.close()
def get_majority_element(n_list):
fr=0
me=-1
for el in n_list:
if type(me)==type(el) and me==el:
fr=fr+1
else:
fr=fr-1
if fr==0 or fr==-1:
me=el
fr=0
fr=0
fl=False
for el in n_list:
if el==me:
fr=fr+1
if fr>len(n_list)/2:
fl=True
return me,fl
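# The first loop picks a candidate with a rough majority-vote scan; the second loop recounts it and
# reports fl=True only when that candidate occurs in more than half of n_list.
# Illustrative example: get_majority_element([1, 1, 2, 1]) -> (1, True)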
def validate_and_update(update_necessary=True):
global chain_set,me
print(me)
sm=blockchain.valid_chain()
if sm==False or update_necessary:
blockchain.update_state=True
# u1=mydb.test.find({})
# l1=[]
# for el in u1:
# l1.append(el)
chain_set.append(blockchain.get_blockchain())
print(chain_set)
send_all({'msg-type':'blockchain_request','sip':me})
nu=get_active_users()
blockchain.clear_blockchain()
blockchain.create_genesis_block()
while len(chain_set)!=len(nu):
pass
if len(chain_set)==1:
blockchain.update_state=False
return
maxl=[len(el) for el in chain_set]
maxl,is_there=get_majority_element(maxl)
if is_there==False:
maxl=min([len(el) for el in chain_set])
for el in range(1,maxl):
blockchain.insert_block(get_majority_element([el1[el] for el1 in chain_set])[0])
chain_set=[]
blockchain.update_state=False
def chek():
global blockchain
while True:
if len(blockchain.mineadd)!=0 and blockchain.update_state==False:
# sm=blockchain.valid_chain()
# print('valid chain')
# print(sm)
# if sm:
# temp=blockchain.mineadd.pop()
# blockchain.mine(temp)
# else:
# blockchain.update_chain()
# validate_and_update(1)
temp=blockchain.mineadd.pop()
blockchain.mine(temp)
time.sleep(0.5)
init()
|
preprocess_tuning.py
|
from calibration.grid import GridProcessor
import os
import random
import numpy as np
import threading
import queue
import cv2
IMAGE_FOLDER = 'C:\\Users\\smerk\\Downloads\\images'
IMAGES = os.listdir(IMAGE_FOLDER)
SAMPLES = 8
IMAGES_SAMPLE = random.sample(IMAGES, SAMPLES)
IMAGES_FULL = [os.path.join(IMAGE_FOLDER, image) for image in IMAGES]
IM_SIZE = (400, 400)
THREADS = 5
image_cache = {}
settings = {}
# let's process them
def process_image(item: str, o_q: queue.Queue):
global settings, image_cache
if item in image_cache:
image = image_cache[item].copy()
else:
image = cv2.imread(item, cv2.IMREAD_GRAYSCALE)
image_cache[item] = image
process = GridProcessor(image, 1, settings)
process.preprocess_image()
image = process.processed
name = os.path.basename(item)
o_q.put((name, cv2.resize(image, IM_SIZE, interpolation=cv2.INTER_LANCZOS4)))
def process_image_queue(q: queue.Queue, o_q: queue.Queue):
while True:
item = q.get()
if item is None:
break
process_image(item, o_q)
q.task_done()
def process_images():
global IMAGES_FULL
threads = []
in_q = queue.Queue()
out_q = queue.Queue()
for _ in range(THREADS):
thread = threading.Thread(target=process_image_queue, args=(in_q, out_q))
thread.start()
threads.append(thread)
# push into queue
for im in IMAGES_FULL:
in_q.put(im)
# end the queues
for _ in range(THREADS * 3):
in_q.put(None)
# join the threads
for thread in threads:
thread.join()
# display the output images
while True:
try:
(name, image) = out_q.get_nowait()
cv2.imshow(name, image)
except queue.Empty:
break
# controls
def change_kernel(val):
global settings
k = 2*val + 1
settings.update({
'kernel': int(k)
})
process_images()
def change_alpha(val):
global settings
settings.update({
'contrast_alpha': int(val)
})
process_images()
def change_canny_low(val):
global settings
settings.update({
'canny_low': int(val)
})
process_images()
def change_canny_high(val):
global settings
settings.update({
'canny_high': int(val)
})
process_images()
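# Each trackbar callback above updates the shared settings dict and reprocesses every image;
# change_kernel maps the slider value v to 2*v + 1 so the kernel size stays odd (e.g. v=3 -> 7).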
# show all of the images
try:
blank = np.zeros((10, 400), np.uint8)
cv2.imshow('control', blank)
process_images()
cv2.createTrackbar('kernel', 'control', 0, 10, change_kernel)
cv2.createTrackbar('alpha', 'control', 1, 110, change_alpha)
cv2.createTrackbar('canny_low', 'control', 1, 250, change_canny_low)
cv2.createTrackbar('canny_high', 'control', 1, 250, change_canny_high)
cv2.waitKey(0)
finally:
cv2.destroyAllWindows()
|
threads.py
|
from threading import Thread
from time import sleep
def carro(velocidade, piloto):
trajeto = 0
while trajeto <= 100:
print(f'Driver {piloto} has already covered {velocidade} km')
trajeto += velocidade
sleep(0.5)
carro_1 = Thread(target=carro, args=[1, 'Maki'])
carro_2 = Thread(target=carro, args=[2, 'Fernanda'])
carro_1.start()
carro_2.start()
|
test_multi.py
|
import argparse
import cv2
import numpy as np
import tensorflow as tf
import multiprocessing
import os
import neuralgym as ng
from inpaint_model import InpaintCAModel
parser = argparse.ArgumentParser()
parser.add_argument('--image_dir', default='', type=str,
help='The folder containing images to be completed.')
parser.add_argument('--mask_dir', default='', type=str,
help='The folder containing masks, value 255 indicates mask.')
parser.add_argument('--output_dir', default='', type=str,
help='Where to write output.')
parser.add_argument('--checkpoint_dir', default='', type=str,
help='The directory of tensorflow checkpoint.')
def complete(image_file):
ng.get_gpus(1,verbose=False)
tf.reset_default_graph()
model = InpaintCAModel()
image = cv2.imread(os.path.join(args.image_dir, image_file))
mask = cv2.imread(os.path.join(args.mask_dir, image_file))
assert image.shape == mask.shape
h, w, _ = image.shape
grid = 8
image_rs = image[:h // grid * grid, :w // grid * grid, :]
mask_rs = mask[:h // grid * grid, :w // grid * grid, :]
print('Shape of image: {}'.format(image_rs.shape))
image_rs = np.expand_dims(image_rs, 0)
mask_rs = np.expand_dims(mask_rs, 0)
input_image = np.concatenate([image_rs, mask_rs], axis=2)
sess_config = tf.ConfigProto()
sess_config.gpu_options.allow_growth = True
with tf.Session(config=sess_config) as sess:
input_image = tf.constant(input_image, dtype=tf.float32)
output = model.build_server_graph(input_image)
output = (output + 1.) * 127.5
output = tf.reverse(output, [-1])
output = tf.saturate_cast(output, tf.uint8)
# load pretrained model
vars_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
assign_ops = []
for var in vars_list:
vname = var.name
from_name = vname
var_value = tf.contrib.framework.load_variable(args.checkpoint_dir, from_name)
assign_ops.append(tf.assign(var, var_value))
sess.run(assign_ops)
result = sess.run(output)
image[:h // grid * grid, :w // grid * grid, :] = result[0][:, :, ::-1]
save_value = cv2.imwrite(os.path.join(args.output_dir, image_file), image)
print("Image saved:", save_value)
sess.close()
if __name__ == '__main__':
args = parser.parse_args()
image_files = sorted(os.listdir(args.image_dir))
mask_files = sorted(os.listdir(args.mask_dir))
print("places2-256 finetune people mask50 prediction")
for i in range(len(image_files)):
image_file = image_files[i]
mask_file = mask_files[i]
p = multiprocessing.Process(target=complete, args=(image_file,))
p.start()
p.join()
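# Each image is completed in its own short-lived process and joined before the next one starts,
# which (presumably) lets TensorFlow release its session/GPU state between images.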
|
main_window.py
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from typing import Optional, TYPE_CHECKING, Sequence, List, Union
from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor, QFont
from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal, QPoint
from PyQt5.QtCore import QTimer
from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget,
QMenuBar, QFileDialog, QCheckBox, QLabel,
QVBoxLayout, QGridLayout, QLineEdit,
QHBoxLayout, QPushButton, QScrollArea, QTextEdit,
QShortcut, QMainWindow, QCompleter, QInputDialog,
QWidget, QSizePolicy, QStatusBar, QToolTip, QDialog,
QMenu, QAction, QStackedWidget, QToolButton)
import electrum
from electrum.gui import messages
from electrum import (keystore, ecc, constants, util, bitcoin, commands,
paymentrequest, lnutil)
from electrum.bitcoin import COIN, is_address
from electrum.plugin import run_hook, BasePlugin
from electrum.i18n import _
from electrum.util import (format_time,
UserCancelled, profiler,
bh2u, bfh, InvalidPassword,
UserFacingException,
get_new_wallet_name, send_exception_to_crash_reporter,
InvalidBitcoinURI, maybe_extract_bolt11_invoice, NotEnoughFunds,
NoDynamicFeeEstimates,
AddTransactionException, BITCOIN_BIP21_URI_SCHEME,
InvoiceError, parse_max_spend)
from electrum.invoices import PR_TYPE_ONCHAIN, PR_TYPE_LN, PR_DEFAULT_EXPIRATION_WHEN_CREATING, Invoice
from electrum.invoices import PR_PAID, PR_FAILED, pr_expiration_values, LNInvoice, OnchainInvoice
from electrum.transaction import (Transaction, PartialTxInput,
PartialTransaction, PartialTxOutput)
from electrum.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
sweep_preparations, InternalAddressCorruption,
CannotDoubleSpendTx, CannotCPFP)
from electrum.version import ELECTRUM_VERSION
from electrum.network import (Network, TxBroadcastError, BestEffortRequestFailed,
UntrustedServerReturnedError, NetworkException)
from electrum.exchange_rate import FxThread
from electrum.simple_config import SimpleConfig
from electrum.logging import Logger
from electrum.lnutil import ln_dummy_address, extract_nodeid, ConnStringFormatError
from electrum.lnaddr import lndecode, LnInvoiceException
from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, FreezableLineEdit, FeerateEdit, SizedFreezableLineEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider, FeeComboBox
from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog,
WindowModalDialog, ChoicesLayout, HelpLabel, Buttons,
OkButton, InfoButton, WWLabel, TaskThread, CancelButton,
CloseButton, HelpButton, MessageBoxMixin, EnterButton,
import_meta_gui, export_meta_gui,
filename_field, address_field, char_width_in_lineedit, webopen,
TRANSACTION_FILE_EXTENSION_FILTER_ANY, MONOSPACE_FONT,
getOpenFileName, getSaveFileName, BlockingWaitingDialog)
from .util import ButtonsTextEdit, ButtonsLineEdit
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
from .update_checker import UpdateCheck, UpdateCheckThread
from .channels_list import ChannelsList
from .confirm_tx_dialog import ConfirmTxDialog
from .transaction_dialog import PreviewTxDialog
from .rbf_dialog import BumpFeeDialog, DSCancelDialog
from .qrreader import scan_qrcode
if TYPE_CHECKING:
from . import ElectrumGui
LN_NUM_PAYMENT_ATTEMPTS = 10
class StatusBarButton(QToolButton):
# note: this class has a custom stylesheet applied in stylesheet_patcher.py
def __init__(self, icon, tooltip, func):
QToolButton.__init__(self)
self.setText('')
self.setIcon(icon)
self.setToolTip(tooltip)
self.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
self.setAutoRaise(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
self.setCursor(QCursor(Qt.PointingHandCursor))
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() in [Qt.Key_Return, Qt.Key_Enter]:
self.func()
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_keystore_encryption():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
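# Illustrative use of the decorator above (not part of the original file):
#
#   @protected
#   def sign_message(self, address, message, signature, password):
#       ...  # 'password' is injected by request_password(); it is None for
#            # wallets without keystore encryption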
class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
#ln_payment_attempt_signal = pyqtSignal(str)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
show_error_signal = pyqtSignal(str)
payment_request: Optional[paymentrequest.PaymentRequest]
def __init__(self, gui_object: 'ElectrumGui', wallet: Abstract_Wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config # type: SimpleConfig
self.gui_thread = gui_object.gui_thread
assert wallet, "no wallet"
self.wallet = wallet
if wallet.has_lightning():
self.wallet.config.set_key('show_channels_tab', True)
Exception_Hook.maybe_setup(config=self.config, wallet=self.wallet)
self.network = gui_object.daemon.network # type: Network
self.fx = gui_object.daemon.fx # type: FxThread
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self._cleaned_up = False
self.payment_request = None # type: Optional[paymentrequest.PaymentRequest]
self.payto_URI = None
self.checking_accounts = False
self.qr_window = None
self.pluginsdialog = None
self.showing_cert_mismatch_error = False
self.tl_windows = []
self.pending_invoice = None
Logger.__init__(self)
self.tx_notification_queue = queue.Queue()
self.tx_notification_last_time = 0
self.create_status_bar()
self.need_update = threading.Event()
self.completions = QStringListModel()
coincontrol_sb = self.create_coincontrol_statusbar()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
self.channels_tab = self.create_channels_tab()
tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History'))
tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive'))
def add_optional_tab(tabs, tab, icon, description, name):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), False):
tabs.addTab(tab, icon, description.replace("&", ""))
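# (added) Optional tabs are toggled via config keys of the form
# 'show_<name>_tab', e.g. 'show_addresses_tab'.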
add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses")
add_optional_tab(tabs, self.channels_tab, read_QIcon("lightning.png"), _("Channels"), "channels")
add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
central_widget = QScrollArea()
vbox = QVBoxLayout(central_widget)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.addWidget(tabs)
vbox.addWidget(coincontrol_sb)
self.setCentralWidget(central_widget)
self.setMinimumWidth(640)
self.setMinimumHeight(400)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(read_QIcon("electrum.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("F5"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.show_error_signal.connect(self.show_error)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'new_transaction', 'status',
'banner', 'verified', 'fee', 'fee_histogram', 'on_quotes',
'on_history', 'channel', 'channels_updated',
'payment_failed', 'payment_succeeded',
'invoice_status', 'request_status', 'ln_gossip_sync_progress',
'cert_mismatch', 'gossip_db_loaded']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
util.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
# update fee slider in case we missed the callback
#self.fee_slider.update()
self.load_wallet(wallet)
gui_object.timer.timeout.connect(self.timer_actions)
self.fetch_alias()
# If the option hasn't been set yet (the prompt below is disabled in this fork via 'if False')
#if config.get('check_updates') is None:
if False:
choice = self.question(title="Electrum - " + _("Enable update check"),
msg=_("For security reasons we advise that you always use the latest version of Electrum.") + " " +
_("Would you like to be notified when there is a newer version of Electrum available?"))
config.set_key('check_updates', bool(choice), save=True)
self._update_check_thread = None
#if config.get('check_updates', False):
if False:
# The references to both the thread and the window need to be stored somewhere
# to prevent GC from getting in our way.
def on_version_received(v):
if UpdateCheck.is_newer(v):
self.update_check_button.setText(_("Update to Electrum {} is available").format(v))
self.update_check_button.clicked.connect(lambda: self.show_update_check(v))
self.update_check_button.show()
self._update_check_thread = UpdateCheckThread()
self._update_check_thread.checked.connect(on_version_received)
self._update_check_thread.start()
def run_coroutine_from_thread(self, coro, on_result=None):
def task():
try:
f = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
r = f.result()
if on_result:
on_result(r)
except Exception as e:
self.logger.exception("exception in coro scheduled via window.wallet")
self.show_error_signal.emit(str(e))
self.wallet.thread.add(task)
def on_fx_history(self):
self.history_model.refresh('fx_history')
self.address_list.update()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_model.refresh('fx_quotes')
self.address_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description)
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
def diagnostic_name(self):
#return '{}:{}'.format(self.__class__.__name__, self.wallet.diagnostic_name())
return self.wallet.diagnostic_name()
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
e = exc_info[1]
if isinstance(e, UserCancelled):
pass
elif isinstance(e, UserFacingException):
self.show_error(str(e))
else:
# TODO would be nice if we just sent these to the crash reporter...
# anything we don't want to send there, we should explicitly catch
# send_exception_to_crash_reporter(e)
try:
self.logger.error("on_error", exc_info=exc_info)
except OSError:
pass # see #4418
self.show_error(repr(e))
def on_network(self, event, *args):
# Handle in GUI thread
self.network_signal.emit(event, args)
def on_network_qt(self, event, args=None):
# Handle a network message in the GUI thread
# note: all windows get events from all wallets!
if event == 'wallet_updated':
wallet = args[0]
if wallet == self.wallet:
self.need_update.set()
elif event == 'network_updated':
self.gui_object.network_updated_signal_obj.network_updated_signal \
.emit(event, args)
self.network_signal.emit('status', None)
elif event == 'blockchain_updated':
# to update number of confirmations in history
self.need_update.set()
elif event == 'new_transaction':
wallet, tx = args
if wallet == self.wallet:
self.tx_notification_queue.put(tx)
elif event == 'on_quotes':
self.on_fx_quotes()
elif event == 'on_history':
self.on_fx_history()
elif event == 'gossip_db_loaded':
self.channels_list.gossip_db_loaded.emit(*args)
elif event == 'channels_updated':
wallet = args[0]
if wallet == self.wallet:
self.channels_list.update_rows.emit(*args)
elif event == 'channel':
wallet = args[0]
if wallet == self.wallet:
self.channels_list.update_single_row.emit(*args)
self.update_status()
elif event == 'request_status':
self.on_request_status(*args)
elif event == 'invoice_status':
self.on_invoice_status(*args)
elif event == 'payment_succeeded':
wallet = args[0]
if wallet == self.wallet:
self.on_payment_succeeded(*args)
elif event == 'payment_failed':
wallet = args[0]
if wallet == self.wallet:
self.on_payment_failed(*args)
elif event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
wallet, tx_hash, tx_mined_status = args
if wallet == self.wallet:
self.history_model.update_tx_mined_status(tx_hash, tx_mined_status)
elif event == 'fee':
pass
elif event == 'fee_histogram':
self.history_model.on_fee_histogram()
elif event == 'ln_gossip_sync_progress':
self.update_lightning_icon()
elif event == 'cert_mismatch':
self.show_cert_mismatch_error()
else:
self.logger.info(f"unexpected network event: {event} {args}")
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.daemon = True
t.start()
def close_wallet(self):
if self.wallet:
self.logger.info(f'close_wallet {self.wallet.storage.path}')
run_hook('close_wallet', self.wallet)
@profiler
def load_wallet(self, wallet: Abstract_Wallet):
wallet.thread = TaskThread(self, self.on_error)
self.update_recently_visited(wallet.storage.path)
if wallet.has_lightning():
util.trigger_callback('channels_updated', wallet)
self.need_update.set()
# Once the GUI has been initialized, check whether anything needs to be announced, since the callback may have fired before the GUI existed.
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.channels_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
def init_geometry(self):
winpos = self.wallet.db.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.logger.info("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Optical Electrum"
if constants.net.TESTNET:
name += " " + constants.net.NET_NAME.capitalize()
title = '%s %s - %s' % (name, ELECTRUM_VERSION,
self.wallet.basename())
extra = [self.wallet.db.get('wallet_type', '?')]
if self.wallet.is_watching_only():
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Bitcoins with it."),
_("Make sure you own the seed phrase or the private keys, before you request Bitcoins to be sent to this wallet.")
])
self.show_warning(msg, title=_('Watch-only wallet'))
def warn_if_testnet(self):
if not constants.net.TESTNET:
return
# user might have opted out already
if self.config.get('dont_show_testnet_warning', False):
return
# only show once per process lifecycle
if getattr(self.gui_object, '_warned_testnet', False):
return
self.gui_object._warned_testnet = True
msg = ''.join([
_("You are in testnet mode."), ' ',
_("Testnet coins are worthless."), '\n',
_("Testnet is separate from the main Bitcoin network. It is used for testing.")
])
cb = QCheckBox(_("Don't show this again."))
cb_checked = False
def on_cb(x):
nonlocal cb_checked
cb_checked = x == Qt.Checked
cb.stateChanged.connect(on_cb)
self.show_warning(msg, title=_('Testnet'), checkbox=cb)
if cb_checked:
self.config.set_key('dont_show_testnet_warning', True)
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def select_backup_dir(self, b):
name = self.config.get('backup_dir', '')
dirname = QFileDialog.getExistingDirectory(self, "Select your wallet backup directory", name)
if dirname:
self.config.set_key('backup_dir', dirname)
self.backup_dir_e.setText(dirname)
def backup_wallet(self):
d = WindowModalDialog(self, _("File Backup"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
backup_help = ""
backup_dir = self.config.get('backup_dir')
backup_dir_label = HelpLabel(_('Backup directory') + ':', backup_help)
msg = _('Please select a backup directory')
if self.wallet.has_lightning() and self.wallet.lnworker.channels:
msg += '\n\n' + ' '.join([
_("Note that lightning channels will be converted to channel backups."),
_("You cannot use channel backups to perform lightning payments."),
_("Channel backups can only be used to request your channels to be closed.")
])
self.backup_dir_e = QPushButton(backup_dir)
self.backup_dir_e.clicked.connect(self.select_backup_dir)
grid.addWidget(backup_dir_label, 1, 0)
grid.addWidget(self.backup_dir_e, 1, 1)
vbox.addLayout(grid)
vbox.addWidget(WWLabel(msg))
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return False
backup_dir = self.config.get_backup_dir()
if backup_dir is None:
self.show_message(_("You need to configure a backup directory in your preferences"), title=_("Backup not configured"))
return
try:
new_path = self.wallet.save_backup(backup_dir)
except BaseException as reason:
self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
return
msg = _("A copy of your wallet file was created in")+" '%s'" % str(new_path)
self.show_message(msg, title=_("Wallet backup created"))
return True
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = [path for path in recent if os.path.exists(path)]
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.wallet.storage.path))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename = get_new_wallet_name(wallet_folder)
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save backup"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_wallet_info)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
addresses_menu = wallet_menu.addMenu(_("&Addresses"))
addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
history_menu = wallet_menu.addMenu(_("&History"))
history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
history_menu.addAction(_("&Summary"), self.history_list.show_summary)
history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.import_contacts())
contacts_menu.addAction(_("Export"), lambda: self.export_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.import_invoices())
invoices_menu.addAction(_("Export"), lambda: self.export_invoices())
requests_menu = wallet_menu.addMenu(_("Requests"))
requests_menu.addAction(_("Import"), lambda: self.import_requests())
requests_menu.addAction(_("Export"), lambda: self.export_requests())
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
add_toggle_action(view_menu, self.channels_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools")) # type: QMenu
preferences_action = tools_menu.addAction(_("Preferences"), self.settings_dialog) # type: QAction
if sys.platform == 'darwin':
# "Settings"/"Preferences" are all reserved keywords in macOS.
# preferences_action will get picked up based on name (and put into a standardized location,
# and given a standard reserved hotkey)
# Hence, this menu item will be at a "uniform location re macOS processes"
preferences_action.setMenuRole(QAction.PreferencesRole) # make sure OS recognizes it as preferences
# Add another preferences item, to also have a "uniform location for Electrum between different OSes"
tools_menu.addAction(_("Electrum preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), self.gui_object.show_network_dialog).setEnabled(bool(self.network))
if self.network and self.network.local_watchtower:
tools_menu.addAction(_("Local &Watchtower"), self.gui_object.show_watchtower_dialog)
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
#help_menu.addAction(_("&Check for updates"), self.show_update_check)
help_menu.addAction(_("&Official website"), lambda: webopen("https://electrum.org"))
help_menu.addSeparator()
help_menu.addAction(_("&Documentation"), lambda: webopen("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
if not constants.net.TESTNET:
help_menu.addAction(_("&Bitcoin Paper"), self.show_bitcoin_paper)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters().server.host
self.pay_to_URI('bitcoin:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Optical Electrum",
(_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" +
_("This is a electrum fork, that is compatible with optical bitcoin - OBTC.") + "\n\n" +
_("Electrum's focus is speed, with low resource usage and simplifying Bitcoin.") + " " +
_("You do not need to perform regular backups, because your wallet can be "
"recovered from a secret phrase that you can memorize or write on paper.") + " " +
_("Startup times are instant because it operates in conjunction with high-performance "
"servers that handle the most complicated parts of the Bitcoin system.") + "\n\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_bitcoin_paper(self):
filename = os.path.join(self.config.path, 'bitcoin.pdf')
if not os.path.exists(filename):
s = self._fetch_tx_from_network("54e48e5f5c656b26c3bca14a8c95aa583d07ebe84dde3b7dd4a78f4e4186e713")
if not s:
return
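# (added) The raw transaction fetched above embeds the Bitcoin whitepaper PDF
# in its multisig outputs; the slicing below strips the script framing around
# each data chunk before the bytes are written to disk.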
s = s.split("0100000000000000")[1:-1]
out = ''.join(x[6:136] + x[138:268] + x[270:400] if len(x) > 136 else x[6:] for x in s)[16:-20]
with open(filename, 'wb') as f:
f.write(bytes.fromhex(out))
webopen('file:///' + filename)
def show_update_check(self, version=None):
self.gui_object._update_check = UpdateCheck(latest_version=version)
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
f'''<a href="{constants.GIT_REPO_ISSUES_URL}">{constants.GIT_REPO_ISSUES_URL}</a><br/><br/>''',
_("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electrum - " + _("Reporting Bugs"), rich_text=True)
def notify_transactions(self):
if self.tx_notification_queue.qsize() == 0:
return
if not self.wallet.up_to_date:
return # no notifications while syncing
now = time.time()
rate_limit = 20 # seconds
if self.tx_notification_last_time + rate_limit > now:
return
self.tx_notification_last_time = now
self.logger.info("Notifying GUI about new transactions")
txns = []
while True:
try:
txns.append(self.tx_notification_queue.get_nowait())
except queue.Empty:
break
# Combine the transactions if there are at least three
if len(txns) >= 3:
total_amount = 0
for tx in txns:
tx_wallet_delta = self.wallet.get_wallet_delta(tx)
if not tx_wallet_delta.is_relevant:
continue
total_amount += tx_wallet_delta.delta
self.notify(_("{} new transactions: Total amount received in the new transactions {}")
.format(len(txns), self.format_amount_and_units(total_amount)))
else:
for tx in txns:
tx_wallet_delta = self.wallet.get_wallet_delta(tx)
if not tx_wallet_delta.is_relevant:
continue
self.notify(_("New transaction: {}").format(self.format_amount_and_units(tx_wallet_delta.delta)))
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Electrum", message, read_QIcon("electrum_dark_icon"), 20000)
except TypeError:
self.tray.showMessage("Electrum", message, QSystemTrayIcon.Information, 20000)
def timer_actions(self):
self.request_list.refresh_status()
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
elif not self.wallet.up_to_date:
# this updates "synchronizing" progress
self.update_status()
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
self.notify_transactions()
def format_amount(self, amount_sat, is_diff=False, whitespaces=False) -> str:
"""Formats amount as string, converting to desired unit.
E.g. 500_000 -> '0.005'
"""
return self.config.format_amount(amount_sat, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount_sat, *, timestamp: int = None) -> str:
"""Returns string with both bitcoin and fiat amounts, in desired units.
E.g. 500_000 -> '0.005 BTC (191.42 EUR)'
"""
text = self.config.format_amount_and_units(amount_sat)
fiat = self.fx.format_amount_and_units(amount_sat, timestamp=timestamp) if self.fx else None
if text and fiat:
text += f' ({fiat})'
return text
def format_fiat_and_units(self, amount_sat) -> str:
"""Returns string of FX fiat amount, in desired units.
E.g. 500_000 -> '191.42 EUR'
"""
return self.fx.format_amount_and_units(amount_sat) if self.fx else ''
def format_fee_rate(self, fee_rate):
return self.config.format_fee_rate(fee_rate)
def get_decimal_point(self):
return self.config.get_decimal_point()
def base_unit(self):
return self.config.get_base_unit()
def connect_fields(self, window, btc_e, fiat_e, fee_e):
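# (added) Keeps the BTC and fiat amount fields in sync: editing either one
# recomputes the other from the current exchange rate.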
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
if rate.is_nan() or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
def update_status(self):
if not self.wallet:
return
if self.network is None:
text = _("Offline")
icon = read_QIcon("status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
fork_str = "_fork" if len(self.network.get_blockchains())>1 else ""
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
text = ("{} ({}/{})"
.format(_("Synchronizing..."), num_answered, num_sent))
icon = read_QIcon("status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
icon = read_QIcon("status_lagging%s.png"%fork_str)
else:
c, u, x = self.wallet.get_balance()
text = _("Balance") + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
if self.wallet.has_lightning():
l = self.wallet.lnworker.get_balance()
text += u' \U000026a1 %s'%(self.format_amount_and_units(l).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = read_QIcon("status_connected%s.png"%fork_str)
else:
icon = read_QIcon("status_connected_proxy%s.png"%fork_str)
else:
if self.network.proxy:
text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
else:
text = _("Not connected")
icon = read_QIcon("status_disconnected.png")
if self.tray:
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
if self.status_button:
self.status_button.setIcon(icon)
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self, wallet=None):
if wallet is None:
wallet = self.wallet
if wallet != self.wallet:
return
self.history_model.refresh('update_tabs')
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.channels_list.update_rows.emit(wallet)
self.update_completions()
def create_channels_tab(self):
self.channels_list = ChannelsList(self)
t = self.channels_list.get_toolbar()
return self.create_list_tab(self.channels_list, t)
def create_history_tab(self):
self.history_model = HistoryModel(self)
self.history_list = l = HistoryList(self, self.history_model)
self.history_model.set_view(self.history_list)
l.searchable_list = l
toolbar = l.create_toolbar(self.config)
tab = self.create_list_tab(l, toolbar)
toolbar_shown = bool(self.config.get('show_toolbar_history', False))
l.show_toolbar(toolbar_shown)
return tab
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_channel(self, channel_id):
from . import channel_details
channel_details.ChannelDetailsDialog(self, channel_id).show()
def show_transaction(self, tx, *, tx_desc=None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, parent=self, desc=tx_desc)
def show_lightning_transaction(self, tx_item):
from .lightning_tx_dialog import LightningTxDialog
d = LightningTxDialog(self, tx_item)
d.show()
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_message_e = SizedFreezableLineEdit(width=700)
grid.addWidget(QLabel(_('Description')), 0, 0)
grid.addWidget(self.receive_message_e, 0, 1, 1, 4)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 1, 0)
grid.addWidget(self.receive_amount_e, 1, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 1, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.connect_fields(self, self.amount_e, self.fiat_send_e, None)
self.expires_combo = QComboBox()
evl = sorted(pr_expiration_values.items())
evl_keys = [i[0] for i in evl]
evl_values = [i[1] for i in evl]
default_expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
try:
i = evl_keys.index(default_expiry)
except ValueError:
i = 0
self.expires_combo.addItems(evl_values)
self.expires_combo.setCurrentIndex(i)
def on_expiry(i):
self.config.set_key('request_expiry', evl_keys[i])
self.expires_combo.currentIndexChanged.connect(on_expiry)
msg = ''.join([
_('Expiration date of your request.'), ' ',
_('This information is seen by the recipient if you send them a signed payment request.'),
'\n\n',
_('For on-chain requests, the address gets reserved until expiration. After that, it might get reused.'), ' ',
_('The bitcoin address never expires and will always be part of this electrum wallet.'), ' ',
_('You can reuse a bitcoin address any number of times but it is not good for your privacy.'),
'\n\n',
_('For Lightning requests, payments will not be accepted after the expiration.'),
])
grid.addWidget(HelpLabel(_('Expires after') + ' (?)', msg), 2, 0)
grid.addWidget(self.expires_combo, 2, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(1)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 2, 1)
self.clear_invoice_button = QPushButton(_('Clear'))
self.clear_invoice_button.clicked.connect(self.clear_receive_tab)
self.create_invoice_button = QPushButton(_('New Address'))
self.create_invoice_button.setIcon(read_QIcon("bitcoin.png"))
self.create_invoice_button.setToolTip('Create on-chain request')
self.create_invoice_button.clicked.connect(lambda: self.create_invoice(False))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_invoice_button)
buttons.addWidget(self.create_invoice_button)
if self.wallet.has_lightning():
self.create_lightning_invoice_button = QPushButton(_('Lightning'))
self.create_lightning_invoice_button.setToolTip('Create lightning request')
self.create_lightning_invoice_button.setIcon(read_QIcon("lightning.png"))
self.create_lightning_invoice_button.clicked.connect(lambda: self.create_invoice(True))
buttons.addWidget(self.create_lightning_invoice_button)
grid.addLayout(buttons, 4, 0, 1, -1)
self.receive_payreq_e = ButtonsTextEdit()
self.receive_payreq_e.setFont(QFont(MONOSPACE_FONT))
self.receive_payreq_e.addCopyButton(self.app)
self.receive_payreq_e.setReadOnly(True)
self.receive_payreq_e.textChanged.connect(self.update_receive_qr)
self.receive_payreq_e.setFocusPolicy(Qt.ClickFocus)
self.receive_qr = QRCodeWidget(fixedSize=220)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_address_e = ButtonsTextEdit()
self.receive_address_e.setFont(QFont(MONOSPACE_FONT))
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
self.receive_address_e.textChanged.connect(self.update_receive_address_styling)
qr_show = lambda: self.show_qrcode(str(self.receive_address_e.text()), _('Receiving address'), parent=self)
qr_icon = "qrcode_white.png" if ColorScheme.dark_scheme else "qrcode.png"
self.receive_address_e.addButton(qr_icon, qr_show, _("Show as QR code"))
self.receive_requests_label = QLabel(_('Receive queue'))
from .request_list import RequestList
self.request_list = RequestList(self)
receive_tabs = QTabWidget()
receive_tabs.addTab(self.receive_address_e, _('Address'))
receive_tabs.addTab(self.receive_payreq_e, _('Request'))
receive_tabs.addTab(self.receive_qr, _('QR Code'))
receive_tabs.setCurrentIndex(self.config.get('receive_tabs_index', 0))
receive_tabs.currentChanged.connect(lambda i: self.config.set_key('receive_tabs_index', i))
receive_tabs_sp = receive_tabs.sizePolicy()
receive_tabs_sp.setRetainSizeWhenHidden(True)
receive_tabs.setSizePolicy(receive_tabs_sp)
def maybe_hide_receive_tabs():
receive_tabs.setVisible(bool(self.receive_payreq_e.text()))
self.receive_payreq_e.textChanged.connect(maybe_hide_receive_tabs)
maybe_hide_receive_tabs()
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addStretch()
hbox.addWidget(receive_tabs)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_requests(self, keys):
for key in keys:
self.wallet.delete_request(key)
self.request_list.update()
self.clear_receive_tab()
def delete_lightning_payreq(self, payreq_key):
self.wallet.lnworker.delete_invoice(payreq_key)
self.request_list.update()
self.invoice_list.update()
self.clear_receive_tab()
def sign_payment_request(self, addr):
alias = self.config.get('alias')
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = None
if self.wallet.has_keystore_encryption():
password = self.password_dialog(msg)
if not password:
return
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(repr(e))
return
else:
return
def create_invoice(self, is_lightning: bool):
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
try:
if is_lightning:
if not self.wallet.lnworker.channels:
self.show_error(_("You need to open a Lightning channel first."))
return
# TODO maybe show a warning if amount exceeds lnworker.num_sats_can_receive (as in kivy)
key = self.wallet.lnworker.add_request(amount, message, expiry)
else:
key = self.create_bitcoin_request(amount, message, expiry)
if not key:
return
self.address_list.update()
except InvoiceError as e:
self.show_error(_('Error creating payment request') + ':\n' + str(e))
return
assert key is not None
self.request_list.update()
self.request_list.select_key(key)
# clear request fields
self.receive_amount_e.setText('')
self.receive_message_e.setText('')
# copy to clipboard
r = self.wallet.get_request(key)
content = r.invoice if r.is_lightning() else r.get_address()
title = _('Invoice') if is_lightning else _('Address')
self.do_copy(content, title=title)
def create_bitcoin_request(self, amount: int, message: str, expiration: int) -> Optional[str]:
addr = self.wallet.get_unused_address()
if addr is None:
if not self.wallet.is_deterministic(): # imported wallet
msg = [
_('No more addresses in your wallet.'), ' ',
_('You are using a non-deterministic wallet, which cannot create new addresses.'), ' ',
_('If you want to create new addresses, use a deterministic wallet instead.'), '\n\n',
_('Creating a new payment request will reuse one of your addresses and overwrite an existing request. Continue anyway?'),
]
if not self.question(''.join(msg)):
return
addr = self.wallet.get_receiving_address()
else: # deterministic wallet
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
req = self.wallet.make_payment_request(addr, amount, message, expiration)
try:
self.wallet.add_payment_request(req)
except Exception as e:
self.logger.exception('Error adding payment request')
self.show_error(_('Error adding payment request') + ':\n' + repr(e))
else:
self.sign_payment_request(addr)
return addr
def do_copy(self, content: str, *, title: str = None) -> None:
self.app.clipboard().setText(content)
if title is None:
tooltip_text = _("Text copied to clipboard").format(title)
else:
tooltip_text = _("{} copied to clipboard").format(title)
QToolTip.showText(QCursor.pos(), tooltip_text, self)
def clear_receive_tab(self):
self.receive_payreq_e.setText('')
self.receive_address_e.setText('')
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
self.request_list.clearSelection()
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def update_receive_qr(self):
uri = str(self.receive_payreq_e.text())
if maybe_extract_bolt11_invoice(uri):
# encode lightning invoices as uppercase so QR encoding can use
# alphanumeric mode; resulting in smaller QR codes
uri = uri.upper()
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.qrw.setData(uri)
def update_receive_address_styling(self):
addr = str(self.receive_address_e.text())
if is_address(addr) and self.wallet.is_used(addr):
self.receive_address_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
self.receive_address_e.setToolTip(_("This address has already been used. "
"For better privacy, do not reuse it for new payments."))
else:
self.receive_address_e.setStyleSheet("")
self.receive_address_e.setToolTip("")
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
self.payto_e.addPasteButton(self.app)
msg = (_("Recipient of the funds.") + "\n\n"
+ _("You may enter a Bitcoin address, a label from your list of contacts "
"(a list of completions will be proposed), "
"or an alias (email-like address that forwards to a Bitcoin address)") + ". "
+ _("Lightning invoices are also supported.") + "\n\n"
+ _("You can also pay to many outputs in a single transaction, "
"specifying one output per line.") + "\n" + _("Format: address, amount") + "\n"
+ _("To set the amount to 'max', use the '!' special character.") + "\n"
+ _("Integers weights can also be used in conjunction with '!', "
"e.g. set one amount to '2!' and another to '3!' to split your coins 40-60."))
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
completer.setCaseSensitivity(False)
self.payto_e.set_completer(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = SizedFreezableLineEdit(width=700)
grid.addWidget(self.message_e, 2, 1, 1, -1)
msg = (_('The amount to be received by the recipient.') + ' '
+ _('Fees are paid by the sender.') + '\n\n'
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' '
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n'
+ _('Keyboard shortcut: type "!" to send all your coins.'))
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 3, 0)
grid.addWidget(self.amount_e, 3, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 3, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(100)
self.max_button.setCheckable(True)
grid.addWidget(self.max_button, 3, 3)
self.save_button = EnterButton(_("Save"), self.do_save_invoice)
self.send_button = EnterButton(_("Pay") + "...", self.do_pay)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.save_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 6, 1, 1, 4)
self.amount_e.shortcut.connect(self.spend_max)
def reset_max(text):
self.max_button.setChecked(False)
enable = not bool(text) and not self.amount_e.isReadOnly()
#self.max_button.setEnabled(enable)
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
self.set_onchain(False)
self.invoices_label = QLabel(_('Send queue'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
hbox.addStretch(1)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
if run_hook('abort_send', self):
return
outputs = self.payto_e.get_outputs(True)
if not outputs:
return
make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(
coins=self.get_coins(),
outputs=outputs,
fee=fee_est,
is_sweep=False)
try:
try:
tx = make_tx(None)
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
# Check if we had enough funds excluding fees,
# if so, still provide opportunity to set lower fees.
tx = make_tx(0)
except NotEnoughFunds as e:
self.max_button.setChecked(False)
text = self.get_text_not_enough_funds_mentioning_frozen()
self.show_error(text)
return
self.max_button.setChecked(True)
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
self.amount_e.setAmount(amount_after_all_fees)
# show tooltip explaining max amount
mining_fee = tx.get_fee()
mining_fee_str = self.format_amount_and_units(mining_fee)
msg = _("Mining fee: {} (can be adjusted on next screen)").format(mining_fee_str)
if x_fee_amount:
twofactor_fee_str = self.format_amount_and_units(x_fee_amount)
msg += "\n" + _("2fa fee: {} (for the next batch of transactions)").format(twofactor_fee_str)
frozen_bal = self.get_frozen_balance_str()
if frozen_bal:
msg += "\n" + _("Some coins are frozen: {} (can be unfrozen in the Addresses or in the Coins tab)").format(frozen_bal)
QToolTip.showText(self.max_button.mapToGlobal(QPoint(0, 0)), msg)
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
@protected
def protect(self, func, args, password):
return func(*args, password)
def read_outputs(self) -> List[PartialTxOutput]:
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
outputs = self.payto_e.get_outputs(self.max_button.isChecked())
return outputs
def check_send_tab_onchain_outputs_and_show_errors(self, outputs: List[PartialTxOutput]) -> bool:
"""Returns whether there are errors with outputs.
Also shows error dialog to user if so.
"""
if not outputs:
self.show_error(_('No outputs'))
return True
for o in outputs:
if o.scriptpubkey is None:
self.show_error(_('Bitcoin Address is None'))
return True
if o.value is None:
self.show_error(_('Invalid Amount'))
return True
return False # no errors
def check_send_tab_payto_line_and_show_errors(self) -> bool:
"""Returns whether there are errors.
Also shows error dialog to user if so.
"""
pr = self.payment_request
if pr:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
return True
if not pr:
errors = self.payto_e.get_errors()
if errors:
if len(errors) == 1 and not errors[0].is_multiline:
err = errors[0]
self.show_warning(_("Failed to parse 'Pay to' line") + ":\n" +
f"{err.line_content[:40]}...\n\n"
f"{err.exc!r}")
else:
self.show_warning(_("Invalid Lines found:") + "\n\n" +
'\n'.join([_("Line #") +
f"{err.idx+1}: {err.line_content[:40]}... ({err.exc!r})"
for err in errors]))
return True
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return True
return False # no errors
def pay_lightning_invoice(self, invoice: str, *, amount_msat: Optional[int]):
if amount_msat is None:
raise Exception("missing amount for LN invoice")
amount_sat = Decimal(amount_msat) / 1000
# FIXME this is currently lying to user as we truncate to satoshis
msg = _("Pay lightning invoice?") + '\n\n' + _("This will send {}?").format(self.format_amount_and_units(amount_sat))
if not self.question(msg):
return
self.save_pending_invoice()
def task():
coro = self.wallet.lnworker.pay_invoice(invoice, amount_msat=amount_msat, attempts=LN_NUM_PAYMENT_ATTEMPTS)
fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
return fut.result()
self.wallet.thread.add(task)
def on_request_status(self, wallet, key, status):
if wallet != self.wallet:
return
req = self.wallet.receive_requests.get(key)
if req is None:
return
if status == PR_PAID:
self.notify(_('Payment received') + '\n' + key)
self.need_update.set()
else:
self.request_list.update_item(key, req)
def on_invoice_status(self, wallet, key):
if wallet != self.wallet:
return
invoice = self.wallet.get_invoice(key)
if invoice is None:
return
status = self.wallet.get_invoice_status(invoice)
if status == PR_PAID:
self.invoice_list.update()
else:
self.invoice_list.update_item(key, invoice)
def on_payment_succeeded(self, wallet, key):
description = self.wallet.get_label(key)
self.notify(_('Payment succeeded') + '\n\n' + description)
self.need_update.set()
def on_payment_failed(self, wallet, key, reason):
self.show_error(_('Payment failed') + '\n\n' + reason)
def read_invoice(self):
if self.check_send_tab_payto_line_and_show_errors():
return
try:
if not self._is_onchain:
invoice_str = self.payto_e.lightning_invoice
if not invoice_str:
return
if not self.wallet.has_lightning():
self.show_error(_('Lightning is disabled'))
return
invoice = LNInvoice.from_bech32(invoice_str)
if invoice.get_amount_msat() is None:
amount_sat = self.amount_e.get_amount()
if amount_sat:
invoice.amount_msat = int(amount_sat * 1000)
else:
self.show_error(_('No amount'))
return
return invoice
else:
outputs = self.read_outputs()
if self.check_send_tab_onchain_outputs_and_show_errors(outputs):
return
message = self.message_e.text()
return self.wallet.create_invoice(
outputs=outputs,
message=message,
pr=self.payment_request,
URI=self.payto_URI)
except InvoiceError as e:
self.show_error(_('Error creating payment') + ':\n' + str(e))
def do_save_invoice(self):
self.pending_invoice = self.read_invoice()
if not self.pending_invoice:
return
self.save_pending_invoice()
def save_pending_invoice(self):
if not self.pending_invoice:
return
self.do_clear()
self.wallet.save_invoice(self.pending_invoice)
self.invoice_list.update()
self.pending_invoice = None
def do_pay(self):
self.pending_invoice = self.read_invoice()
if not self.pending_invoice:
return
self.do_pay_invoice(self.pending_invoice)
def pay_multiple_invoices(self, invoices):
outputs = []
for invoice in invoices:
outputs += invoice.outputs
self.pay_onchain_dialog(self.get_coins(), outputs)
def do_pay_invoice(self, invoice: 'Invoice'):
if invoice.type == PR_TYPE_LN:
assert isinstance(invoice, LNInvoice)
self.pay_lightning_invoice(invoice.invoice, amount_msat=invoice.get_amount_msat())
elif invoice.type == PR_TYPE_ONCHAIN:
assert isinstance(invoice, OnchainInvoice)
self.pay_onchain_dialog(self.get_coins(), invoice.outputs)
else:
raise Exception('unknown invoice type')
def get_coins(self, *, nonlocal_only=False) -> Sequence[PartialTxInput]:
coins = self.get_manually_selected_coins()
if coins is not None:
return coins
else:
return self.wallet.get_spendable_coins(None, nonlocal_only=nonlocal_only)
def get_manually_selected_coins(self) -> Optional[Sequence[PartialTxInput]]:
"""Return a list of selected coins or None.
Note: None means selection is not being used,
while an empty sequence means the user specifically selected that.
"""
return self.utxo_list.get_spend_list()
def get_text_not_enough_funds_mentioning_frozen(self) -> str:
text = _("Not enough funds")
frozen_str = self.get_frozen_balance_str()
if frozen_str:
text += " ({} {})".format(
frozen_str, _("are frozen")
)
return text
def get_frozen_balance_str(self) -> Optional[str]:
frozen_bal = sum(self.wallet.get_frozen_balance())
if not frozen_bal:
return None
return self.format_amount_and_units(frozen_bal)
def pay_onchain_dialog(
self, inputs: Sequence[PartialTxInput],
outputs: List[PartialTxOutput], *,
external_keypairs=None) -> None:
# trustedcoin requires this
if run_hook('abort_send', self):
return
is_sweep = bool(external_keypairs)
make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(
coins=inputs,
outputs=outputs,
fee=fee_est,
is_sweep=is_sweep)
output_values = [x.value for x in outputs]
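# If any output uses a "max" spend amount, pass the '!' sentinel to the confirm
# dialog so it displays the spend-all amount instead of a fixed sum.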
if any(parse_max_spend(outval) for outval in output_values):
output_value = '!'
else:
output_value = sum(output_values)
conf_dlg = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=output_value, is_sweep=is_sweep)
if conf_dlg.not_enough_funds:
# Check if we had enough funds excluding fees,
# if so, still provide opportunity to set lower fees.
if not conf_dlg.have_enough_funds_assuming_zero_fees():
text = self.get_text_not_enough_funds_mentioning_frozen()
self.show_message(text)
return
# shortcut to advanced preview (after "enough funds" check!)
if self.config.get('advanced_preview'):
preview_dlg = PreviewTxDialog(
window=self,
make_tx=make_tx,
external_keypairs=external_keypairs,
output_value=output_value)
preview_dlg.show()
return
cancelled, is_send, password, tx = conf_dlg.run()
if cancelled:
return
if is_send:
self.save_pending_invoice()
def sign_done(success):
if success:
self.broadcast_or_show(tx)
self.sign_tx_with_password(tx, callback=sign_done, password=password,
external_keypairs=external_keypairs)
else:
preview_dlg = PreviewTxDialog(
window=self,
make_tx=make_tx,
external_keypairs=external_keypairs,
output_value=output_value)
preview_dlg.show()
def broadcast_or_show(self, tx: Transaction):
if not tx.is_complete():
self.show_transaction(tx)
return
if not self.network:
self.show_error(_("You can't broadcast a transaction without a live network connection."))
self.show_transaction(tx)
return
self.broadcast_transaction(tx)
@protected
def sign_tx(self, tx, *, callback, external_keypairs, password):
self.sign_tx_with_password(tx, callback=callback, password=password, external_keypairs=external_keypairs)
def sign_tx_with_password(self, tx: PartialTransaction, *, callback, password, external_keypairs=None):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
def on_success(result):
callback(True)
def on_failure(exc_info):
self.on_error(exc_info)
callback(False)
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
if external_keypairs:
# can sign directly
task = partial(tx.sign, external_keypairs)
else:
task = partial(self.wallet.sign_transaction, tx, password)
msg = _('Signing transaction...')
WaitingDialog(self, msg, task, on_success, on_failure)
def broadcast_transaction(self, tx: Transaction):
def broadcast_thread():
# non-GUI thread
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Invoice has expired")
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
return False, e.get_message_for_gui()
except BestEffortRequestFailed as e:
return False, repr(e)
# success
txid = tx.txid()
if pr:
self.payment_request = None
refund_address = self.wallet.get_receiving_address()
coro = pr.send_payment_and_receive_paymentack(tx.serialize(), refund_address)
fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
ack_status, ack_msg = fut.result(timeout=20)
self.logger.info(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
return True, txid
# Capture current TL window; override might be removed on return
parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
def broadcast_done(result):
# GUI thread
if result:
success, msg = result
if success:
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
else:
msg = msg or ''
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def mktx_for_open_channel(self, *, funding_sat, node_id):
coins = self.get_coins(nonlocal_only=True)
make_tx = lambda fee_est: self.wallet.lnworker.mktx_for_open_channel(
coins=coins,
funding_sat=funding_sat,
node_id=node_id,
fee_est=fee_est)
return make_tx
def open_channel(self, connect_str, funding_sat, push_amt):
try:
node_id, rest = extract_nodeid(connect_str)
except ConnStringFormatError as e:
self.show_error(str(e))
return
if self.wallet.lnworker.has_conflicting_backup_with(node_id):
msg = messages.MGS_CONFLICTING_BACKUP_INSTANCE
if not self.question(msg):
return
# use ConfirmTxDialog
# we need to know the fee before we broadcast, because the txid is required
make_tx = self.mktx_for_open_channel(funding_sat=funding_sat, node_id=node_id)
d = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=funding_sat, is_sweep=False)
# disable preview button because the user must not broadcast tx before establishment_flow
d.preview_button.setEnabled(False)
cancelled, is_send, password, funding_tx = d.run()
if not is_send:
return
if cancelled:
return
# read funding_sat from tx; converts '!' to int value
funding_sat = funding_tx.output_value_for_address(ln_dummy_address())
def task():
return self.wallet.lnworker.open_channel(
connect_str=connect_str,
funding_tx=funding_tx,
funding_sat=funding_sat,
push_amt_sat=push_amt,
password=password)
def on_failure(exc_info):
type_, e, traceback = exc_info
self.show_error(_('Could not open channel: {}').format(repr(e)))
WaitingDialog(self, _('Opening channel...'), task, self.on_open_channel_success, on_failure)
def on_open_channel_success(self, args):
chan, funding_tx = args
lnworker = self.wallet.lnworker
if not chan.has_onchain_backup():
backup_dir = self.config.get_backup_dir()
if backup_dir is not None:
self.show_message(_('Your wallet backup has been updated in {}').format(backup_dir))
else:
data = lnworker.export_channel_backup(chan.channel_id)
help_text = _(messages.MSG_CREATED_NON_RECOVERABLE_CHANNEL)
self.show_qrcode(
data, _('Save channel backup'),
help_text=help_text,
show_copy_text_btn=True)
n = chan.constraints.funding_txn_minimum_depth
message = '\n'.join([
_('Channel established.'),
_('Remote peer ID') + ':' + chan.node_id.hex(),
_('This channel will be usable after {} confirmations').format(n)
])
if not funding_tx.is_complete():
message += '\n\n' + _('Please sign and broadcast the funding transaction')
self.show_message(message)
self.show_transaction(funding_tx)
else:
self.show_message(message)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b: bool) -> None:
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.message_e]:
e.setFrozen(True)
self.lock_amount(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoices(self, keys):
for key in keys:
self.wallet.delete_invoice(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
if not pr:
return
key = pr.get_id()
invoice = self.wallet.get_invoice(key)
if invoice and self.wallet.get_invoice_status(invoice) == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setAmount(pr.get_amount())
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
pr = self.payment_request
if not pr:
return
self.show_message(pr.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request: 'paymentrequest.PaymentRequest'):
self.set_onchain(True)
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def set_ln_invoice(self, invoice: str):
"""Parse ln invoice, and prepare the send tab for it."""
try:
lnaddr = lndecode(invoice)
except LnInvoiceException as e:
self.show_error(_("Error parsing Lightning invoice") + f":\n{e}")
return
pubkey = bh2u(lnaddr.pubkey.serialize())
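# In a BOLT11 invoice, the tagged field 'd' carries the human-readable description.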
for k,v in lnaddr.tags:
if k == 'd':
description = v
break
else:
description = ''
self.payto_e.setFrozen(True)
self.payto_e.setText(pubkey)
self.payto_e.lightning_invoice = invoice
self.message_e.setText(description)
if lnaddr.get_amount_sat() is not None:
self.amount_e.setAmount(lnaddr.get_amount_sat())
self.set_onchain(False)
def set_onchain(self, b):
self._is_onchain = b
self.max_button.setEnabled(b)
def set_bip21(self, text: str):
try:
out = util.parse_URI(text, self.on_pr)
except InvalidBitcoinURI as e:
self.show_error(_("Error parsing URI") + f":\n{e}")
return
self.payto_URI = out
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
def pay_to_URI(self, text: str):
if not text:
return
# first interpret as lightning invoice
bolt11_invoice = maybe_extract_bolt11_invoice(text)
if bolt11_invoice:
self.set_ln_invoice(bolt11_invoice)
else:
self.set_bip21(text)
# update fiat amount
self.amount_e.textEdited.emit("")
self.show_send_tab()
def do_clear(self):
self.max_button.setChecked(False)
self.payment_request = None
self.payto_URI = None
self.payto_e.is_pr = False
self.set_onchain(False)
for e in [self.payto_e, self.message_e, self.amount_e]:
e.setText('')
e.setFrozen(False)
self.update_status()
run_hook('do_clear', self)
def set_frozen_state_of_addresses(self, addrs, freeze: bool):
self.wallet.set_frozen_state_of_addresses(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
def set_frozen_state_of_coins(self, utxos: Sequence[PartialTxInput], freeze: bool):
utxos_str = {utxo.prevout.to_str() for utxo in utxos}
self.wallet.set_frozen_state_of_coins(utxos_str, freeze)
self.utxo_list.update()
def create_list_tab(self, l, toolbar=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
#vbox.setContentsMargins(0, 0, 0, 0)
#vbox.setSpacing(0)
if toolbar:
vbox.addLayout(toolbar)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
toolbar = l.create_toolbar(self.config)
tab = self.create_list_tab(l, toolbar)
toolbar_shown = bool(self.config.get('show_toolbar_addresses', False))
l.show_toolbar(toolbar_shown)
return tab
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = UTXOList(self)
return self.create_list_tab(self.utxo_list)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if not self.question(_("Do you want to remove {} from your wallet?").format(addr)):
return
try:
self.wallet.delete_address(addr)
except UserFacingException as e:
self.show_error(str(e))
else:
self.need_update.set() # history, addresses, coins
self.clear_receive_tab()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove {} from your list of contacts?")
.format(" + ".join(labels))):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_onchain_invoice(self, invoice: OnchainInvoice):
amount_str = self.format_amount(invoice.amount_sat) + ' ' + self.base_unit()
d = WindowModalDialog(self, _("Onchain Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
grid.addWidget(QLabel(amount_str), 1, 1)
if len(invoice.outputs) == 1:
grid.addWidget(QLabel(_("Address") + ':'), 2, 0)
grid.addWidget(QLabel(invoice.get_address()), 2, 1)
else:
outputs_str = '\n'.join(map(lambda x: x.address + ' : ' + self.format_amount(x.value)+ self.base_unit(), invoice.outputs))
grid.addWidget(QLabel(_("Outputs") + ':'), 2, 0)
grid.addWidget(QLabel(outputs_str), 2, 1)
grid.addWidget(QLabel(_("Description") + ':'), 3, 0)
grid.addWidget(QLabel(invoice.message), 3, 1)
if invoice.exp:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(invoice.exp + invoice.time)), 4, 1)
if invoice.bip70:
pr = paymentrequest.PaymentRequest(bytes.fromhex(invoice.bip70))
pr.verify(self.contacts)
grid.addWidget(QLabel(_("Requestor") + ':'), 5, 0)
grid.addWidget(QLabel(pr.get_requestor()), 5, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 6, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 6, 1)
def do_export():
key = pr.get_id()
name = str(key) + '.bip70'
fn = getSaveFileName(
parent=self,
title=_("Save invoice to file"),
filename=name,
filter="*.bip70",
config=self.config,
)
if not fn:
return
with open(fn, 'wb') as f:
data = f.write(pr.raw)
self.show_message(_('BIP70 invoice saved as {}').format(fn))
exportButton = EnterButton(_('Export'), do_export)
buttons = Buttons(exportButton, CloseButton(d))
else:
buttons = Buttons(CloseButton(d))
vbox.addLayout(grid)
vbox.addLayout(buttons)
d.exec_()
def show_lightning_invoice(self, invoice: LNInvoice):
lnaddr = lndecode(invoice.invoice)
d = WindowModalDialog(self, _("Lightning Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Node ID") + ':'), 0, 0)
grid.addWidget(QLabel(lnaddr.pubkey.serialize().hex()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
amount_str = self.format_amount(invoice.get_amount_sat()) + ' ' + self.base_unit()
grid.addWidget(QLabel(amount_str), 1, 1)
grid.addWidget(QLabel(_("Description") + ':'), 2, 0)
grid.addWidget(QLabel(invoice.message), 2, 1)
grid.addWidget(QLabel(_("Hash") + ':'), 3, 0)
payhash_e = ButtonsLineEdit(lnaddr.paymenthash.hex())
payhash_e.addCopyButton(self.app)
payhash_e.setReadOnly(True)
vbox.addWidget(payhash_e)
grid.addWidget(payhash_e, 3, 1)
if invoice.exp:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(invoice.time + invoice.exp)), 4, 1)
vbox.addLayout(grid)
invoice_e = ShowQRTextEdit(config=self.config)
invoice_e.addCopyButton(self.app)
invoice_e.setText(invoice.invoice)
vbox.addWidget(invoice_e)
vbox.addLayout(Buttons(CloseButton(d),))
d.exec_()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.wallet.db.get("qt-console-history", [])
console.history_index = len(console.history)
console.updateNamespace({
'wallet': self.wallet,
'network': self.network,
'plugins': self.gui_object.plugins,
'window': self,
'config': self.config,
'electrum': electrum,
'daemon': self.gui_object.daemon,
'util': util,
'bitcoin': bitcoin,
'lnutil': lnutil,
})
c = commands.Commands(
config=self.config,
daemon=self.gui_object.daemon,
network=self.network,
callback=lambda: self.console.set_json(True))
methods = {}
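# Wrap every Commands method so that calling it from the console runs c._run
# with this window's password dialog and wallet injected automatically.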
def mkfunc(f, method):
return lambda *args, **kwargs: f(method,
args,
self.password_dialog,
**{**kwargs, 'wallet': self.wallet})
for m in dir(c):
if m[0]=='_' or m in ['network','wallet','config','daemon']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
self.balance_label = QLabel("Loading wallet...")
self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.update_check_button = QPushButton("")
self.update_check_button.setFlat(True)
self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor))
self.update_check_button.setIcon(read_QIcon("update.png"))
self.update_check_button.hide()
sb.addPermanentWidget(self.update_check_button)
self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog)
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog))
self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog)
sb.addPermanentWidget(self.seed_button)
self.lightning_button = StatusBarButton(read_QIcon("lightning.png"), _("Lightning Network"), self.gui_object.show_lightning_dialog)
sb.addPermanentWidget(self.lightning_button)
self.update_lightning_icon()
self.status_button = None
if self.network:
self.status_button = StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"), self.gui_object.show_network_dialog)
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def create_coincontrol_statusbar(self):
self.coincontrol_sb = sb = QStatusBar()
sb.setSizeGripEnabled(False)
#sb.setFixedHeight(3 * char_width_in_lineedit())
sb.setStyleSheet('QStatusBar::item {border: None;} '
+ ColorScheme.GREEN.as_stylesheet(True))
self.coincontrol_label = QLabel()
self.coincontrol_label.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
self.coincontrol_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
sb.addWidget(self.coincontrol_label)
clear_cc_button = EnterButton(_('Reset'), lambda: self.utxo_list.set_spend_list(None))
clear_cc_button.setStyleSheet("margin-right: 5px;")
sb.addPermanentWidget(clear_cc_button)
sb.setVisible(False)
return sb
def set_coincontrol_msg(self, msg: Optional[str]) -> None:
if not msg:
self.coincontrol_label.setText("")
self.coincontrol_sb.setVisible(False)
return
self.coincontrol_label.setText(msg)
self.coincontrol_sb.setVisible(True)
def update_lightning_icon(self):
if not self.wallet.has_lightning():
self.lightning_button.setVisible(False)
return
if self.network is None or self.network.channel_db is None:
self.lightning_button.setVisible(False)
return
self.lightning_button.setVisible(True)
cur, total, progress_percent = self.network.lngossip.get_sync_progress_estimate()
# self.logger.debug(f"updating lngossip sync progress estimate: cur={cur}, total={total}")
progress_str = "??%"
if progress_percent is not None:
progress_str = f"{progress_percent}%"
if progress_percent and progress_percent >= 100:
self.lightning_button.setMaximumWidth(25)
self.lightning_button.setText('')
self.lightning_button.setToolTip(_("The Lightning Network graph is fully synced."))
else:
self.lightning_button.setMaximumWidth(25 + 5 * char_width_in_lineedit())
self.lightning_button.setText(progress_str)
self.lightning_button.setToolTip(_("The Lightning Network graph is syncing...\n"
"Payments are more likely to succeed with a more complete graph."))
def update_lock_icon(self):
icon = read_QIcon("lock.png") if self.wallet.has_password() else read_QIcon("unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.may_have_password())
def change_password_dialog(self):
from electrum.storage import StorageEncryptionVersion
if self.wallet.get_available_storage_encryption_version() == StorageEncryptionVersion.XPUB_PASSWORD:
from .password_dialog import ChangePasswordDialogForHW
d = ChangePasswordDialogForHW(self, self.wallet)
ok, encrypt_file = d.run()
if not ok:
return
try:
hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
except UserCancelled:
return
except BaseException as e:
self.logger.exception('')
self.show_error(repr(e))
return
old_password = hw_dev_pw if self.wallet.has_password() else None
new_password = hw_dev_pw if encrypt_file else None
else:
from .password_dialog import ChangePasswordDialogForSW
d = ChangePasswordDialogForSW(self, self.wallet)
ok, old_password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(old_password, new_password, encrypt_storage=encrypt_file)
except InvalidPassword as e:
self.show_error(str(e))
return
except BaseException:
self.logger.exception('Failed to update password')
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(32 * char_width_in_lineedit())
line2 = QLineEdit()
line2.setFixedWidth(32 * char_width_in_lineedit())
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def init_lightning_dialog(self, dialog):
assert not self.wallet.has_lightning()
if self.wallet.can_have_deterministic_lightning():
msg = _(
"Lightning is not enabled because this wallet was created with an old version of Electrum. "
"Create lightning keys?")
else:
msg = _(
"Warning: this wallet type does not support channel recovery from seed. "
"You will need to backup your wallet everytime you create a new wallet. "
"Create lightning keys?")
if self.question(msg):
self._init_lightning_dialog(dialog=dialog)
@protected
def _init_lightning_dialog(self, *, dialog, password):
dialog.close()
self.wallet.init_lightning(password=password)
self.update_lightning_icon()
self.show_message(_('Lightning keys have been initialized.'))
def show_wallet_info(self):
dialog = WindowModalDialog(self, _("Wallet Information"))
dialog.setMinimumSize(800, 100)
vbox = QVBoxLayout()
wallet_type = self.wallet.db.get('wallet_type', '')
if self.wallet.is_watching_only():
wallet_type += ' [{}]'.format(_('watching-only'))
seed_available = _('False')
if self.wallet.has_seed():
seed_available = _('True')
ks = self.wallet.keystore
assert isinstance(ks, keystore.Deterministic_KeyStore)
seed_available += f" ({ks.get_seed_type()})"
keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(WWLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(WWLabel(basename), 0, 1)
grid.addWidget(WWLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(WWLabel(wallet_type), 1, 1)
grid.addWidget(WWLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(WWLabel(self.wallet.txin_type), 2, 1)
grid.addWidget(WWLabel(_("Seed available") + ':'), 3, 0)
grid.addWidget(WWLabel(str(seed_available)), 3, 1)
if len(keystore_types) <= 1:
grid.addWidget(WWLabel(_("Keystore type") + ':'), 4, 0)
ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
grid.addWidget(WWLabel(ks_type), 4, 1)
# lightning
grid.addWidget(WWLabel(_('Lightning') + ':'), 5, 0)
from .util import IconLabel
if self.wallet.has_lightning():
if self.wallet.lnworker.has_deterministic_node_id():
grid.addWidget(WWLabel(_('Enabled')), 5, 1)
else:
label = IconLabel(text='Enabled, non-recoverable channels')
label.setIcon(read_QIcon('nocloud'))
grid.addWidget(label, 5, 1)
if self.wallet.db.get('seed_type') == 'segwit':
msg = _("Your channels cannot be recovered from seed, because they were created with an old version of Electrum. "
"This means that you must save a backup of your wallet everytime you create a new channel.\n\n"
"If you want this wallet to have recoverable channels, you must close your existing channels and restore this wallet from seed")
else:
msg = _("Your channels cannot be recovered from seed. "
"This means that you must save a backup of your wallet everytime you create a new channel.\n\n"
"If you want to have recoverable channels, you must create a new wallet with an Electrum seed")
grid.addWidget(HelpButton(msg), 5, 3)
grid.addWidget(WWLabel(_('Lightning Node ID:')), 7, 0)
# TODO: ButtonsLineEdit should have a addQrButton method
nodeid_text = self.wallet.lnworker.node_keypair.pubkey.hex()
nodeid_e = ButtonsLineEdit(nodeid_text)
qr_icon = "qrcode_white.png" if ColorScheme.dark_scheme else "qrcode.png"
nodeid_e.addButton(qr_icon, lambda: self.show_qrcode(nodeid_text, _("Node ID")), _("Show QR Code"))
nodeid_e.addCopyButton(self.app)
nodeid_e.setReadOnly(True)
nodeid_e.setFont(QFont(MONOSPACE_FONT))
grid.addWidget(nodeid_e, 8, 0, 1, 4)
else:
if self.wallet.can_have_lightning():
grid.addWidget(WWLabel('Not enabled'), 5, 1)
button = QPushButton(_("Enable"))
button.pressed.connect(lambda: self.init_lightning_dialog(dialog))
grid.addWidget(button, 5, 3)
else:
grid.addWidget(WWLabel(_("Not available for this wallet.")), 5, 1)
grid.addWidget(HelpButton(_("Lightning is currently restricted to HD wallets with p2wpkh addresses.")), 5, 2)
vbox.addLayout(grid)
labels_clayout = None
if self.wallet.is_deterministic():
keystores = self.wallet.get_keystores()
ks_stack = QStackedWidget()
def select_ks(index):
ks_stack.setCurrentIndex(index)
# only show the combobox in case multiple accounts are available
if len(keystores) > 1:
def label(idx, ks):
if isinstance(self.wallet, Multisig_Wallet) and hasattr(ks, 'label'):
return _("cosigner") + f' {idx+1}: {ks.get_type_text()} {ks.label}'
else:
return _("keystore") + f' {idx+1}'
labels = [label(idx, ks) for idx, ks in enumerate(self.wallet.get_keystores())]
on_click = lambda clayout: select_ks(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Select keystore"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
for ks in keystores:
ks_w = QWidget()
ks_vbox = QVBoxLayout()
ks_vbox.setContentsMargins(0, 0, 0, 0)
ks_w.setLayout(ks_vbox)
mpk_text = ShowQRTextEdit(ks.get_master_public_key(), config=self.config)
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton(self.app)
run_hook('show_xpub_button', mpk_text, ks)
der_path_hbox = QHBoxLayout()
der_path_hbox.setContentsMargins(0, 0, 0, 0)
der_path_hbox.addWidget(WWLabel(_("Derivation path") + ':'))
der_path_text = WWLabel(ks.get_derivation_prefix() or _("unknown"))
der_path_text.setTextInteractionFlags(Qt.TextSelectableByMouse)
der_path_hbox.addWidget(der_path_text)
der_path_hbox.addStretch()
ks_vbox.addWidget(WWLabel(_("Master Public Key")))
ks_vbox.addWidget(mpk_text)
ks_vbox.addLayout(der_path_hbox)
ks_stack.addWidget(ks_w)
select_ks(0)
vbox.addWidget(ks_stack)
vbox.addStretch(1)
btn_export_info = run_hook('wallet_info_buttons', self, dialog)
btn_close = CloseButton(dialog)
btns = Buttons(btn_export_info, btn_close)
vbox.addLayout(btns)
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
r = self.gui_object.daemon.delete_wallet(wallet_path)
self.close()
if r:
self.show_error(_("Wallet removed: {}").format(basename))
else:
self.show_error(_("Wallet file not found: {}").format(basename))
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(repr(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase, config=self.config)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None, *,
help_text=None, show_copy_text_btn=False):
if not data:
return
d = QRDialog(
data=data,
parent=parent or self,
title=title,
help_text=help_text,
show_copy_text_btn=show_copy_text_btn,
config=self.config,
)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk = self.wallet.export_private_key(address, password)
except Exception as e:
self.logger.exception('')
self.show_message(repr(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Address") + ': ' + address))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk, config=self.config)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(address):
self.show_message(_('Address not in wallet.'))
return
txin_type = self.wallet.get_txin_type(address)
if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
self.show_message(_('Cannot sign messages with this type of address:') + \
' ' + txin_type + '\n\n' + self.msg_sign)
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
try:
signature.setText(base64.b64encode(sig).decode('ascii'))
except RuntimeError:
# (signature) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = ecc.verify_message_with_address(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
signature_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
def setText(text):
try:
message_e.setText(text.decode('utf-8'))
except RuntimeError:
# (message_e) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
except BaseException as e:
self.logger.exception('Invalid Public key')
self.show_warning(_('Invalid Public key'))
return
encrypted = public_key.encrypt_message(message)
encrypted_e.setText(encrypted.decode('ascii'))
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
encrypted_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, data: Union[str, bytes]) -> Union[None, 'PartialTransaction', 'Transaction']:
from electrum.transaction import tx_from_any
try:
return tx_from_any(data)
except BaseException as e:
self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + repr(e))
return
def import_channel_backup(self, encrypted: str):
if not self.question('Import channel backup?'):
return
try:
self.wallet.lnworker.import_channel_backup(encrypted)
except Exception as e:
self.show_error("failed to import backup" + '\n' + str(e))
return
def read_tx_from_qrcode(self):
def cb(success: bool, error: str, data):
if not success:
if error:
self.show_error(error)
return
if not data:
return
# if the user scanned a bitcoin URI
if data.lower().startswith(BITCOIN_BIP21_URI_SCHEME + ':'):
self.pay_to_URI(data)
return
if data.lower().startswith('channel_backup:'):
self.import_channel_backup(data)
return
# else if the user scanned an offline signed tx
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
scan_qrcode(parent=self.top_level_window(), config=self.config, callback=cb)
def read_tx_from_file(self) -> Optional[Transaction]:
fileName = getOpenFileName(
parent=self,
title=_("Select your transaction file"),
filter=TRANSACTION_FILE_EXTENSION_FILTER_ANY,
config=self.config,
)
if not fileName:
return
try:
with open(fileName, "rb") as f:
file_content = f.read() # type: Union[str, bytes]
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason),
title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(
parent=self,
title=_('Input raw transaction'),
header_layout=_("Transaction:"),
ok_label=_("Load transaction"),
config=self.config,
)
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_text_channel_backup(self):
text = text_dialog(
parent=self,
title=_('Input channel backup'),
header_layout=_("Channel Backup:"),
ok_label=_("Load backup"),
config=self.config,
)
if not text:
return
if text.startswith('channel_backup:'):
self.import_channel_backup(text)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from electrum import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
raw_tx = self._fetch_tx_from_network(txid)
if not raw_tx:
return
tx = transaction.Transaction(raw_tx)
self.show_transaction(tx)
def _fetch_tx_from_network(self, txid: str) -> Optional[str]:
if not self.network:
self.show_message(_("You are offline."))
return
try:
raw_tx = self.network.run_from_another_thread(
self.network.get_transaction(txid, timeout=10))
except UntrustedServerReturnedError as e:
self.logger.info(f"Error getting transaction from network: {repr(e)}")
self.show_message(_("Error getting transaction from network") + ":\n" + e.get_message_for_gui())
return
except Exception as e:
self.show_message(_("Error getting transaction from network") + ":\n" + repr(e))
return
return raw_tx
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It cannot be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(980, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
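# The keys are exported in a background thread; progress and completion are
# reported back to the GUI thread via computing_privkeys_signal / show_privkeys_signal.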
def privkeys_thread():
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
privkey = self.wallet.export_private_key(addr, password)
private_keys[addr] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join(map(lambda x: x[0] + "\t"+ x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electrum was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(repr(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
os.chmod(fileName, 0o600)
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
def on_import():
self.need_update.set()
import_meta_gui(self, _('labels'), self.wallet.import_labels, on_import)
def do_export_labels(self):
export_meta_gui(self, _('labels'), self.wallet.export_labels)
def import_invoices(self):
import_meta_gui(self, _('invoices'), self.wallet.import_invoices, self.invoice_list.update)
def export_invoices(self):
export_meta_gui(self, _('invoices'), self.wallet.export_invoices)
def import_requests(self):
import_meta_gui(self, _('requests'), self.wallet.import_requests, self.request_list.update)
def export_requests(self):
export_meta_gui(self, _('requests'), self.wallet.export_requests)
def import_contacts(self):
import_meta_gui(self, _('contacts'), self.contacts.import_file, self.contact_list.update)
def export_contacts(self):
export_meta_gui(self, _('contacts'), self.contacts.export_file)
def sweep_key_dialog(self):
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
hbox_top = QHBoxLayout()
hbox_top.addWidget(QLabel(_("Enter private keys:")))
hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
vbox.addLayout(hbox_top)
keys_e = ScanQRTextEdit(allow_multi=True, config=self.config)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk(*, raise_on_error=False):
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text, raise_on_error=raise_on_error)
def on_edit():
valid_privkeys = False
try:
valid_privkeys = get_pk(raise_on_error=True) is not None
except Exception as e:
button.setToolTip(f'{_("Error")}: {repr(e)}')
else:
button.setToolTip('')
button.setEnabled(get_address() is not None and valid_privkeys)
on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
keys_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_address)
on_address(str(address_e.text()))
if not d.exec_():
return
# user pressed "sweep"
addr = get_address()
try:
self.wallet.check_address_for_corruption(addr)
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
privkeys = get_pk()
def on_success(result):
coins, keypairs = result
outputs = [PartialTxOutput.from_address_and_value(addr, value='!')]
self.warn_if_watching_only()
self.pay_onchain_dialog(coins, outputs, external_keypairs=keypairs)
def on_failure(exc_info):
self.on_error(exc_info)
msg = _('Preparing sweep transaction...')
task = lambda: self.network.run_from_another_thread(
sweep_preparations(privkeys, self.network))
WaitingDialog(self, msg, task, on_success, on_failure)
def _do_import(self, title, header_layout, func):
text = text_dialog(
parent=self,
title=title,
header_layout=header_layout,
ok_label=_('Import'),
allow_multi=True,
config=self.config,
)
if not text:
return
keys = str(text).split()
good_inputs, bad_inputs = func(keys)
if good_inputs:
msg = '\n'.join(good_inputs[:10])
if len(good_inputs) > 10: msg += '\n...'
self.show_message(_("The following addresses were added")
+ f' ({len(good_inputs)}):\n' + msg)
if bad_inputs:
msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10])
if len(bad_inputs) > 10: msg += '\n...'
self.show_error(_("The following inputs could not be imported")
+ f' ({len(bad_inputs)}):\n' + msg)
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")+':'
self._do_import(title, msg, self.wallet.import_addresses)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title = _('Import private keys')
header_layout = QHBoxLayout()
header_layout.addWidget(QLabel(_("Enter private keys")+':'))
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def settings_dialog(self):
from .settings_dialog import SettingsDialog
d = SettingsDialog(self, self.config)
self.alias_received_signal.connect(d.set_alias_color)
d.exec_()
self.alias_received_signal.disconnect(d.set_alias_color)
if self.fx:
self.fx.trigger_update()
run_hook('close_settings_dialog')
if d.need_restart:
self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# note that closeEvent is NOT called if the user quits with Ctrl-C
self.clean_up()
event.accept()
def clean_up(self):
if self._cleaned_up:
return
self._cleaned_up = True
if self.wallet.thread:
self.wallet.thread.stop()
self.wallet.thread = None
util.unregister_callback(self.on_network)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.db.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.wallet.db.put("qt-console-history", self.console.history[-50:])
if self.qr_window:
self.qr_window.close()
self.close_wallet()
if self._update_check_thread:
self._update_check_thread.exit()
self._update_check_thread.wait()
if self.tray:
self.tray = None
self.gui_object.timer.timeout.disconnect(self.timer_actions)
self.gui_object.close_window(self)
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p: Optional['BasePlugin'], name: str, i: int):
widget = settings_widgets.get(name) # type: Optional[QWidget]
if widget and not p:
# plugin got disabled, rm widget
grid.removeWidget(widget)
widget.setParent(None)
settings_widgets.pop(name)
elif widget is None and p and p.requires_settings() and p.is_enabled():
# plugin got enabled, add widget
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
# note: all enabled plugins will receive this hook:
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
full_name = descr['__name__']
prefix, _separator, name = full_name.rpartition('.')
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.logger.exception(f"cannot display plugin {name}")
grid.setRowStretch(len(plugins.descriptions.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def cpfp_dialog(self, parent_tx: Transaction) -> None:
new_tx = self.wallet.cpfp(parent_tx, 0)
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
parent_txid = parent_tx.txid()
assert parent_txid
parent_fee = self.wallet.get_tx_fee(parent_txid)
if parent_fee is None:
self.show_error(_("Can't CPFP: unknown fee for parent transaction."))
return
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = _(
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(msg))
msg2 = _("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(msg2))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
combined_fee = QLabel('')
combined_feerate = QLabel('')
def on_fee_edit(x):
fee_for_child = fee_e.get_amount()
if fee_for_child is None:
return
out_amt = max_fee - fee_for_child
out_amt_str = (self.format_amount(out_amt) + ' ' + self.base_unit()) if out_amt else ''
output_amount.setText(out_amt_str)
comb_fee = parent_fee + fee_for_child
comb_fee_str = (self.format_amount(comb_fee) + ' ' + self.base_unit()) if comb_fee else ''
combined_fee.setText(comb_fee_str)
comb_feerate = comb_fee / total_size * 1000
comb_feerate_str = self.format_fee_rate(comb_feerate) if comb_feerate else ''
combined_feerate.setText(comb_feerate_str)
fee_e.textChanged.connect(on_fee_edit)
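# Solve for the child's fee such that (parent_fee + child_fee) / total_size
# equals the requested combined feerate, then clamp it to sensible bounds.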
def get_child_fee_from_total_feerate(fee_per_kb: Optional[int]) -> Optional[int]:
if fee_per_kb is None:
return None
fee = fee_per_kb * total_size / 1000 - parent_fee
fee = round(fee)
fee = min(max_fee, fee)
fee = max(total_size, fee) # pay at least 1 sat/byte for combined size
return fee
suggested_feerate = self.config.fee_per_kb()
fee = get_child_fee_from_total_feerate(suggested_feerate)
fee_e.setAmount(fee)
grid.addWidget(QLabel(_('Fee for child') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = get_child_fee_from_total_feerate(fee_rate)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_combo = FeeComboBox(fee_slider)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
grid.addWidget(fee_combo, 4, 2)
grid.addWidget(QLabel(_('Total fee') + ':'), 5, 0)
grid.addWidget(combined_fee, 5, 1)
grid.addWidget(QLabel(_('Total feerate') + ':'), 6, 0)
grid.addWidget(combined_feerate, 6, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
fee = fee_e.get_amount()
if fee is None:
return # fee left empty, treat it as "cancel"
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
try:
new_tx = self.wallet.cpfp(parent_tx, fee)
except CannotCPFP as e:
self.show_error(str(e))
return
self.show_transaction(new_tx)
def _add_info_to_tx_from_wallet_and_network(self, tx: PartialTransaction) -> bool:
"""Returns whether successful."""
# note side-effect: tx is being mutated
assert isinstance(tx, PartialTransaction)
try:
# note: this might download input utxos over network
BlockingWaitingDialog(
self,
_("Adding info to tx, from wallet and network..."),
lambda: tx.add_info_from_wallet(self.wallet, ignore_network_issues=False),
)
except NetworkException as e:
self.show_error(repr(e))
return False
return True
def bump_fee_dialog(self, tx: Transaction):
txid = tx.txid()
if not isinstance(tx, PartialTransaction):
tx = PartialTransaction.from_tx(tx)
if not self._add_info_to_tx_from_wallet_and_network(tx):
return
d = BumpFeeDialog(main_window=self, tx=tx, txid=txid)
d.run()
def dscancel_dialog(self, tx: Transaction):
txid = tx.txid()
if not isinstance(tx, PartialTransaction):
tx = PartialTransaction.from_tx(tx)
if not self._add_info_to_tx_from_wallet_and_network(tx):
return
d = DSCancelDialog(main_window=self, tx=tx, txid=txid)
d.run()
def save_transaction_into_wallet(self, tx: Transaction):
win = self.top_level_window()
try:
if not self.wallet.add_transaction(tx):
win.show_error(_("Transaction could not be saved.") + "\n" +
_("It conflicts with current history."))
return False
except AddTransactionException as e:
win.show_error(e)
return False
else:
self.wallet.save_db()
# need to update at least: history_list, utxo_list, address_list
self.need_update.set()
msg = (_("Transaction added to wallet history.") + '\n\n' +
_("Note: this is an offline transaction, if you want the network "
"to see it, you need to broadcast it."))
win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg)
return True
def show_cert_mismatch_error(self):
if self.showing_cert_mismatch_error:
return
self.showing_cert_mismatch_error = True
self.show_critical(title=_("Certificate mismatch"),
msg=_("The SSL certificate provided by the main server did not match the fingerprint passed in with the --serverfingerprint option.") + "\n\n" +
_("Electrum will now exit."))
self.showing_cert_mismatch_error = False
self.close()
|
test_basic.py
|
from threading import Thread
from time import sleep
import pytest
from busypie import wait, wait_at_most, ConditionTimeoutError, \
FIVE_HUNDRED_MILLISECONDS, MILLISECOND
def test_wait_until_condition_passed():
countdown = CountDown()
countdown.start_from(3)
wait().until(lambda: countdown.done)
assert countdown.done
@pytest.mark.timeout(1)
def test_timeout_when_condition_did_not_meet_in_time():
with pytest.raises(ConditionTimeoutError):
wait().at_most(FIVE_HUNDRED_MILLISECONDS).until(lambda: 1 == 2)
with pytest.raises(ConditionTimeoutError):
wait().at_most(100, MILLISECOND).until(lambda: False)
with pytest.raises(ConditionTimeoutError):
wait_at_most(100, MILLISECOND).until(lambda: 'Pizza' == 'Pie')
class CountDown:
done = False
def start_from(self, start):
Thread(target=self._update_after, args=(start,)).start()
def _update_after(self, start):
for _ in range(start, 0, -1):
sleep(0.1)
self.done = True
|
connection.py
|
from logging import getLogger
from selectors import DefaultSelector, EVENT_READ
from socket import socket, SO_REUSEADDR, SOL_SOCKET
from struct import calcsize, pack, unpack
from threading import Lock, Thread
from time import sleep
class Connection:
"""
Provides an interface to a multi-threaded socket that handles network I/O
without blocking the execution of the main program.
"""
HEADER_VERSION = 1
HEADER_PACK_STR = "II" # version, length
HEADER_SIZE = calcsize(HEADER_PACK_STR)
CONNECT_ATTEMPTS = 3
SELECT_TIMEOUT_INTERVAL = 0.3
def __init__(self, controller, send_queue, receive_queue):
"""
Put the connection in an uninitialized, inactive, state.
"""
self.socket = None
self.listener = None
self.socket_lock = None
self.controller = controller
self.send_queue = send_queue
self.receive_queue = receive_queue
@property
def active(self):
"""
Boolean property that is true if the socket has an active connection,
false otherwise.
"""
return self.socket is not None
def startup_accept(self, port):
"""
Start a listening thread to wait on an incoming connection.
"""
self._create_and_run_thread(self._wait_for_connection, (port,))
def startup_connect(self, port, ip_address):
"""
Start a connecting thread to connect to another socket.
"""
self._create_and_run_thread(self._connect_to_peer,
(port, ip_address))
def _create_and_run_thread(self, thread_target, thread_args):
"""
Create a thread with the given target and arguments.
"""
if not self.active:
t = Thread(target = thread_target, args = thread_args)
t.start()
def _wait_for_connection(self, port, *args):
"""
Open a listening socket to wait for incoming peer connection.
TODO CJR: Applications can lock up endlessly here if windows are
closed while waiting for a connection. I should have some mechanism in
close to force this to end.
"""
getLogger(__name__).info("Waiting for connection...")
getLogger(__name__).debug("Listening on port: {}".format(port))
self.listener = self._create_new_socket()
self.listener.bind(("", port))
self.listener.listen(1)
conn = None
try:
conn, addr = self.listener.accept()
except OSError as err: # expected error if the listener socket is
pass # closed during the accept call
if conn is not None:
self._set_socket(conn)
if self.active:
self.controller.start_processing_receive_queue()
self.start()
getLogger(__name__).info("Connection accepted.")
getLogger(__name__).debug("Connected to peer at {}:{}"
.format(addr[0], addr[1]))
else:
getLogger(__name__).warning("No connection was established.")
def _connect_to_peer(self, port, ip_address):
"""
Create a socket and attempt to connect to a waiting peer.
"""
getLogger(__name__).info("Attempting to connect...")
getLogger(__name__).debug("Peer at {}:{}".format(ip_address, port))
conn = self._create_new_socket()
connected = False
for i in range(self.CONNECT_ATTEMPTS):
try:
conn.connect((ip_address, port))
connected = True
break
except (ConnectionRefusedError, OSError):
getLogger(__name__).debug("Attempt {}/{} failed"
.format(i + 1, self.CONNECT_ATTEMPTS))
if i < self.CONNECT_ATTEMPTS - 1: # no need to wait after the final attempt
sleep(i + 1)
if connected:
self._set_socket(conn)
self.controller.start_processing_receive_queue()
self.start()
getLogger(__name__).info("Connection established.")
else:
getLogger(__name__).warning("No connection was established.")
def _set_socket(self, socket):
"""
Change any options needed to the socket and initialize the other data
structures needed for sending and receiving.
"""
socket.setblocking(False)
self.socket = socket
self.socket_lock = Lock()
def _create_new_socket(self):
"""
Return a socket with the re-use option set.
"""
sock = socket()
sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, True)
return sock
def start(self):
"""
Begin the sending and receiving threads for normal operation.
"""
if self.active:
Thread(target = self._send).start()
Thread(target = self._receive).start()
def close(self):
"""
Release resources held by the connection, putting it back into an
uninitialized state.
"""
if self.listener is not None:
self.listener.close()
self.listener = None
getLogger(__name__).info("Listener closed.")
if self.active:
with self.socket_lock:
self.socket.close()
self.socket = None
self.socket_lock = None
self.send_queue.put(None) # release the send thread
self.receive_queue.put(None) # release the processing thread
getLogger(__name__).info("Connection closed.")
def get_incoming_data(self):
"""
Blocking get from the receive queue, returns None if the connection is
not active.
"""
result = None
if self.active:
result = self.receive_queue.get()
return result
def _send(self):
"""
Loop retrieving data from the send queue and sending it on the socket.
"""
getLogger(__name__).debug("Send thread starting.")
while self.active:
try:
data = self._get_data_from_send_queue()
if self.socket is not None:
header = self._create_data_header(data)
with self.socket_lock:
self.socket.sendall(header + data)
except Exception as err:
getLogger(__name__).warning(("Unexpected exception occurred,"
" send thread may be in a corrupted state\n"
"Error: {}".format(err)))
getLogger(__name__).debug("Send thread done.")
def _get_data_from_send_queue(self):
"""
Retrieve data from the queue. If there is more than a single item,
retrieve multiple pieces of data to improve throughput.
The queue is not guaranteed to be empty after this method, because of
multi-processing new items could be enqueued between the size check
and the creation of the data list.
"""
size = self.send_queue.qsize()
if size > 1:
data = b''.join([self.send_queue.get() for _ in range(size)])
else:
data = self.send_queue.get()
return data
def _receive(self):
"""
Continuously read data from the socket and put it on the receive queue.
"""
selector = DefaultSelector()
selector.register(self.socket, EVENT_READ)
getLogger(__name__).debug("Receive thread starting.")
while self.active:
try:
val = selector.select(self.SELECT_TIMEOUT_INTERVAL)
if val:
with self.socket_lock:
header = None # protecting against close error
if self.socket is not None:
header = self.socket.recv(self.HEADER_SIZE)
if header:
data = self._read_data(header)
self.receive_queue.put(data)
else: # connection closed from other end
self.controller.disconnect()
except Exception as err:
getLogger(__name__).warning(("Unexpected exception occurred,"
" receive thread may be in a corrupted state\n"
"Error: {}".format(err)))
getLogger(__name__).debug("Receive thread done.")
def _create_data_header(self, data):
"""
Create a bytes header for variable-sized data messages.
"""
return pack(self.HEADER_PACK_STR, self.HEADER_VERSION, len(data))
def _read_data(self, header):
"""
Use the header to read the body of the message from the socket.
"""
_, msg_size = unpack(self.HEADER_PACK_STR, header)
with self.socket_lock:
data = self.socket.recv(msg_size)
return data
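# --- Illustrative framing sketch (not part of the original module) ---
# Shows how _create_data_header and _read_data cooperate: every payload is
# prefixed with a fixed-size (version, length) header, so the receiver can
# read exactly HEADER_SIZE bytes, unpack the length, then read the body.
if __name__ == "__main__":
    payload = b"hello"
    header = pack(Connection.HEADER_PACK_STR, Connection.HEADER_VERSION, len(payload))
    version, length = unpack(Connection.HEADER_PACK_STR, header)
    assert (version, length) == (Connection.HEADER_VERSION, len(payload))
    print("{} byte header framing a {} byte payload".format(Connection.HEADER_SIZE, length))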
|
phone.py
|
from threading import Event, Thread
from Queue import Queue, Empty
from serial import Serial
from time import sleep
import logging
import string
import shlex
def has_nonascii(s):
ascii_chars = string.ascii_letters+string.digits+"!@#$%^&*()_+\|{}[]-_=+'\",.<>?:; "
return any(char not in ascii_chars for char in s)
def is_csv(s):
return "," in s
class ATError(Exception):
def __init__(self, expected=None, received=None):
self.received = received
self.expected = expected
message = "Expected {}, got {}".format(expected, repr(received))
Exception.__init__(self, message)
self.received = received
class Modem():
read_buffer_size = 1000
read_timeout = 0.2
unexpected_queue = None
manufacturer = None
model = None
linesep = '\r\n'
ok_response = 'OK'
error_response = 'ERROR'
clcc_header = "+CLCC:"
def __init__(self, serial_path="/dev/ttyAMA0", timeout=0.2, monitor=True):
self.serial_path = serial_path
self.read_timeout = timeout
self.executing_command = Event()
self.should_monitor = Event()
self.unexpected_queue = Queue()
if monitor: self.start_monitoring()
def init_modem(self):
self.port = Serial(self.serial_path, 115200, timeout=self.read_timeout)
self.at()
self.enable_verbosity()
print("Battery voltage is: {}".format(self.get_voltage()))
self.manufacturer = self.at_command("AT+CGMI")
self.model = self.at_command("AT+CGMM")
self.at_command("AT+CLIP=1")
self.save_settings()
def save_settings(self):
self.at_command("AT&W")
def enable_verbosity(self):
return self.at_command('AT+CMEE=1')
def at(self):
response = self.at_command('AT')
if response is True: return
raise ATError(expected=self.ok_response, received=response)
def get_voltage(self):
answer = self.at_command('AT+CBC')
if not answer.startswith('+CBC'): return 0.0
voltage_str = answer.split(':')[1].split(',')[2]
voltage = round(int(voltage_str)/1000.0, 2)
return voltage
def process_clcc(self, clcc_line):
if clcc_line.startswith(self.clcc_header):
clcc_line = clcc_line[len(self.clcc_header):]
clcc_line = clcc_line.strip()
elements = shlex.split(clcc_line, ',')
if len(elements) < 8:
print("Unrecognized number of CLCC elements!")
print(repr(elements))
return
elif len(elements) > 8:
print("Too much CLCC elements!")
elements = elements[:8]
def call(self, number):
return self.at_command("ATD{};".format(number))
def hangup(self):
return self.at_command("ATH", noresponse=True)
def answer(self):
return self.at_command("ATA")
#Callbacks - to be overridden
def on_active_call(self):
print("Call is active - is it ever?")
def on_ring(self):
print("Ring ring ring bananaphone!")
def on_dialing(self):
print("Hope somebody answers...")
def on_busy(self):
print("Can't you see it's busy")
def on_hangup(self):
print("The person you were talking to got seriously bored")
def on_noanswer(self):
print("Somebody's tired of your shit")
def on_held(self):
print("The call was put on hold") # referenced by clcc_mapping below
def on_incoming_message(self, cmti_line):
print("You've got mail! Line: {}".format(cmti_line[len("+CMTI:"):]).strip())
clcc_mapping = [ #Outgoing
{
"0":on_active_call,
"1":on_held,
"2":on_active_call,
"3":on_active_call,
"4":on_active_call,
"5":on_active_call,
"6":on_hangup}
], [ #Incoming
{
"0":on_active_call,
"1":on_held,
"2":on_active_call,
"3":on_active_call,
"4":on_active_call,
"5":on_active_call,
"6":on_hangup}
],
def on_clcc(self, clcc_line):
#CLCC is operator-dependent, from what I understand.
for i in range(4):
if not has_nonascii(clcc_line) and is_csv(clcc_line): # line looks clean, stop retrying
break
print("Garbled caller ID line! Try {}, line: {}".format(i, clcc_line))
sleep(1)
lines = self.at_command("AT+CLCC", nook=True)
print(repr(lines))
for line in lines:
if line.startswith(self.clcc_header):
clcc_line = line
else:
self.queue_unexpected_data(line)
if has_nonascii(clcc_line) or not is_csv(clcc_line):
print("Still garbled CLCC line!"); return
print("Caller ID OK, line: {}".format(repr(clcc_line[len(self.clcc_header):])).strip())
#self.process_clcc(clcc_line)
#Low-level functions
def check_input(self):
#print("Checks input")
input = self.port.read(self.read_buffer_size)
if input:
self.queue_unexpected_data(input)
def at_command(self, command, noresponse=False, nook=False):
self.executing_command.set()
self.check_input()
self.port.write(command+self.linesep)
echo = self.port.read(len(command)) #checking for command echo
if echo != command:
raise ATError(received=echo, expected=command)
#print(repr(self.port.read(len(self.linesep)+1)))
self.port.read(len(self.linesep)+1) #shifting through the line separator - that +1 seems to be necessary when we're reading right after the echo
answer = self.port.read(self.read_buffer_size)
self.executing_command.clear()
lines = filter(None, answer.split(self.linesep))
#print(lines)
if not lines and noresponse: return True #one of commands that doesn't need a response
if nook: return lines
if self.ok_response not in lines: #expecting OK as one of the elements
raise ATError(expected=self.ok_response, received=lines)
#We can have a sudden undervoltage warning, though
#I'll assume the OK always goes last in the command
#So we can pass anything after OK to the unexpected line parser
ok_response_index = lines.index(self.ok_response)
if ok_response_index+1 < len(lines):
self.queue_unexpected_data(lines[(ok_response_index+1):])
lines = lines[:(ok_response_index+1)]
if len(lines) == 1: #Single-line response
if lines[0] == self.ok_response:
return True
else:
return lines[0]
else:
lines = lines[:-1]
if len(lines) == 1:
return lines[0]
else:
return lines
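# Illustrative expectations (not asserted by the original code): at_command("AT")
# returns True because the modem answers with a bare OK, while at_command("AT+CGMI")
# typically returns a single response line such as the manufacturer name, which is
# how init_modem above uses it.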
#Functions for background monitoring of any unexpected input
def queue_unexpected_data(self, data):
self.unexpected_queue.put(data)
def process_incoming_data(self, data):
logging.debug("Incoming data: {}".format(repr(data)))
if isinstance(data, str):
data = data.split(self.linesep)
lines = filter(None, data)
for line in lines:
#Now onto the callbacks
if line == "RING":
self.on_ring(); return
if line == "BUSY":
self.on_busy(); return
if line == "HANGUP":
self.on_hangup(); return
if line == "NO ANSWER":
self.on_noanswer(); return
if line in ["SMS Ready", "Call Ready"]:
pass; return #Modem just reset
if line.startswith("+CMTI:"):
self.on_incoming_message(line); return
if line.startswith("+CLCC:"):
self.on_clcc(line); return
self.parse_unexpected_message(lines)
def parse_unexpected_message(self, data):
#haaaax
if self.linesep[::-1] in "".join(data):
lines = "".join(data).split(self.linesep[::-1])
logging.debug("Unexpected lines: {}".format(data))
def monitor(self):
while self.should_monitor.isSet():
#print("Monitoring...")
if not self.executing_command.isSet():
#First, the serial port
data = self.port.read(self.read_buffer_size)
if data:
print("Got data through serial!")
self.process_incoming_data(data)
#Then, the queue of unexpected messages received from other commands
try:
data = self.unexpected_queue.get_nowait()
except Empty:
pass
else:
print("Got data from queue!")
self.process_incoming_data(data)
#print("Got to sleep")
sleep(self.read_timeout)
#print("Returned from sleep")
print("Stopped monitoring!")
def start_monitoring(self):
self.should_monitor.set()
self.thread = Thread(target=self.monitor)
self.thread.daemon=True
self.thread.start()
def stop_monitoring(self):
self.should_monitor.clear()
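# Illustrative subclass sketch (an assumption, not part of the original script):
# the on_* callbacks are meant to be overridden to hook application behaviour
# into the modem's unsolicited notifications.
class ExampleModem(Modem):
    def on_ring(self):
        print("Incoming call detected")

    def on_incoming_message(self, cmti_line):
        print("New SMS notification: {}".format(cmti_line.strip()))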
if __name__ == "__main__":
modem = Modem(timeout = 0.5)
modem.init_modem()
|
__main__.py
|
from __future__ import annotations
import argparse
import asyncio
import os
import signal
import sys
import threading
from typing import Any, Set
from .exceptions import ConnectionClosed
from .frames import Close
from .legacy.client import connect
if sys.platform == "win32":
def win_enable_vt100() -> None:
"""
Enable VT-100 for console output on Windows.
See also https://bugs.python.org/issue29059.
"""
import ctypes
STD_OUTPUT_HANDLE = ctypes.c_uint(-11)
INVALID_HANDLE_VALUE = ctypes.c_uint(-1)
ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x004
handle = ctypes.windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE)
if handle == INVALID_HANDLE_VALUE:
raise RuntimeError("unable to obtain stdout handle")
cur_mode = ctypes.c_uint()
if ctypes.windll.kernel32.GetConsoleMode(handle, ctypes.byref(cur_mode)) == 0:
raise RuntimeError("unable to query current console mode")
# ctypes ints lack support for the required bit-OR operation.
# Temporarily convert to Py int, do the OR and convert back.
py_int_mode = int.from_bytes(cur_mode, sys.byteorder)
new_mode = ctypes.c_uint(py_int_mode | ENABLE_VIRTUAL_TERMINAL_PROCESSING)
if ctypes.windll.kernel32.SetConsoleMode(handle, new_mode) == 0:
raise RuntimeError("unable to set console mode")
def exit_from_event_loop_thread(
loop: asyncio.AbstractEventLoop,
stop: asyncio.Future[None],
) -> None:
loop.stop()
if not stop.done():
# When exiting the thread that runs the event loop, raise
# KeyboardInterrupt in the main thread to exit the program.
if sys.platform == "win32":
ctrl_c = signal.CTRL_C_EVENT
else:
ctrl_c = signal.SIGINT
os.kill(os.getpid(), ctrl_c)
def print_during_input(string: str) -> None:
sys.stdout.write(
# Save cursor position
"\N{ESC}7"
# Add a new line
"\N{LINE FEED}"
# Move cursor up
"\N{ESC}[A"
# Insert blank line, scroll last line down
"\N{ESC}[L"
# Print string in the inserted blank line
f"{string}\N{LINE FEED}"
# Restore cursor position
"\N{ESC}8"
# Move cursor down
"\N{ESC}[B"
)
sys.stdout.flush()
def print_over_input(string: str) -> None:
sys.stdout.write(
# Move cursor to beginning of line
"\N{CARRIAGE RETURN}"
# Delete current line
"\N{ESC}[K"
# Print string
f"{string}\N{LINE FEED}"
)
sys.stdout.flush()
async def run_client(
uri: str,
loop: asyncio.AbstractEventLoop,
inputs: asyncio.Queue[str],
stop: asyncio.Future[None],
) -> None:
try:
websocket = await connect(uri)
except Exception as exc:
print_over_input(f"Failed to connect to {uri}: {exc}.")
exit_from_event_loop_thread(loop, stop)
return
else:
print_during_input(f"Connected to {uri}.")
try:
while True:
incoming: asyncio.Future[Any] = asyncio.create_task(websocket.recv())
outgoing: asyncio.Future[Any] = asyncio.create_task(inputs.get())
done: Set[asyncio.Future[Any]]
pending: Set[asyncio.Future[Any]]
done, pending = await asyncio.wait(
[incoming, outgoing, stop], return_when=asyncio.FIRST_COMPLETED
)
# Cancel pending tasks to avoid leaking them.
if incoming in pending:
incoming.cancel()
if outgoing in pending:
outgoing.cancel()
if incoming in done:
try:
message = incoming.result()
except ConnectionClosed:
break
else:
if isinstance(message, str):
print_during_input("< " + message)
else:
print_during_input("< (binary) " + message.hex())
if outgoing in done:
message = outgoing.result()
await websocket.send(message)
if stop in done:
break
finally:
await websocket.close()
assert websocket.close_code is not None and websocket.close_reason is not None
close_status = Close(websocket.close_code, websocket.close_reason)
print_over_input(f"Connection closed: {close_status}.")
exit_from_event_loop_thread(loop, stop)
def main() -> None:
# If we're on Windows, enable VT100 terminal support.
if sys.platform == "win32":
try:
win_enable_vt100()
except RuntimeError as exc:
sys.stderr.write(
f"Unable to set terminal to VT100 mode. This is only "
f"supported since Win10 anniversary update. Expect "
f"weird symbols on the terminal.\nError: {exc}\n"
)
sys.stderr.flush()
try:
import readline # noqa
except ImportError: # Windows has no `readline` normally
pass
# Parse command line arguments.
parser = argparse.ArgumentParser(
prog="python -m websockets",
description="Interactive WebSocket client.",
add_help=False,
)
parser.add_argument("uri", metavar="<uri>")
args = parser.parse_args()
# Create an event loop that will run in a background thread.
loop = asyncio.new_event_loop()
# Due to zealous removal of the loop parameter in the Queue constructor,
# we need a factory coroutine to run in the freshly created event loop.
async def queue_factory() -> "asyncio.Queue[str]":
return asyncio.Queue()
# Create a queue of user inputs. There's no need to limit its size.
inputs: asyncio.Queue[str] = loop.run_until_complete(queue_factory())
# Create a stop condition when receiving SIGINT or SIGTERM.
stop: asyncio.Future[None] = loop.create_future()
# Schedule the task that will manage the connection.
loop.create_task(run_client(args.uri, loop, inputs, stop))
# Start the event loop in a background thread.
thread = threading.Thread(target=loop.run_forever)
thread.start()
# Read from stdin in the main thread in order to receive signals.
try:
while True:
# Since there's no size limit, put_nowait is identical to put.
message = input("> ")
loop.call_soon_threadsafe(inputs.put_nowait, message)
except (KeyboardInterrupt, EOFError): # ^C, ^D
loop.call_soon_threadsafe(stop.set_result, None)
# Wait for the event loop to terminate.
thread.join()
# For reasons unclear, even though the loop is closed in the thread,
# it still thinks it's running here.
loop.close()
if __name__ == "__main__":
main()
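# Example (hypothetical endpoint): run the interactive client from a shell with
#     python -m websockets ws://localhost:8765/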
|
udp_socket.py
|
""" GeckoUdpSocket - Gecko UDP socket implementation """
import socket
import logging
import threading
import time
_LOGGER = logging.getLogger(__name__)
class GeckoUdpProtocolHandler:
"""
Protocol handlers manage both sides of a specific conversation part with
a remote end.
The protocol is either initiated by a client or by a server, but either
way a query should always be met with a response from the remote end.
Both sides may instantiate listening handlers which will deal with
unsolicited requests from remote clients and will respond so that the
remote end knows the request was received and they may also send a query
to the remote end, expecting a response to confirm receipt.
A message sent will either make it to the destination, or it won't. Since
this protocol is built on UDP, we have to cook our own timeout/retry
mechanism to ensure delivery ... oddly this is precisely what TCP already
does, no idea why this wasn't considered but hey-ho, this is all a bit of
fun anyway!
The base protocol handler will manage the lifetime of the handlers within
the socket, and mechanisms are in place to allow retries to be handled,
with failure exit points that let clients react to delivery failure either
per class (by overriding methods) or per instance (by supplying handlers)
"""
def __init__(self, **kwargs):
# Send functionality
self._send_bytes = kwargs.get("send_bytes", None)
self.last_destination = None
# Receive functionality
self._on_handled = kwargs.get("on_handled", None)
# Lifetime functionality
self._start_time = time.monotonic()
self._timeout_in_seconds = kwargs.get("timeout", 0)
self._retry_count = kwargs.get("retry_count", 0)
self._on_retry_failed = kwargs.get("on_retry_failed", None)
self._should_remove_handler = False
##########################################################################
#
# SEND FUNCTIONALITY
#
##########################################################################
@property
def send_bytes(self) -> bytes:
"""The bytes to send to the remote end. Either uses the class instance
data _send_bytes or can be overridden in a base class"""
if self._send_bytes is None:
raise NotImplementedError
return self._send_bytes
##########################################################################
#
# RECEIVE FUNCTIONALITY
#
##########################################################################
def can_handle(self, received_bytes: bytes, sender: tuple) -> bool:
"""Check if you can handle these bytes. If you return True, then your
handle method will get called and no other handlers will be given a
chance to process this data. If you return False then the search for a
suitable handler will continue"""
return False
def handle(self, socket, received_bytes: bytes, sender: tuple):
"""Handle this data. This will only be called if you returned True
from the `can_handle` function. If you wish to remove this handler
from the system, then you should set the `should_remove_handler`
member."""
def handled(self, socket, sender: tuple):
self._reset_timeout()
if self._on_handled is not None:
self._on_handled(self, socket, sender)
##########################################################################
#
# LIFETIME MANAGEMENT
#
##########################################################################
@property
def age(self):
return time.monotonic() - self._start_time
@property
def has_timedout(self):
return (
self.age > self._timeout_in_seconds
if self._timeout_in_seconds > 0
else False
)
@property
def should_remove_handler(self):
return self._should_remove_handler
def _reset_timeout(self):
self._start_time = time.monotonic()
def retry(self, socket):
if self._retry_count == 0:
return False
self._retry_count -= 1
_LOGGER.debug("Handler retry count %d", self._retry_count)
self._reset_timeout()
if socket is not None:
# Queue another send
socket.queue_send(self, self.last_destination)
return True
def loop(self, socket):
""" Executed each time around the socket loop """
if not self.has_timedout:
return
_LOGGER.debug("Handler has timed out")
if self.retry(socket):
return
if self._on_retry_failed is not None:
self._on_retry_failed(self, socket)
@staticmethod
def _default_retry_failed_handler(handler, socket):
_LOGGER.debug("Default retry failed handler for %r being used", handler)
handler._should_remove_handler = True
# Pythonic methods
def __repr__(self):
return (
f"{self.__class__.__name__}(send_bytes={self._send_bytes!r},"
f" age={self.age}, has_timedout={self.has_timedout},"
f" should_remove_handler={self.should_remove_handler},"
f" timeout={self._timeout_in_seconds}s,"
f" retry_count={self._retry_count}"
f")"
)
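# Illustrative subclass (an assumption, not part of the original library): a
# minimal one-shot handler showing how the can_handle/handle/handled lifecycle
# described above is meant to be used.
class ExampleHelloHandler(GeckoUdpProtocolHandler):
    def can_handle(self, received_bytes: bytes, sender: tuple) -> bool:
        # Claim only datagrams that start with the expected prefix
        return received_bytes.startswith(b"HELLO")

    def handle(self, socket, received_bytes: bytes, sender: tuple):
        _LOGGER.debug("HELLO datagram received from %s", sender)
        # A one-shot handler removes itself once its job is done
        self._should_remove_handler = True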
class GeckoUdpSocket:
"""Gecko in.touch2 uses UDP to communicate. This class is a wrapper around
a socket and a thread and serviced by classes derived from GeckoUdpSendHandler
GeckoUdpReceiveHandler and GeckoUdpProtocolHandler
"""
_PORT = 10022
_SOCKET_TIMEOUT = 0.05
_MAX_PACKET_SIZE = 8192
_SENDING_THROTTLE_RATE_PER_SECOND = 50
def __init__(self, socket=None):
self._socket = socket
self._exit_event = None
self._thread = None
self._lock = threading.Lock()
self._send_handlers = []
self._receive_handlers = []
self._busy_count = 0
self._sequence_counter = 0
self._last_send_time = time.monotonic()
def __enter__(self):
if self._socket is None:
self._socket = socket.socket(
socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP
)
self._socket.settimeout(self._SOCKET_TIMEOUT)
self._exit_event = threading.Event()
self._thread = threading.Thread(target=self._thread_func, daemon=True)
self._thread.start()
return self
def __exit__(self, *args):
if self._exit_event:
self._exit_event.set()
if self._thread:
self._thread.join()
self._thread = None
if self._socket:
self._socket.close()
self._socket = None
class _BusyLock:
def __init__(self, socket):
self._socket = socket
def __enter__(self):
with self._socket._lock:
self._socket._busy_count += 1
def __exit__(self, *args):
with self._socket._lock:
self._socket._busy_count -= 1
def open(self):
"""Start the use of this UDP socket object if not used
in a `with` statement"""
self.__enter__()
def close(self):
"""Finish the use of this UDP socket object if not used
in a `with` statement"""
self.__exit__()
@property
def isopen(self):
"""Check to see if the socket is open"""
if not self._exit_event:
return False
return not self._exit_event.is_set()
@property
def isbusy(self):
"""Check to see if the socket is busy"""
if self._send_handlers:
return True
if self._receive_handlers:
return True
return self._busy_count > 0
def wait(self, timeout):
""" Wait for a timeout, respecting the exit event """
self._exit_event.wait(timeout)
def bind(self):
"""Bind this UDP socket to the local address and port"""
self._socket.bind(("", self._PORT))
def enable_broadcast(self):
"""Set this UDP socket to support broadcast"""
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
def add_receive_handler(self, protocol_handler: GeckoUdpProtocolHandler):
"""Add a receive handler to the list of available handlers"""
with self._lock:
self._receive_handlers.append(protocol_handler)
def remove_receive_handler(self, protocol_handler: GeckoUdpProtocolHandler):
"""Remove a receive handler from the list of available handlers"""
with self._lock:
self._receive_handlers.remove(protocol_handler)
def queue_send(self, protocol_handler: GeckoUdpProtocolHandler, destination: tuple):
"""Queue a message to be sent by the worker thread"""
with self._lock:
self._send_handlers.append((protocol_handler, destination))
def get_and_increment_sequence_counter(self):
with self._lock:
self._sequence_counter += 1
return self._sequence_counter % 256
def _process_send_requests(self):
# Throttle the sending rate to prevent message loss
if (time.monotonic() - self._last_send_time) < (
1.0 / self._SENDING_THROTTLE_RATE_PER_SECOND
):
return
with GeckoUdpSocket._BusyLock(self):
# Assume there are no send requests
send_handler = None
# Play safely with clients, and minimize the time spent locked
with self._lock:
if self._send_handlers:
send_handler = self._send_handlers.pop(0)
# If there is a handler, then use it
if send_handler:
try:
send_bytes = send_handler[0].send_bytes
destination = send_handler[1]
if destination is None:
raise AssertionError(
f"Cannot have destination set to None for {send_handler}"
)
# For convenience, the entire destination sometimes gets passed in;
# trim it down to just the address and port
if len(destination) > 2:
destination = (destination[0], destination[1])
send_handler[0].last_destination = destination
_LOGGER.debug("Sending %s to %s", send_bytes, destination)
self._socket.sendto(send_bytes, destination)
self._last_send_time = time.monotonic()
except Exception:
_LOGGER.exception("Exception during send processing")
def dispatch_received_data(self, received_bytes: bytes, remote_end: tuple):
""" Dispatch bytes to the handlers, maybe someone is interested! """
with GeckoUdpSocket._BusyLock(self):
_LOGGER.debug("Received %s from %s", received_bytes, remote_end)
receive_handler = None
with self._lock:
for handler in self._receive_handlers:
if handler.can_handle(received_bytes, remote_end):
receive_handler = handler
break
if receive_handler:
try:
receive_handler.handle(self, received_bytes, remote_end)
receive_handler.handled(self, remote_end)
except Exception:
_LOGGER.exception("Unhandled exception in receive_handler func")
else:
_LOGGER.warning("Couldn't find new handler for %s", received_bytes)
def _process_received_data(self):
with GeckoUdpSocket._BusyLock(self):
try:
received_bytes, remote_end = self._socket.recvfrom(
self._MAX_PACKET_SIZE
)
self.dispatch_received_data(received_bytes, remote_end)
except (socket.timeout, OSError):
return
finally:
pass
def _cleanup_handlers(self):
with GeckoUdpSocket._BusyLock(self):
remove_handlers = []
with self._lock:
# Build list of handlers that need to be removed
remove_handlers = [
handler
for handler in self._receive_handlers
if handler.should_remove_handler
]
if remove_handlers:
_LOGGER.debug("Removed timedout handlers %s", remove_handlers)
# Remove them from the collection
with self._lock:
self._receive_handlers = [
handler
for handler in self._receive_handlers
if handler not in remove_handlers
]
if remove_handlers:
_LOGGER.debug("Remaining handlers %s", self._receive_handlers)
def _thread_func(self):
while self.isopen:
self._process_send_requests()
self._process_received_data()
# Do loop for timeout/retry
for handler in self._receive_handlers:
handler.loop(self)
self._cleanup_handlers()
self._loop_func()
_LOGGER.info("GeckoUdpSocket thread finished")
def _loop_func(self):
# Opportunity for sub-class to get a thread loop
pass
def __repr__(self):
return (
f"{self.__class__.__name__} on {self._socket!r}\n"
f" receive_handlers={self._receive_handlers!r},\n"
f" send_handlers={self._send_handlers!r}\n"
f" isopen: {self.isopen}"
f" isbusy: {self.isbusy}"
)
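# Usage sketch (illustrative, not shipped with the module): the socket is
# designed to be driven as a context manager, with protocol handlers added
# for receiving and queued for sending.
if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    with GeckoUdpSocket() as gecko:
        gecko.enable_broadcast()
        # A real client would register handlers here, e.g.
        #   gecko.add_receive_handler(ExampleHelloHandler())
        #   gecko.queue_send(ExampleHelloHandler(send_bytes=b"HELLO"), ("255.255.255.255", GeckoUdpSocket._PORT))
        gecko.wait(0.5)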
|
run.py
|
from pyA20.gpio import port
from pyA20.gpio import gpio
from threading import Thread
from datetime import datetime
from time import sleep
import json
import time
# config
inputPort = port.PA11
reportServerAddress = 'http://chcesiku.pl'
statusEndPoint = '/api/status'
heartbeatEndPoint = '/api/heartbeat'
interval = 10
# end config
import urllib2
def http_post(url, header, data):
req = urllib2.Request(url, data, headers=header)
response = urllib2.urlopen(req)
return response
class WCStateManager:
def __init__(self, reportServer):
self.wcState = 0
self.lastStateChange = datetime.now()
self.reportServer = reportServer
def getStateName(self):
if self.wcState == 0:
return 'Free'
else:
return 'Occupied'
def setWCState(self, state):
if self.wcState == state:
pass
else:
now = datetime.now()
diff = now - self.lastStateChange
self.lastStateChange = now
self.wcState = state
self.sendState(diff)
def sendState(self, diff):
data = {'ChangeDate': self.lastStateChange.isoformat(), 'Status': self.getStateName(), 'LastStatusDuration': diff.seconds}
headers = {'Content-type': 'application/json'}
isSent = False
while not isSent:
try:
response = http_post(self.reportServer, headers, json.dumps(data))
d = response.read()
print('status= %s.data=%s' % (response.code, d))
isSent = True
except:
print('Sending data failed, retrying')
def startHeartbeat(interval, reportServer):
while True:
try:
response = http_post(reportServer,{},'')
print('Heartbeat status= %s' % response.code)
except:
print('Heartbeat failed, retry in: %s' % interval)
time.sleep(interval)
gpio.init()
gpio.setcfg(inputPort, gpio.INPUT)
gpio.pullup(inputPort, 0)
gpio.pullup(inputPort, gpio.PULLDOWN)
manager = WCStateManager(reportServerAddress+statusEndPoint)
t = Thread(target=startHeartbeat, args=(interval, reportServerAddress+heartbeatEndPoint))
t.start()
while True:
if gpio.input(inputPort) == 1:
manager.setWCState(1)
else:
manager.setWCState(0)
sleep(1)
|
__init__.py
|
from multiprocessing import cpu_count, Process
from threading import Thread
from time import perf_counter
from statistics import mean, variance
from sum_seq.python.sum_seq import sum_seq as py_sum_seq
from sum_seq.c.sum_seq import sum_seq as c_sum_seq
from sum_seq.cython.sum_seq import sum_seq as cy_sum_seq
FUNCTIONS = {
"python": py_sum_seq,
"c": c_sum_seq,
"cython": cy_sum_seq,
}
def _sequence_exec(n, val, func):
start = perf_counter()
for _ in range(n):
func(val)
return perf_counter() - start
def _thread_exec(n, val, func):
start = perf_counter()
threads = [Thread(target=func, args=(val, )) for _ in range(n)]
for t in threads:
t.start()
for t in threads:
t.join()
return perf_counter() - start
def _process_exec(n, val, func):
start = perf_counter()
processes = [Process(target=func, args=(val, )) for _ in range(n)]
for p in processes:
p.start()
for p in processes:
p.join()
return perf_counter() - start
def benchmark_sum_seq(n: int = cpu_count(), val: int = 2 * 10 ** 8, iterations: int = 5):
print(f"Use n: {n}, val: {val}, iterations: {iterations}")
result = {(tp, var): [] for tp in FUNCTIONS for var in ["sequence", "thread", "process"]}
# Warm up
print("Warm up (single run)")
for tp, func in FUNCTIONS.items():
print(f"{tp} (sequence)\t{_sequence_exec(n, val, func):.5}")
print(f"{tp} (thread)\t{_thread_exec(n, val, func):.5}")
print(f"{tp} (process)\t{_process_exec(n, val, func):.5}")
# Benchmark
print("Start benchmark")
for _ in range(iterations):
print(f".. {_ + 1}/{iterations}")
for tp, func in FUNCTIONS.items():
result[(tp, "sequence")].append(_sequence_exec(n, val, func))
result[(tp, "thread")].append(_thread_exec(n, val, func))
result[(tp, "process")].append(_process_exec(n, val, func))
print("Results\n")
print("Method\tTime")
for (tp, var), values in sorted(result.items(), key=lambda x: x[0]):
print(f"{tp} ({var})\t{mean(values):.5} (±{variance(values):.7})")
|
views.py
|
import collections
import datetime
import re
import threading
import time
from collections import defaultdict
from operator import itemgetter
import simplejson as json
import yaml
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ValidationError
from django.db.models import Q
from django.forms import model_to_dict
from django.http import JsonResponse
from django.utils import timezone, dateformat
from django.utils.decorators import method_decorator
from django.views import View
from django.views.decorators.csrf import csrf_exempt
import api.models as models
import directions.models as directions
import users.models as users
from api.ws import emit
from appconf.manager import SettingManager
from barcodes.views import tubes
from clients.models import CardBase, Individual, Card, Document, DocumentType
from directory.models import AutoAdd, Fractions, ParaclinicInputGroups, ParaclinicInputField
from laboratory import settings
from laboratory.decorators import group_required
from laboratory.utils import strdate, strdatetime, tsdatetime
from podrazdeleniya.models import Podrazdeleniya
from results.views import result_normal
from rmis_integration.client import Client, get_direction_full_data_cache
from slog import models as slog
from slog.models import Log
from statistics_tickets.models import VisitPurpose, ResultOfTreatment, StatisticsTicket, Outcomes, \
ExcludePurposes
from utils.dates import try_parse_range, try_strptime
def translit(locallangstring):
"""
Translit func
:param locallangstring: orign
:return: translit of locallangstring
"""
conversion = {
u'\u0410': 'A', u'\u0430': 'a',
u'\u0411': 'B', u'\u0431': 'b',
u'\u0412': 'V', u'\u0432': 'v',
u'\u0413': 'G', u'\u0433': 'g',
u'\u0414': 'D', u'\u0434': 'd',
u'\u0415': 'E', u'\u0435': 'e',
u'\u0401': 'Yo', u'\u0451': 'yo',
u'\u0416': 'Zh', u'\u0436': 'zh',
u'\u0417': 'Z', u'\u0437': 'z',
u'\u0418': 'I', u'\u0438': 'i',
u'\u0419': 'Y', u'\u0439': 'y',
u'\u041a': 'K', u'\u043a': 'k',
u'\u041b': 'L', u'\u043b': 'l',
u'\u041c': 'M', u'\u043c': 'm',
u'\u041d': 'N', u'\u043d': 'n',
u'\u041e': 'O', u'\u043e': 'o',
u'\u041f': 'P', u'\u043f': 'p',
u'\u0420': 'R', u'\u0440': 'r',
u'\u0421': 'S', u'\u0441': 's',
u'\u0422': 'T', u'\u0442': 't',
u'\u0423': 'U', u'\u0443': 'u',
u'\u0424': 'F', u'\u0444': 'f',
u'\u0425': 'H', u'\u0445': 'h',
u'\u0426': 'Ts', u'\u0446': 'ts',
u'\u0427': 'Ch', u'\u0447': 'ch',
u'\u0428': 'Sh', u'\u0448': 'sh',
u'\u0429': 'Sch', u'\u0449': 'sch',
u'\u042a': '', u'\u044a': '',
u'\u042b': 'Y', u'\u044b': 'y',
u'\u042c': '', u'\u044c': '',
u'\u042d': 'E', u'\u044d': 'e',
u'\u042e': 'Yu', u'\u044e': 'yu',
u'\u042f': 'Ya', u'\u044f': 'ya',
}
translitstring = []
for c in locallangstring:
translitstring.append(conversion.setdefault(c, c))
return ''.join(translitstring)
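# Example (illustrative): translit(u"Иванов") returns "Ivanov"; characters not
# present in the conversion table pass through unchanged.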
@csrf_exempt
def send(request):
"""
Sysmex save results
:param request:
:return:
"""
result = {"ok": False}
try:
if request.method == "POST":
resdict = yaml.load(request.POST["result"])
appkey = request.POST.get("key", "")
else:
resdict = yaml.load(request.GET["result"])
appkey = request.GET.get("key", "")
astm_user = users.DoctorProfile.objects.filter(user__username="astm").first()
resdict["pk"] = int(resdict.get("pk", -111))
if "LYMPH%" in resdict["result"]:
resdict["orders"] = {}
dpk = -1
if "bydirection" in request.POST or "bydirection" in request.GET:
dpk = resdict["pk"]
if dpk >= 4600000000000:
dpk -= 4600000000000
dpk //= 10
tubes(request, direction_implict_id=dpk)
if directions.TubesRegistration.objects.filter(issledovaniya__napravleniye__pk=dpk,
issledovaniya__doc_confirmation__isnull=True).exists():
resdict["pk"] = directions.TubesRegistration.objects.filter(
issledovaniya__napravleniye__pk=dpk, issledovaniya__doc_confirmation__isnull=True).order_by(
"pk").first().pk
else:
resdict["pk"] = False
result["A"] = appkey
if resdict["pk"] and models.Application.objects.filter(key=appkey).exists() and models.Application.objects.get(
key=appkey).active and directions.TubesRegistration.objects.filter(pk=resdict["pk"]).exists():
tubei = directions.TubesRegistration.objects.get(pk=resdict["pk"])
direction = tubei.issledovaniya_set.first().napravleniye
pks = []
for key in resdict["result"].keys():
if models.RelationFractionASTM.objects.filter(astm_field=key).exists():
fractionRels = models.RelationFractionASTM.objects.filter(astm_field=key)
for fractionRel in fractionRels:
fraction = fractionRel.fraction
if directions.Issledovaniya.objects.filter(napravleniye=direction,
research=fraction.research,
doc_confirmation__isnull=True).exists():
issled = directions.Issledovaniya.objects.filter(napravleniye=direction,
research=fraction.research,
doc_confirmation__isnull=True).order_by(
"pk")[0]
if directions.Result.objects.filter(issledovaniye=issled,
fraction=fraction).exists(): # If a result for this fraction already exists
fraction_result = directions.Result.objects.filter(issledovaniye=issled,
fraction__pk=fraction.pk).order_by(
"-pk")[0]
else:
fraction_result = directions.Result(issledovaniye=issled,
fraction=fraction)
fraction_result.value = str(resdict["result"][key]).strip() # Set the value
if fraction_result.value.isdigit():
fraction_result.value = "%s.0" % fraction_result.value
import re
find = re.findall(r"\d+\.\d+", fraction_result.value)
if len(find) > 0:
val = float(find[0]) * fractionRel.get_multiplier_display()
if fractionRel.full_round:
val = round(val)
fraction_result.value = fraction_result.value.replace(find[0], str(val))
fraction_result.iteration = 1 # Set the iteration
ref = fractionRel.default_ref
if ref:
fraction_result.ref_title = ref.title
fraction_result.ref_about = ref.about
fraction_result.ref_m = ref.m
fraction_result.ref_f = ref.f
fraction_result.save() # Save the result
issled.api_app = models.Application.objects.get(key=appkey)
issled.save()
fraction_result.get_ref(re_save=True)
fraction_result.issledovaniye.doc_save = astm_user # Who saved it
from datetime import datetime
fraction_result.issledovaniye.time_save = timezone.now() # When it was saved
fraction_result.issledovaniye.save()
if issled not in pks:
pks.append(issled)
for pkk in pks:
emit("results_save", {"pk": pkk, "user": None, "dir": direction.pk})
slog.Log(key=appkey, type=22, body=json.dumps(resdict), user=None).save()
result["ok"] = True
elif not directions.TubesRegistration.objects.filter(pk=resdict["pk"]).exists():
if dpk > -1:
resdict["pk"] = dpk
slog.Log(key=resdict["pk"], type=23, body=json.dumps(resdict), user=None).save()
except Exception as e:
result = {"ok": False, "Exception": True, "MSG": str(e)}
return JsonResponse(result)
@csrf_exempt
def endpoint(request):
result = {"answer": False, "body": ""}
data = json.loads(request.POST.get("result", request.GET.get("result", "{}")))
api_key = request.POST.get("key", request.GET.get("key", ""))
message_type = data.get("message_type", "C")
pk_s = str(data.get("pk", ""))
pk = -1 if not pk_s.isdigit() else int(pk_s)
data["app_name"] = "API key is incorrect"
# pid = data.get("processing_id", "P")
if models.Application.objects.filter(key=api_key).exists():
astm_user = users.DoctorProfile.objects.filter(user__username="astm").first()
if astm_user is None:
astm_user = users.DoctorProfile.objects.filter(user__is_staff=True).order_by("pk").first()
app = models.Application.objects.get(key=api_key)
if app.active:
data["app_name"] = app.name
if message_type == "R" or data.get("result"):
if pk != -1:
dw = app.direction_work
if pk >= 4600000000000:
pk -= 4600000000000
pk //= 10
dw = True
if dw:
direction = directions.Napravleniya.objects.filter(pk=pk).first()
else:
direction = directions.Napravleniya.objects.filter(issledovaniya__tubes__pk=pk).first()
pks = []
oks = []
if direction:
results = data.get("result", {})
for key in results:
ok = False
q = models.RelationFractionASTM.objects.filter(astm_field=key)
if q.filter(application_api=app).exists():
q = q.filter(application_api=app)
ok = True
elif q.filter(application_api__isnull=True).exists():
q = q.filter(application_api__isnull=True)
ok = True
if ok:
for fraction_rel in q:
save_state = []
issleds = []
for issled in directions.Issledovaniya.objects.filter(napravleniye=direction,
research=fraction_rel.fraction.research,
doc_confirmation__isnull=True):
if directions.Result.objects.filter(issledovaniye=issled,
fraction=fraction_rel.fraction).exists():
fraction_result = directions.Result.objects.filter(issledovaniye=issled,
fraction=fraction_rel.fraction).order_by(
"-pk")[0]
else:
fraction_result = directions.Result(issledovaniye=issled,
fraction=fraction_rel.fraction)
fraction_result.value = str(results[key]).strip()
import re
find = re.findall(r"\d+\.\d+", fraction_result.value)
if len(find) > 0:
val_str = fraction_result.value
for f in find:
val = app.truncate(float(f) * fraction_rel.get_multiplier_display())
val_str = val_str.replace(f, str(val))
fraction_result.value = val_str
fraction_result.iteration = 1
ref = fraction_rel.default_ref
if ref:
fraction_result.ref_title = ref.title
fraction_result.ref_about = ref.about
fraction_result.ref_m = ref.m
fraction_result.ref_f = ref.f
fraction_result.save()
issled.api_app = app
issled.save()
fraction_result.get_ref(re_save=True)
fraction_result.issledovaniye.doc_save = astm_user
fraction_result.issledovaniye.time_save = timezone.now()
fraction_result.issledovaniye.save()
save_state.append({"fraction": fraction_result.fraction.title,
"value": fraction_result.value})
issleds.append({"pk": issled.pk, "title": issled.research.title})
if issled not in pks:
pks.append(issled)
# slog.Log(key=json.dumps({"direction": direction.pk, "issleds": str(issleds)}),
# type=22, body=json.dumps(save_state), user=None).save()
oks.append(ok)
result["body"] = "{} {} {} {}".format(dw, pk, json.dumps(oks), direction is not None)
for pkk in pks:
emit("results_save", {"pk": pkk, "user": None, "dir": direction.pk})
else:
result["body"] = "pk '{}' is not exists".format(pk_s)
elif message_type == "Q":
result["answer"] = True
pks = [int(x) for x in data.get("query", [])]
researches = defaultdict(list)
for row in app.get_issledovaniya(pks):
k = row["pk"]
i = row["iss"]
for fraction in Fractions.objects.filter(research=i.research,
hide=False):
rel = models.RelationFractionASTM.objects.filter(fraction=fraction, application_api=app)
if not rel.exists():
continue
# rel = models.RelationFractionASTM.objects.filter(fraction=fraction)
# if not rel.exists():
# continue
rel = rel[0]
researches[k].append(rel.astm_field)
result["body"] = researches
else:
pass
else:
data["app_name"] = "API app banned " + api_key
result["body"] = "API app banned " + api_key
else:
result["body"] = "API key is incorrect"
slog.Log(key=pk, type=6000, body=json.dumps({"data": data, "answer": result}), user=None).save()
return JsonResponse(result)
@login_required
def departments(request):
from podrazdeleniya.models import Podrazdeleniya
can_edit = request.user.is_superuser or request.user.doctorprofile.has_group(
'Создание и редактирование пользователей')
if request.method == "GET":
return JsonResponse(
{"departments": [{"pk": x.pk, "title": x.get_title(), "type": str(x.p_type), "updated": False} for
x in Podrazdeleniya.objects.all().order_by("pk")],
"can_edit": can_edit,
"types": [{"pk": str(x[0]), "title": x[1]} for x in Podrazdeleniya.TYPES if
(x[0] == 3 and SettingManager.get("paraclinic_module", default='false', default_type='b'))
or (x[0] == 4 and SettingManager.get("consults_module", default='false', default_type='b'))
or x[0] not in [3, 4]]})
elif can_edit:
ok = False
message = ""
try:
req = json.loads(request.body)
data_type = req.get("type", "update")
rows = req.get("data", [])
if data_type == "update":
ok = False
for row in rows:
title = row["title"].strip()
if len(title) > 0:
department = Podrazdeleniya.objects.get(pk=row["pk"])
department.title = title
department.p_type = int(row["type"])
department.save()
ok = True
elif data_type == "insert":
ok = False
for row in rows:
title = row["title"].strip()
if len(title) > 0:
department = Podrazdeleniya(title=title, p_type=int(row["type"]))
department.save()
ok = True
finally:
return JsonResponse({"ok": ok, "message": message})
return JsonResponse({})
def bases(request):
return JsonResponse({"bases": [
{"pk": x.pk,
"title": x.title,
"code": x.short_title,
"hide": x.hide,
"history_number": x.history_number,
"internal_type": x.internal_type,
"fin_sources": [{
"pk": y.pk,
"title": y.title,
"default_diagnos": y.default_diagnos
} for y in directions.IstochnikiFinansirovaniya.objects.filter(base=x).order_by('-order_weight')]
} for x in CardBase.objects.all().order_by('-order_weight')]})
class ResearchesTemplates(View):
def get(self, request):
from django.db.models import Q
templates = []
for t in users.AssignmentTemplates.objects.filter(global_template=True) \
.filter(Q(doc__isnull=True, podrazdeleniye__isnull=True) |
Q(doc=request.user.doctorprofile) |
Q(podrazdeleniye=request.user.doctorprofile.podrazdeleniye)):
templates.append({"values": [x.research.pk for x in users.AssignmentResearches.objects.filter(template=t)],
"pk": t.pk,
"title": t.title,
"for_current_user": t.doc is not None,
"for_users_department": t.podrazdeleniye is not None})
return JsonResponse({"templates": templates})
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(self.__class__, self).dispatch(request, *args, **kwargs)
from directory.models import Researches as DResearches
class Researches(View):
def get(self, request):
deps = defaultdict(list)
for r in DResearches.objects.filter(hide=False).order_by("title"):
autoadd = [x.b.pk for x in AutoAdd.objects.filter(a=r)]
addto = [x.a.pk for x in AutoAdd.objects.filter(b=r)]
deps[-2 if not r.podrazdeleniye else r.podrazdeleniye.pk].append(
{"pk": r.pk,
"onlywith": -1 if not r.onlywith else r.onlywith.pk,
"department_pk": -2 if not r.podrazdeleniye else r.podrazdeleniye.pk,
"title": r.get_title(),
"full_title": r.title,
"doc_refferal": r.is_doc_refferal,
"need_vich_code": r.need_vich_code,
"comment_variants": [] if not r.comment_variants else r.comment_variants.get_variants(),
"autoadd": autoadd,
"addto": addto,
"code": r.code,
"type": "4" if not r.podrazdeleniye else str(r.podrazdeleniye.p_type)
})
return JsonResponse({"researches": deps})
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(self.__class__, self).dispatch(request, *args, **kwargs)
def current_user_info(request):
ret = {"auth": request.user.is_authenticated, "doc_pk": -1, "username": "", "fio": "",
"department": {"pk": -1, "title": ""}, "groups": [], "modules": {
"l2_cards": SettingManager.get("l2_cards_module", default='false', default_type='b'),
}}
if ret["auth"]:
ret["username"] = request.user.username
ret["fio"] = request.user.doctorprofile.fio
ret["groups"] = list(request.user.groups.values_list('name', flat=True))
ret["doc_pk"] = request.user.doctorprofile.pk
ret["department"] = {"pk": request.user.doctorprofile.podrazdeleniye.pk,
"title": request.user.doctorprofile.podrazdeleniye.title}
return JsonResponse(ret)
@login_required
def directive_from(request):
from users.models import DoctorProfile
data = []
for dep in Podrazdeleniya.objects.filter(p_type=Podrazdeleniya.DEPARTMENT).order_by('title'):
d = {"pk": dep.pk,
"title": dep.title,
"docs": [{"pk": x.pk, "fio": x.fio} for x in DoctorProfile.objects.filter(podrazdeleniye=dep,
user__groups__name="Лечащий врач").order_by(
"fio")]
}
data.append(d)
return JsonResponse({"data": data})
@login_required
def patients_search_card(request):
objects = []
data = []
d = json.loads(request.body)
card_type = CardBase.objects.get(pk=d['type'])
query = d['query'].strip()
p = re.compile(r'[а-яё]{3}[0-9]{8}', re.IGNORECASE)
p2 = re.compile(r'^([А-яЁё\-]+)( ([А-яЁё\-]+)(( ([А-яЁё\-]*))?( ([0-9]{2}\.[0-9]{2}\.[0-9]{4}))?)?)?$')
p3 = re.compile(r'^[0-9]{1,15}$')
p4 = re.compile(r'card_pk:\d+', flags=re.IGNORECASE)
p4i = bool(re.search(p4, query.lower()))
pat_bd = re.compile(r"\d{4}-\d{2}-\d{2}")
c = None
if not p4i:
if re.search(p, query.lower()):
initials = query[0:3].upper()
btday = query[7:11] + "-" + query[5:7] + "-" + query[3:5]
if not pat_bd.match(btday):
return JsonResponse([], safe=False)
try:
objects = Individual.objects.filter(family__startswith=initials[0], name__startswith=initials[1],
patronymic__startswith=initials[2], birthday=btday,
card__base=card_type)
if (card_type.is_rmis and len(objects) == 0) or card_type.internal_type:
c = Client(modules="patients")
objects = c.patients.import_individual_to_base(
{"surname": query[0] + "%", "name": query[1] + "%", "patrName": query[2] + "%",
"birthDate": btday},
fio=True)
except ValidationError:
objects = []
elif re.search(p2, query):
f, n, p, rmis_req, split = full_patient_search_data(p, query)
objects = Individual.objects.filter(family__istartswith=f, name__istartswith=n,
patronymic__istartswith=p, card__base=card_type)[:10]
if len(split) > 3:
objects = Individual.objects.filter(family__istartswith=f, name__istartswith=n,
patronymic__istartswith=p, card__base=card_type,
birthday=datetime.datetime.strptime(split[3], "%d.%m.%Y").date())[
:10]
if (card_type.is_rmis and (len(objects) == 0 or (len(split) < 4 and len(objects) < 10))) \
or card_type.internal_type:
objects = list(objects)
try:
if not c:
c = Client(modules="patients")
objects += c.patients.import_individual_to_base(rmis_req, fio=True, limit=10 - len(objects))
except ConnectionError:
pass
if (re.search(p3, query) and not card_type.is_rmis) \
or (len(list(objects)) == 0 and len(query) == 16 and card_type.internal_type) \
or (card_type.is_rmis and not re.search(p3, query)):
resync = True
if len(list(objects)) == 0:
resync = False
try:
objects = Individual.objects.filter(card__number=query.upper(), card__is_archive=False,
card__base=card_type)
except ValueError:
pass
if (card_type.is_rmis or card_type.internal_type) and len(list(objects)) == 0 and len(query) == 16:
if not c:
c = Client(modules="patients")
objects = c.patients.import_individual_to_base(query)
else:
resync = True
if resync and card_type.is_rmis:
if not c:
c = Client(modules="patients")
sema = threading.BoundedSemaphore(10)
threads = list()
def sync_i(o: Individual, client: Client):
sema.acquire()
try:
o.sync_with_rmis(c=client)
finally:
sema.release()
for o in objects:
thread = threading.Thread(target=sync_i, args=(o, c))
threads.append(thread)
thread.start()
if p4i:
cards = Card.objects.filter(pk=int(query.split(":")[1]))
else:
cards = Card.objects.filter(base=card_type, individual__in=objects, is_archive=False)
if re.match(p3, query):
cards = cards.filter(number=query)
for row in cards.filter(is_archive=False).prefetch_related("individual").distinct():
docs = Document.objects.filter(individual__pk=row.individual.pk, is_active=True,
document_type__title__in=['СНИЛС', 'Паспорт гражданина РФ', 'Полис ОМС'])\
.distinct("pk", "number", "document_type", "serial").order_by('pk')
data.append({"type_title": card_type.title,
"num": row.number,
"is_rmis": row.base.is_rmis,
"family": row.individual.family,
"name": row.individual.name,
"twoname": row.individual.patronymic,
"birthday": row.individual.bd(),
"age": row.individual.age_s(),
"sex": row.individual.sex,
"individual_pk": row.individual.pk,
"pk": row.pk,
"phones": row.get_phones(),
"main_diagnosis": row.main_diagnosis,
"docs": [{**model_to_dict(x), "type_title": x.document_type.title} for x in docs]})
return JsonResponse({"results": data})
def full_patient_search_data(p, query):
dp = re.compile(r'^[0-9]{2}\.[0-9]{2}\.[0-9]{4}$')
split = str(re.sub(' +', ' ', str(query))).split()
n = p = ""
f = split[0]
rmis_req = {"surname": f + "%"}
if len(split) > 1:
n = split[1]
rmis_req["name"] = n + "%"
if len(split) > 2:
if re.search(dp, split[2]):
split = [split[0], split[1], '', split[2]]
else:
p = split[2]
rmis_req["patrName"] = p + "%"
if len(split) > 3:
btday = split[3].split(".")
btday = btday[2] + "-" + btday[1] + "-" + btday[0]
rmis_req["birthDate"] = btday
return f, n, p, rmis_req, split
@login_required
def patients_search_individual(request):
objects = []
data = []
d = json.loads(request.body)
query = d['query'].strip()
p = re.compile(r'[а-яё]{3}[0-9]{8}', re.IGNORECASE)
p2 = re.compile(r'^([А-яЁё\-]+)( ([А-яЁё\-]+)(( ([А-яЁё\-]*))?( ([0-9]{2}\.[0-9]{2}\.[0-9]{4}))?)?)?$')
p4 = re.compile(r'individual_pk:\d+')
pat_bd = re.compile(r"\d{4}-\d{2}-\d{2}")
if re.search(p, query.lower()):
initials = query[0:3].upper()
btday = query[7:11] + "-" + query[5:7] + "-" + query[3:5]
if not pat_bd.match(btday):
return JsonResponse([], safe=False)
try:
objects = Individual.objects.filter(family__startswith=initials[0], name__startswith=initials[1],
patronymic__startswith=initials[2], birthday=btday)
except ValidationError:
objects = []
elif re.search(p2, query):
f, n, p, rmis_req, split = full_patient_search_data(p, query)
objects = Individual.objects.filter(family__istartswith=f, name__istartswith=n, patronymic__istartswith=p)
if len(split) > 3:
objects = Individual.objects.filter(family__istartswith=f, name__istartswith=n,
patronymic__istartswith=p,
birthday=datetime.datetime.strptime(split[3], "%d.%m.%Y").date())
if re.search(p4, query):
objects = Individual.objects.filter(pk=int(query.split(":")[1]))
n = 0
if not isinstance(objects, list):
for row in objects.distinct().order_by("family", "name", "patronymic", "birthday"):
n += 1
data.append({"family": row.family,
"name": row.name,
"patronymic": row.patronymic,
"birthday": row.bd(),
"age": row.age_s(),
"sex": row.sex,
"pk": row.pk})
if n == 25:
break
return JsonResponse({"results": data})
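# Generates directions (referrals) for the selected researches of a card and returns the new direction ids.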
@login_required
@group_required("Лечащий врач", "Оператор лечащего врача")
def directions_generate(request):
result = {"ok": False, "directions": [], "message": ""}
if request.method == "POST":
p = json.loads(request.body)
rc = directions.Napravleniya.gen_napravleniya_by_issledovaniya(p.get("card_pk"),
p.get("diagnos"),
p.get("fin_source"),
p.get("history_num"),
p.get("ofname_pk"),
request.user.doctorprofile,
p.get("researches"),
p.get("comments"),
p.get("for_rmis"),
p.get("rmis_data", {}),
vich_code=p.get("vich_code", ""))
result["ok"] = rc["r"]
result["directions"] = rc["list_id"]
if "message" in rc:
result["message"] = rc["message"]
return JsonResponse(result)
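# Lists the directions of a patient (or, for type 4, those created/attended by the current doctor) within a date
# range, with an aggregate status per direction: 0 - issued, 1 - material received, 2 - confirmed, -1 - cancelled.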
@login_required
def directions_history(request):
res = {"directions": []}
request_data = json.loads(request.body)
pk = request_data.get("patient", -1)
req_status = request_data.get("type", 4)
date_start, date_end = try_parse_range(request_data["date_from"], request_data["date_to"])
try:
if pk >= 0 or req_status == 4:
if req_status != 4:
rows = directions.Napravleniya.objects.filter(data_sozdaniya__range=(date_start, date_end),
client__pk=pk).order_by(
"-data_sozdaniya").prefetch_related()
else:
rows = directions.Napravleniya.objects.filter(Q(data_sozdaniya__range=(date_start, date_end),
doc_who_create=request.user.doctorprofile)
| Q(data_sozdaniya__range=(date_start, date_end),
doc=request.user.doctorprofile)).order_by(
"-data_sozdaniya")
for napr in rows.values("pk", "data_sozdaniya", "cancel"):
iss_list = directions.Issledovaniya.objects.filter(napravleniye__pk=napr["pk"]).prefetch_related(
"tubes", "research", "research__podrazdeleniye")
if not iss_list.exists():
continue
                status = 2  # 0 - issued; 1 - sample received by the lab; 2 - result confirmed; -1 - cancelled
has_conf = False
researches_list = []
researches_pks = []
has_descriptive = False
for v in iss_list:
if v.research.podrazdeleniye and v.research.podrazdeleniye.p_type == Podrazdeleniya.PARACLINIC:
has_descriptive = True
researches_list.append(v.research.title)
researches_pks.append(v.research.pk)
iss_status = 1
if not v.doc_confirmation and not v.doc_save and not v.deferred:
iss_status = 1
if v.tubes.count() == 0:
iss_status = 0
else:
for t in v.tubes.all():
if not t.time_recive:
iss_status = 0
elif v.doc_confirmation or v.deferred:
iss_status = 2
if v.doc_confirmation and not has_conf:
has_conf = True
status = min(iss_status, status)
if status == 2 and not has_conf:
status = 1
if req_status in [3, 4] or req_status == status:
res["directions"].append(
{"pk": napr["pk"], "status": -1 if status == 0 and napr["cancel"] else status,
"researches": ' | '.join(researches_list),
"researches_pks": researches_pks,
"date": str(dateformat.format(napr["data_sozdaniya"].date(), settings.DATE_FORMAT_SHORT)),
"lab": "Консультации" if not iss_list[0].research.get_podrazdeleniye() or iss_list[
0].research.is_doc_refferal
else iss_list[0].research.get_podrazdeleniye().title, "cancel": napr["cancel"],
"checked": False,
"has_descriptive": has_descriptive})
except (ValueError, IndexError) as e:
res["message"] = str(e)
return JsonResponse(res)
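# Toggles the "cancel" flag of a single direction.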
@login_required
def directions_cancel(request):
response = {"cancel": False}
request_data = json.loads(request.body)
pk = request_data.get("pk", -1)
if directions.Napravleniya.objects.filter(pk=pk).exists():
direction = directions.Napravleniya.objects.get(pk=pk)
direction.cancel = not direction.cancel
direction.save()
response["cancel"] = direction.cancel
return JsonResponse(response)
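# For each requested research, lists its selectable parameters: paraclinic input groups or laboratory fractions.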
@login_required
def researches_params(request):
response = {"researches": []}
request_data = json.loads(request.body)
pks = request_data.get("pks", [])
for research in DResearches.objects.filter(pk__in=pks):
params = []
if research.is_paraclinic:
for g in ParaclinicInputGroups.objects.filter(research=research).exclude(title="").order_by("order"):
params.append({"pk": g.pk, "title": g.title})
else:
for f in Fractions.objects.filter(research=research).order_by("sort_weight"):
params.append({"pk": f.pk, "title": f.title})
response["researches"].append({"pk": research.pk, "title": research.title,
"short_title": research.get_title(),
"params": params, "is_paraclinic": research.is_paraclinic,
"selected_params": []})
return JsonResponse(response)
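# Lists the researches that belong to the given department.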
@login_required
@group_required("Оператор", "Конструктор: Параклинические (описательные) исследования")
def researches_by_department(request):
response = {"researches": []}
request_data = json.loads(request.body)
department_pk = int(request_data["department"])
if department_pk != -1:
for research in DResearches.objects.filter(podrazdeleniye__pk=department_pk).order_by("title"):
response["researches"].append({
"pk": research.pk,
"title": research.title,
"short_title": research.short_title,
"preparation": research.preparation,
"hide": research.hide,
"code": research.code,
})
return JsonResponse(response)
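# Creates or updates a research together with its paraclinic input groups and fields, then logs the change.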
@login_required
@group_required("Оператор", "Конструктор: Параклинические (описательные) исследования")
def researches_update(request):
response = {"ok": False}
request_data = json.loads(request.body)
pk = request_data.get("pk", -2)
if pk > -2:
department_pk = request_data.get("department")
title = request_data.get("title").strip()
short_title = request_data.get("short_title").strip()
code = request_data.get("code").strip()
info = request_data.get("info").strip()
hide = request_data.get("hide")
groups = request_data.get("groups")
if len(title) > 0 and Podrazdeleniya.objects.filter(pk=department_pk).exists():
department = Podrazdeleniya.objects.filter(pk=department_pk)[0]
res = None
if pk == -1:
res = DResearches(title=title, short_title=short_title, podrazdeleniye=department, code=code,
is_paraclinic=department.p_type == 3, paraclinic_info=info, hide=hide)
elif DResearches.objects.filter(pk=pk).exists():
res = DResearches.objects.filter(pk=pk)[0]
res.title = title
res.short_title = short_title
res.podrazdeleniye = department
res.code = code
res.is_paraclinic = department.p_type == 3
res.paraclinic_info = info
res.hide = hide
if res:
res.save()
for group in groups:
g = None
pk = group["pk"]
if pk == -1:
g = ParaclinicInputGroups(title=group["title"],
show_title=group["show_title"],
research=res,
order=group["order"],
hide=group["hide"])
elif ParaclinicInputGroups.objects.filter(pk=pk).exists():
g = ParaclinicInputGroups.objects.get(pk=pk)
g.title = group["title"]
g.show_title = group["show_title"]
g.research = res
g.order = group["order"]
g.hide = group["hide"]
if g:
g.save()
for field in group["fields"]:
f = None
pk = field["pk"]
if pk == -1:
f = ParaclinicInputField(title=field["title"],
group=g,
order=field["order"],
lines=field["lines"],
hide=field["hide"],
default_value=field["default"],
input_templates=json.dumps(field["values_to_input"]))
elif ParaclinicInputField.objects.filter(pk=pk).exists():
f = ParaclinicInputField.objects.get(pk=pk)
f.title = field["title"]
f.group = g
f.order = field["order"]
f.lines = field["lines"]
f.hide = field["hide"]
f.default_value = field["default"]
f.input_templates = json.dumps(field["values_to_input"])
if f:
f.save()
response["ok"] = True
slog.Log(key=pk, type=10000, body=json.dumps(request_data), user=request.user.doctorprofile).save()
return JsonResponse(response)
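# Returns the full editable description of a research, including its paraclinic groups and fields.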
@login_required
@group_required("Оператор", "Конструктор: Параклинические (описательные) исследования")
def researches_details(request):
response = {"pk": -1, "department": -1, "title": '', "short_title": '', "code": '', "info": '', "hide": False,
"groups": []}
request_data = json.loads(request.body)
pk = request_data.get("pk")
if DResearches.objects.filter(pk=pk).exists():
res = DResearches.objects.filter(pk=pk)[0]
response["pk"] = res.pk
response["department"] = res.podrazdeleniye.pk
response["title"] = res.title
response["short_title"] = res.short_title
response["code"] = res.code
response["info"] = res.paraclinic_info or ""
response["hide"] = res.hide
for group in ParaclinicInputGroups.objects.filter(research__pk=pk).order_by("order"):
g = {"pk": group.pk, "order": group.order, "title": group.title, "show_title": group.show_title,
"hide": group.hide, "fields": []}
for field in ParaclinicInputField.objects.filter(group=group).order_by("order"):
g["fields"].append({
"pk": field.pk,
"order": field.order,
"lines": field.lines,
"title": field.title,
"default": field.default_value,
"hide": field.hide,
"values_to_input": json.loads(field.input_templates),
"new_value": ""
})
response["groups"].append(g)
return JsonResponse(response)
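# Returns only the paraclinic groups/fields of a research (as used when entering results).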
@login_required
@group_required("Оператор", "Конструктор: Параклинические (описательные) исследования")
def paraclinic_details(request):
response = {"groups": []}
request_data = json.loads(request.body)
pk = request_data.get("pk")
for group in ParaclinicInputGroups.objects.filter(research__pk=pk).order_by("order"):
g = {"pk": group.pk, "order": group.order, "title": group.title, "show_title": group.show_title,
"hide": group.hide, "fields": []}
for field in ParaclinicInputField.objects.filter(group=group).order_by("order"):
g["fields"].append({
"pk": field.pk,
"order": field.order,
"lines": field.lines,
"title": field.title,
"default": field.default_value,
"hide": field.hide,
"values_to_input": json.loads(field.input_templates)
})
response["groups"].append(g)
return JsonResponse(response)
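# Returns the results of a direction grouped by research and fraction, including reference ranges, culture rows
# for non-tabular (render_type != 0) results, lab comments, and "отложен" placeholders for deferred researches.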
@login_required
def directions_results(request):
result = {"ok": False,
"direction": {"pk": -1, "doc": "", "date": ""},
"client": {},
"full": False}
request_data = json.loads(request.body)
pk = request_data.get("pk", -1)
if directions.Napravleniya.objects.filter(pk=pk).exists():
napr = directions.Napravleniya.objects.get(pk=pk)
dates = {}
for iss in directions.Issledovaniya.objects.filter(napravleniye=napr, time_save__isnull=False):
if iss.time_save:
dt = str(dateformat.format(iss.time_save, settings.DATE_FORMAT))
if dt not in dates.keys():
dates[dt] = 0
dates[dt] += 1
import operator
maxdate = ""
if dates != {}:
maxdate = max(dates.items(), key=operator.itemgetter(1))[0]
iss_list = directions.Issledovaniya.objects.filter(napravleniye=napr)
t = 0
if not iss_list.filter(doc_confirmation__isnull=True).exists() or iss_list.filter(deferred=False).exists():
result["direction"]["pk"] = napr.pk
result["full"] = False
result["ok"] = True
if iss_list.filter(doc_confirmation__isnull=False).exists():
result["direction"]["doc"] = iss_list.filter(doc_confirmation__isnull=False)[
0].doc_confirmation.get_fio()
if iss_list.filter(doc_confirmation__isnull=True, deferred=False).exists():
result["direction"]["doc"] = result["direction"]["doc"] + " (выполнено не полностью)"
else:
result["full"] = True
else:
result["direction"]["doc"] = "Не подтверждено"
result["direction"]["date"] = maxdate
result["client"]["sex"] = napr.client.individual.sex
result["client"]["fio"] = napr.client.individual.fio()
result["client"]["age"] = napr.client.individual.age_s(direction=napr)
result["client"]["cardnum"] = napr.client.number_with_type()
result["client"]["dr"] = napr.client.individual.bd()
result["results"] = collections.OrderedDict()
isses = []
for issledovaniye in iss_list.order_by("tubes__id", "research__sort_weight"):
if issledovaniye.pk in isses:
continue
isses.append(issledovaniye.pk)
t += 1
kint = "%s_%s_%s_%s" % (t,
"-1" if not issledovaniye.research.direction else issledovaniye.research.direction.pk,
issledovaniye.research.sort_weight,
issledovaniye.research.pk)
result["results"][kint] = {"title": issledovaniye.research.title,
"fractions": collections.OrderedDict(),
"sort": issledovaniye.research.sort_weight,
"tube_time_get": ""}
if not issledovaniye.deferred or issledovaniye.doc_confirmation:
for isstube in issledovaniye.tubes.all():
if isstube.time_get:
result["results"][kint]["tube_time_get"] = str(
dateformat.format(isstube.time_get, settings.DATE_FORMAT))
break
                    results = directions.Result.objects.filter(issledovaniye=issledovaniye).order_by(
                        "fraction__sort_weight")  # fetch the stored results of this test from the database
                    n = 0
                    for res in results:  # iterate over the results
pk = res.fraction.sort_weight
if not pk or pk <= 0:
pk = res.fraction.pk
if res.fraction.render_type == 0:
if pk not in result["results"][kint]["fractions"].keys():
result["results"][kint]["fractions"][pk] = {}
result["results"][kint]["fractions"][pk]["result"] = result_normal(res.value)
result["results"][kint]["fractions"][pk]["title"] = res.fraction.title
result["results"][kint]["fractions"][pk]["units"] = res.get_units()
refs = res.get_ref(full=True)
ref_m = refs["m"]
ref_f = refs["f"]
if isinstance(ref_m, str):
ref_m = json.loads(ref_m)
if isinstance(ref_f, str):
ref_f = json.loads(ref_f)
result["results"][kint]["fractions"][pk]["ref_m"] = ref_m
result["results"][kint]["fractions"][pk]["ref_f"] = ref_f
else:
try:
tmp_results = json.loads("{}" if not res.value else res.value).get("rows", {})
except Exception:
tmp_results = {}
n = 0
for row in tmp_results.values():
n += 1
tmp_pk = "%d_%d" % (pk, n)
if tmp_pk not in result["results"][kint]["fractions"].keys():
result["results"][kint]["fractions"][tmp_pk] = {}
result["results"][kint]["fractions"][tmp_pk]["title"] = "Выделенная культура"
result["results"][kint]["fractions"][tmp_pk]["result"] = row["title"]
result["results"][kint]["fractions"][tmp_pk]["ref_m"] = {}
result["results"][kint]["fractions"][tmp_pk]["ref_f"] = {}
result["results"][kint]["fractions"][tmp_pk]["units"] = ""
for subrow in row["rows"].values():
if "null" in subrow["value"]:
continue
n += 1
tmp_pk = "%d_%d" % (pk, n)
if tmp_pk not in result["results"][kint]["fractions"].keys():
result["results"][kint]["fractions"][tmp_pk] = {}
result["results"][kint]["fractions"][tmp_pk]["title"] = subrow["title"]
result["results"][kint]["fractions"][tmp_pk]["result"] = subrow["value"]
result["results"][kint]["fractions"][tmp_pk]["ref_m"] = {}
result["results"][kint]["fractions"][tmp_pk]["ref_f"] = {}
result["results"][kint]["fractions"][tmp_pk]["units"] = ""
n += 1
tmp_pk = "%d_%d" % (pk, n)
if tmp_pk not in result["results"][kint]["fractions"].keys():
result["results"][kint]["fractions"][tmp_pk] = {}
result["results"][kint]["fractions"][tmp_pk][
"title"] = "S - чувствителен; R - резистентен; I - промежуточная чувствительность;"
result["results"][kint]["fractions"][tmp_pk]["result"] = ""
result["results"][kint]["fractions"][tmp_pk]["ref_m"] = {}
result["results"][kint]["fractions"][tmp_pk]["ref_f"] = {}
result["results"][kint]["fractions"][tmp_pk]["units"] = ""
if issledovaniye.lab_comment and issledovaniye.lab_comment != "":
n += 1
tmp_pk = "%d_%d" % (pk, n)
if tmp_pk not in result["results"][kint]["fractions"].keys():
result["results"][kint]["fractions"][tmp_pk] = {}
result["results"][kint]["fractions"][tmp_pk]["title"] = "Комментарий"
result["results"][kint]["fractions"][tmp_pk]["result"] = issledovaniye.lab_comment.replace("\n",
"<br/>")
result["results"][kint]["fractions"][tmp_pk]["ref_m"] = {}
result["results"][kint]["fractions"][tmp_pk]["ref_f"] = {}
result["results"][kint]["fractions"][tmp_pk]["units"] = ""
                else:
                    fr_list = Fractions.objects.filter(research=issledovaniye.research)
                    for fr in fr_list:
                        pk = fr.sort_weight
                        if not pk or pk <= 0:
                            pk = fr.pk
                        if pk not in result["results"][kint]["fractions"].keys():
                            result["results"][kint]["fractions"][pk] = {}
                        result["results"][kint]["fractions"][pk]["result"] = "отложен"  # value (deferred)
                        result["results"][kint]["fractions"][pk]["title"] = fr.title  # fraction title
                        result["results"][kint]["fractions"][pk]["units"] = fr.units  # units of measurement
                        ref_m = {"": ""}  # fr.ref_m
                        ref_f = {"": ""}  # fr.ref_f
                        if isinstance(ref_m, str):
                            ref_m = json.loads(ref_m)
                        if isinstance(ref_f, str):
                            ref_f = json.loads(ref_f)
                        result["results"][kint]["fractions"][pk]["ref_m"] = ref_m  # male reference range
                        result["results"][kint]["fractions"][pk]["ref_f"] = ref_f  # female reference range
return JsonResponse(result)
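# Reference data for statistics tickets: visit purposes, treatment results, outcomes and dispensary-exclusion purposes.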
@group_required("Оформление статталонов", "Лечащий врач", "Оператор лечащего врача")
def statistics_tickets_types(request):
result = {"visit": [{"pk": x.pk, "title": x.title} for x in VisitPurpose.objects.filter(hide=False).order_by("pk")],
"result": [{"pk": x.pk, "title": x.title} for x in
ResultOfTreatment.objects.filter(hide=False).order_by("pk")],
"outcome": [{"pk": x.pk, "title": x.title} for x in
Outcomes.objects.filter(hide=False).order_by("pk")],
"exclude": [{"pk": x.pk, "title": x.title} for x in
ExcludePurposes.objects.filter(hide=False).order_by("pk")]}
return JsonResponse(result)
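# Saves a new statistics ticket for a card and writes an audit log entry (type 7000).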
@group_required("Оформление статталонов", "Лечащий врач", "Оператор лечащего врача")
def statistics_tickets_send(request):
response = {"ok": True}
rd = json.loads(request.body)
ofname = rd.get("ofname") or -1
doc = None
if ofname > -1 and users.DoctorProfile.objects.filter(pk=ofname).exists():
doc = users.DoctorProfile.objects.get(pk=ofname)
t = StatisticsTicket(card=Card.objects.get(pk=rd["card_pk"]),
purpose=VisitPurpose.objects.get(pk=rd["visit"]),
result=ResultOfTreatment.objects.get(pk=rd["result"]),
info=rd["info"].strip(),
first_time=rd["first_time"],
primary_visit=rd["primary_visit"],
dispensary_registration=int(rd["disp"]),
doctor=doc or request.user.doctorprofile,
creator=request.user.doctorprofile,
outcome=Outcomes.objects.filter(pk=rd["outcome"]).first(),
dispensary_exclude_purpose=ExcludePurposes.objects.filter(pk=rd["exclude"]).first(),
dispensary_diagnos=rd["disp_diagnos"],
date_ticket=rd.get("date_ticket", None))
t.save()
Log(key="", type=7000, body=json.dumps(rd), user=request.user.doctorprofile).save()
return JsonResponse(response)
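# Lists the statistics tickets created by or assigned to the current doctor for the given date.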
@group_required("Оформление статталонов", "Лечащий врач", "Оператор лечащего врача")
def statistics_tickets_get(request):
response = {"data": []}
request_data = json.loads(request.body)
date_start, date_end = try_parse_range(request_data["date"])
n = 0
for row in StatisticsTicket.objects.filter(
Q(doctor=request.user.doctorprofile) | Q(creator=request.user.doctorprofile)).filter(
date__range=(date_start, date_end,)).order_by('pk'):
if not row.invalid_ticket:
n += 1
response["data"].append({
"pk": row.pk,
"n": n if not row.invalid_ticket else '',
"doc": row.doctor.get_fio(),
"date_ticket": row.get_date(),
"department": row.doctor.podrazdeleniye.get_title(),
"patinet": row.card.individual.fio(full=True),
"card": row.card.number_with_type(),
"purpose": row.purpose.title if row.purpose else "",
"first_time": row.first_time,
"primary": row.primary_visit,
"info": row.info,
"disp": row.get_dispensary_registration_display()
+ (" (" + row.dispensary_diagnos + ")" if row.dispensary_diagnos != "" else "")
+ (" (" + row.dispensary_exclude_purpose.title + ")" if row.dispensary_exclude_purpose else ""),
"result": row.result.title if row.result else "",
"outcome": row.outcome.title if row.outcome else "",
"invalid": row.invalid_ticket,
"can_invalidate": row.can_invalidate()
})
return JsonResponse(response)
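# Marks a statistics ticket as invalid (or restores it) while the cancellation window is still open; logs type 7001.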
@group_required("Оформление статталонов", "Лечащий врач", "Оператор лечащего врача")
def statistics_tickets_invalidate(request):
response = {"ok": False, "message": ""}
request_data = json.loads(request.body)
if StatisticsTicket.objects.filter(
Q(doctor=request.user.doctorprofile) | Q(creator=request.user.doctorprofile)).filter(
pk=request_data.get("pk", -1)).exists():
if StatisticsTicket.objects.get(pk=request_data["pk"]).can_invalidate():
for s in StatisticsTicket.objects.filter(pk=request_data["pk"]):
s.invalid_ticket = request_data.get("invalid", False)
s.save()
response["ok"] = True
Log(key=str(request_data["pk"]), type=7001, body=json.dumps(request_data.get("invalid", False)),
user=request.user.doctorprofile).save()
else:
response["message"] = "Время на отмену или возврат истекло"
return JsonResponse(response)
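# Builds the data for the paraclinic (descriptive) result entry form of a direction: patient and direction info,
# the groups/fields with their current values, and whether the confirmation may still be reset by this user.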
@group_required("Врач параклиники")
def directions_paraclinic_form(request):
import time
response = {"ok": False, "message": ""}
request_data = json.loads(request.body)
pk = request_data.get("pk", -1) or -1
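    # pk values of 4600000000000 and above apparently come from a scanned barcode:
    # strip the prefix and the trailing check digit to recover the direction pk.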
if pk >= 4600000000000:
pk -= 4600000000000
pk //= 10
add_f = {}
add_fr = {}
if not request.user.is_superuser:
add_f = dict(issledovaniya__research__podrazdeleniye=request.user.doctorprofile.podrazdeleniye)
add_fr = dict(research__podrazdeleniye=request.user.doctorprofile.podrazdeleniye)
if directions.Napravleniya.objects.filter(pk=pk, issledovaniya__research__is_paraclinic=True, **add_f).exists():
response["ok"] = True
d = \
directions.Napravleniya.objects.filter(pk=pk, issledovaniya__research__is_paraclinic=True,
**add_f).distinct()[
0]
response["patient"] = {
"fio_age": d.client.individual.fio(full=True),
"card": d.client.number_with_type(),
"doc": "" if not d.doc else (d.doc.get_fio(dots=True) + ", " + d.doc.podrazdeleniye.title),
"imported_from_rmis": d.imported_from_rmis,
"imported_org": "" if not d.imported_org else d.imported_org.title,
}
response["direction"] = {
"pk": d.pk,
"date": strdate(d.data_sozdaniya),
"diagnos": d.diagnos,
"fin_source": "" if not d.istochnik_f else d.istochnik_f.title
}
response["researches"] = []
for i in directions.Issledovaniya.objects.filter(napravleniye=d, research__is_paraclinic=True, **add_fr):
ctp = int(0 if not i.time_confirmation else int(
time.mktime(timezone.localtime(i.time_confirmation).timetuple())))
ctime = int(time.time())
cdid = -1 if not i.doc_confirmation else i.doc_confirmation.pk
rt = SettingManager.get("lab_reset_confirm_time_min") * 60
iss = {
"pk": i.pk,
"research": {
"title": i.research.title,
"groups": []
},
"saved": i.time_save is not None,
"confirmed": i.time_confirmation is not None,
"allow_reset_confirm": ((
ctime - ctp < rt and cdid == request.user.doctorprofile.pk) or request.user.is_superuser or "Сброс подтверждений результатов" in [
str(x) for x in
request.user.groups.all()]) and i.time_confirmation is not None,
}
for group in ParaclinicInputGroups.objects.filter(research=i.research, hide=False).order_by("order"):
g = {"pk": group.pk, "order": group.order, "title": group.title, "show_title": group.show_title,
"hide": group.hide, "fields": []}
for field in ParaclinicInputField.objects.filter(group=group, hide=False).order_by("order"):
g["fields"].append({
"pk": field.pk,
"order": field.order,
"lines": field.lines,
"title": field.title,
"hide": field.hide,
"values_to_input": json.loads(field.input_templates),
"value": field.default_value if not directions.ParaclinicResult.objects.filter(
issledovaniye=i, field=field).exists() else
directions.ParaclinicResult.objects.filter(issledovaniye=i, field=field)[0].value,
})
iss["research"]["groups"].append(g)
response["researches"].append(iss)
else:
response["message"] = "Направление не найдено"
return JsonResponse(response)
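# Recursively removes the given keys from a dict, descending into nested dicts and lists of dicts.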
def delete_keys_from_dict(dict_del, lst_keys):
for k in lst_keys:
try:
del dict_del[k]
except KeyError:
pass
for v in dict_del.values():
if isinstance(v, dict):
delete_keys_from_dict(v, lst_keys)
if isinstance(v, list):
for ll in v:
delete_keys_from_dict(ll, lst_keys)
return dict_del
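# Saves the submitted paraclinic field values of an issledovaniye; optionally confirms it and marks the visit
# on its direction. Logs type 13.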
@group_required("Врач параклиники")
def directions_paraclinic_result(request):
response = {"ok": False, "message": ""}
request_data = json.loads(request.body).get("data", {})
pk = request_data.get("pk", -1)
with_confirm = json.loads(request.body).get("with_confirm", False)
if directions.Issledovaniya.objects.filter(pk=pk, time_confirmation__isnull=True,
research__podrazdeleniye=request.user.doctorprofile.podrazdeleniye).exists():
iss = directions.Issledovaniya.objects.get(pk=pk)
for group in request_data["research"]["groups"]:
for field in group["fields"]:
if not ParaclinicInputField.objects.filter(pk=field["pk"]).exists():
continue
f = ParaclinicInputField.objects.get(pk=field["pk"])
if not directions.ParaclinicResult.objects.filter(issledovaniye=iss, field=f).exists():
f_result = directions.ParaclinicResult(issledovaniye=iss, field=f, value="")
else:
f_result = directions.ParaclinicResult.objects.filter(issledovaniye=iss, field=f)[0]
f_result.value = field["value"]
f_result.save()
iss.doc_save = request.user.doctorprofile
iss.time_save = timezone.now()
if with_confirm:
iss.doc_confirmation = request.user.doctorprofile
iss.time_confirmation = timezone.now()
if not iss.napravleniye.visit_who_mark or not iss.napravleniye.visit_date:
iss.napravleniye.visit_who_mark = request.user.doctorprofile
iss.napravleniye.visit_date = timezone.now()
iss.napravleniye.save()
iss.save()
response["ok"] = True
slog.Log(key=pk, type=13, body="", user=request.user.doctorprofile).save()
return JsonResponse(response)
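# Confirms a paraclinic issledovaniye and marks the visit on its direction. Logs type 14.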
@group_required("Врач параклиники")
def directions_paraclinic_confirm(request):
response = {"ok": False, "message": ""}
request_data = json.loads(request.body)
pk = request_data.get("iss_pk", -1)
if directions.Issledovaniya.objects.filter(pk=pk, time_confirmation__isnull=True,
research__podrazdeleniye=request.user.doctorprofile.podrazdeleniye).exists():
iss = directions.Issledovaniya.objects.get(pk=pk)
t = timezone.now()
if not iss.napravleniye.visit_who_mark or not iss.napravleniye.visit_date:
iss.napravleniye.visit_who_mark = request.user.doctorprofile
iss.napravleniye.visit_date = t
iss.napravleniye.save()
iss.doc_confirmation = request.user.doctorprofile
iss.time_confirmation = t
iss.save()
response["ok"] = True
slog.Log(key=pk, type=14, body=json.dumps(request_data), user=request.user.doctorprofile).save()
return JsonResponse(response)
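# Resets the confirmation of an issledovaniye while the reset window is open (or for superusers / the override
# group), withdrawing the services already sent to RMIS if necessary. Logs type 24.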
@group_required("Врач параклиники", "Сброс подтверждений результатов")
def directions_paraclinic_confirm_reset(request):
response = {"ok": False, "message": ""}
request_data = json.loads(request.body)
pk = request_data.get("iss_pk", -1)
if directions.Issledovaniya.objects.filter(pk=pk).exists():
iss = directions.Issledovaniya.objects.get(pk=pk)
import time
ctp = int(
0 if not iss.time_confirmation else int(time.mktime(timezone.localtime(iss.time_confirmation).timetuple())))
ctime = int(time.time())
cdid = -1 if not iss.doc_confirmation else iss.doc_confirmation.pk
if (ctime - ctp < SettingManager.get(
"lab_reset_confirm_time_min") * 60 and cdid == request.user.doctorprofile.pk) or request.user.is_superuser or "Сброс подтверждений результатов" in [
str(x) for x in request.user.groups.all()]:
predoc = {"fio": iss.doc_confirmation.get_fio(), "pk": iss.doc_confirmation.pk,
"direction": iss.napravleniye.pk}
iss.doc_confirmation = iss.time_confirmation = None
iss.save()
if iss.napravleniye.result_rmis_send:
c = Client()
c.directions.delete_services(iss.napravleniye, request.user.doctorprofile)
response["ok"] = True
slog.Log(key=pk, type=24, body=json.dumps(predoc), user=request.user.doctorprofile).save()
else:
response["message"] = "Сброс подтверждения разрешен в течении %s минут" % (
str(SettingManager.get("lab_reset_confirm_time_min")))
return JsonResponse(response)
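# Lists the directions whose issledovaniya were saved or confirmed by the current doctor within a date range.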
@group_required("Врач параклиники")
def directions_paraclinic_history(request):
response = {"directions": []}
request_data = json.loads(request.body)
date_start, date_end = try_parse_range(request_data["date"])
has_dirs = []
for direction in directions. \
Napravleniya.objects.filter(Q(issledovaniya__doc_save=request.user.doctorprofile) |
Q(issledovaniya__doc_confirmation=request.user.doctorprofile)) \
.filter(Q(issledovaniya__time_confirmation__range=(date_start, date_end)) |
Q(issledovaniya__time_save__range=(date_start, date_end))) \
.order_by("-issledovaniya__time_save", "-issledovaniya__time_confirmation"):
if direction.pk in has_dirs:
continue
has_dirs.append(direction.pk)
d = {
"pk": direction.pk,
"date": strdate(direction.data_sozdaniya),
"patient": direction.client.individual.fio(full=True, direction=direction),
"card": direction.client.number_with_type(),
"iss": [],
"all_confirmed": True,
"all_saved": True
}
for i in directions.Issledovaniya.objects.filter(napravleniye=direction).order_by("pk"):
iss = {"title": i.research.title,
"saved": i.time_save is not None,
"confirmed": i.time_confirmation is not None}
d["iss"].append(iss)
if not iss["saved"]:
d["all_saved"] = False
if not iss["confirmed"]:
d["all_confirmed"] = False
response["directions"].append(d)
return JsonResponse(response)
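# Returns the services (researches) of a paraclinic direction together with its visit status and whether the
# visit mark may still be reset.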
@group_required("Врач параклиники", "Посещения по направлениям")
def directions_services(request):
response = {"ok": False, "message": ""}
request_data = json.loads(request.body)
pk = request_data.get("pk", -1)
if pk >= 4600000000000:
pk -= 4600000000000
pk //= 10
if directions.Napravleniya.objects.filter(pk=pk, issledovaniya__research__is_paraclinic=True).exists():
n = directions.Napravleniya.objects.filter(pk=pk, issledovaniya__research__is_paraclinic=True)[0]
cdid, ctime, ctp, rt = get_reset_time_vars(n)
response["ok"] = True
researches = []
for i in directions.Issledovaniya.objects.filter(napravleniye=n):
researches.append({"title": i.research.title,
"department": "" if not i.research.podrazdeleniye else i.research.podrazdeleniye.get_title()})
response["direction_data"] = {
"date": strdate(n.data_sozdaniya),
"client": n.client.individual.fio(full=True),
"card": n.client.number_with_type(),
"diagnos": n.diagnos,
"doc": "" if not n.doc else "{}, {}".format(n.doc.get_fio(), n.doc.podrazdeleniye.title),
"imported_from_rmis": n.imported_from_rmis,
"imported_org": "" if not n.imported_org else n.imported_org.title,
"visit_who_mark": "" if not n.visit_who_mark else "{}, {}".format(n.visit_who_mark.get_fio(),
n.visit_who_mark.podrazdeleniye.title),
"fin_source": "" if not n.istochnik_f else "{} - {}".format(n.istochnik_f.base.title, n.istochnik_f.title),
}
response["researches"] = researches
response["loaded_pk"] = pk
response["visit_status"] = n.visit_date is not None
response["visit_date"] = "" if not n.visit_date else strdatetime(n.visit_date)
response["allow_reset_confirm"] = bool(((
ctime - ctp < rt and cdid == request.user.doctorprofile.pk) or request.user.is_superuser or "Сброс подтверждений результатов" in [
str(x) for x in
request.user.groups.all()]) and n.visit_date)
else:
response["message"] = "Направление не найдено"
return JsonResponse(response)
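# Helper: returns (id of the doctor who marked the visit or -1, current unix time, visit unix time,
# allowed reset window in seconds) for a direction.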
def get_reset_time_vars(n):
ctp = int(0 if not n.visit_date else int(time.mktime(timezone.localtime(n.visit_date).timetuple())))
ctime = int(time.time())
cdid = -1 if not n.visit_who_mark else n.visit_who_mark_id
rt = SettingManager.get("visit_reset_time_min", default="20.0", default_type='f') * 60
return cdid, ctime, ctp, rt
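# Marks a patient visit on a paraclinic direction, or cancels the mark if still within the reset window. Logs type 5001.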
@group_required("Врач параклиники", "Посещения по направлениям")
def directions_mark_visit(request):
response = {"ok": False, "message": ""}
request_data = json.loads(request.body)
pk = request_data.get("pk", -1)
cancel = request_data.get("cancel", False)
if directions.Napravleniya.objects.filter(pk=pk, issledovaniya__research__is_paraclinic=True).exists():
n = directions.Napravleniya.objects.filter(pk=pk, issledovaniya__research__is_paraclinic=True)[0]
if not cancel:
n.visit_date = timezone.now()
n.visit_who_mark = request.user.doctorprofile
n.save()
cdid, ctime, ctp, rt = get_reset_time_vars(n)
allow_reset_confirm = bool(((
ctime - ctp < rt and cdid == request.user.doctorprofile.pk) or request.user.is_superuser or "Сброс подтверждений результатов" in [
str(x) for x in
request.user.groups.all()]) and n.visit_date)
response["visit_status"] = n.visit_date is not None
response["visit_date"] = strdatetime(n.visit_date)
response["allow_reset_confirm"] = allow_reset_confirm
response["ok"] = True
else:
ctp = int(0 if not n.visit_date else int(time.mktime(timezone.localtime(n.visit_date).timetuple())))
ctime = int(time.time())
cdid = -1 if not n.visit_who_mark else n.visit_who_mark_id
rtm = SettingManager.get("visit_reset_time_min", default="20.0", default_type='f')
rt = rtm * 60
allow_reset_confirm = bool(((
ctime - ctp < rt and cdid == request.user.doctorprofile.pk) or request.user.is_superuser or "Сброс подтверждений результатов" in [
str(x) for x in
request.user.groups.all()]) and n.visit_date)
if allow_reset_confirm:
response["ok"] = True
response["visit_status"] = None
response["visit_date"] = ''
response["allow_reset_confirm"] = False
n.visit_date = None
n.visit_who_mark = None
n.save()
else:
response["message"] = "Отмена посещения возможна только в течении {} мин.".format(rtm)
slog.Log(key=pk, type=5001,
body=json.dumps({"Посещение": "отмена" if cancel else "да", "Дата и время": response["visit_date"]}),
user=request.user.doctorprofile).save()
else:
response["message"] = "Направление не найдено"
return JsonResponse(response)
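# Lists the visits marked by the current doctor within a date range.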
@group_required("Врач параклиники", "Посещения по направлениям")
def directions_visit_journal(request):
response = {"data": []}
request_data = json.loads(request.body)
date_start, date_end = try_parse_range(request_data["date"])
for v in directions.Napravleniya.objects.filter(visit_date__range=(date_start, date_end,),
visit_who_mark=request.user.doctorprofile).order_by("-visit_date"):
response["data"].append({
"pk": v.pk,
"client": v.client.individual.fio(full=True),
"card": v.client.number_with_type(),
"datetime": strdatetime(v.visit_date)
})
return JsonResponse(response)
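# For an individual and a research, finds the latest confirmed result (i), the newest unconfirmed direction (u)
# and the newest unconfirmed paraclinic visit (v), and reports whichever is most recent plus the last confirmed result.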
@login_required
def directions_last_result(request):
response = {"ok": False, "data": {}, "type": "result", "has_last_result": False}
request_data = json.loads(request.body)
individual = request_data.get("individual", -1)
research = request_data.get("research", -1)
i = directions.Issledovaniya.objects.filter(napravleniye__client__individual__pk=individual,
research__pk=research,
time_confirmation__isnull=False).order_by("-time_confirmation").first()
u = directions.Issledovaniya.objects.filter(napravleniye__client__individual__pk=individual,
research__pk=research,
time_confirmation__isnull=True).order_by(
"-napravleniye__data_sozdaniya").first()
v = directions.Issledovaniya.objects.filter(napravleniye__client__individual__pk=individual,
research__pk=research,
research__is_paraclinic=True,
time_confirmation__isnull=True,
napravleniye__visit_date__isnull=False).order_by(
"-napravleniye__visit_date").first()
if i:
if not u or i.time_confirmation >= u.napravleniye.data_sozdaniya:
response["ok"] = True
if v and v.napravleniye.visit_date > i.time_confirmation:
response["type"] = "visit"
response["data"] = {"direction": u.napravleniye_id, "datetime": strdate(v.napravleniye.visit_date),
"ts": tsdatetime(v.napravleniye.visit_date)}
response["has_last_result"] = True
response["last_result"] = {"direction": i.napravleniye_id, "datetime": strdate(i.time_confirmation),
"ts": tsdatetime(i.time_confirmation),
"is_paraclinic": i.research.is_paraclinic}
else:
response["data"] = {"direction": i.napravleniye_id, "datetime": strdate(i.time_confirmation),
"ts": tsdatetime(i.time_confirmation), "is_paraclinic": i.research.is_paraclinic}
elif u:
response["ok"] = True
if v and v.napravleniye.visit_date > u.napravleniye.data_sozdaniya:
response["type"] = "visit"
response["data"] = {"direction": u.napravleniye_id, "datetime": strdate(v.napravleniye.visit_date),
"ts": tsdatetime(v.napravleniye.visit_date)}
else:
response["type"] = "direction"
response["data"] = {"direction": u.napravleniye_id, "datetime": strdate(u.napravleniye.data_sozdaniya),
"ts": tsdatetime(u.napravleniye.data_sozdaniya)}
response["has_last_result"] = True
response["last_result"] = {"direction": i.napravleniye_id, "datetime": strdate(i.time_confirmation),
"ts": tsdatetime(i.time_confirmation), "is_paraclinic": i.research.is_paraclinic}
elif u:
response["ok"] = True
if v and v.napravleniye.visit_date > u.napravleniye.data_sozdaniya:
response["type"] = "visit"
response["data"] = {"direction": u.napravleniye_id, "datetime": strdate(v.napravleniye.visit_date),
"ts": tsdatetime(v.napravleniye.visit_date)}
else:
response["type"] = "direction"
response["data"] = {"direction": u.napravleniye_id, "datetime": strdate(u.napravleniye.data_sozdaniya),
"ts": tsdatetime(u.napravleniye.data_sozdaniya)}
return JsonResponse(response)
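# Builds a flat report of results for an individual over a date range: paraclinic group values and laboratory
# fraction values with units, normality flags and deviation from the reference range; logs the request (type 20000).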
@login_required
def directions_results_report(request):
import re
data = []
request_data = json.loads(request.body)
individual_pk = request_data.get("individual", -1)
slog.Log(key=str(individual_pk), type=20000, body=json.dumps(request_data), user=request.user.doctorprofile).save()
params = request_data.get("params", [])
date_start, date_end = try_parse_range(request_data.get("date_start"), request_data.get("date_end"))
    pat = re.compile(r"^\d+(\.\d+)?-\d+(\.\d+)?$")
if Individual.objects.filter(pk=individual_pk).exists():
i = Individual.objects.get(pk=individual_pk)
for param in params:
ppk = param["pk"]
if param["is_paraclinic"]:
if ParaclinicInputGroups.objects.filter(pk=ppk).exists():
g = ParaclinicInputGroups.objects.get(pk=ppk)
for i in directions.Issledovaniya.objects.filter(research__paraclinicinputgroups=g,
time_confirmation__isnull=False):
res = []
for r in directions.ParaclinicResult.objects.filter(field__group=g,
issledovaniye=i).order_by("field__order"):
if r.value == "":
continue
res.append((r.field.title + ": " if r.field.title != "" else "") + r.value)
if len(res) == 0:
continue
paramdata = {"research": i.research.pk,
"pk": ppk,
"order": g.order,
"date": strdate(i.time_confirmation),
"timestamp": tsdatetime(i.time_confirmation),
"value": "; ".join(res),
"units": "",
"is_norm": "normal",
"not_norm_dir": "",
"delta": 0,
"active_ref": {},
"direction": i.napravleniye.pk}
data.append(paramdata)
else:
if Fractions.objects.filter(pk=ppk).exists():
f = Fractions.objects.get(pk=ppk)
for r in directions.Result.objects.filter(issledovaniye__napravleniye__client__individual=i,
fraction=f,
issledovaniye__time_confirmation__range=(
date_start, date_end)):
if r.value == "":
continue
is_norm = r.get_is_norm()
not_norm_dir = ""
delta = ""
active_ref = r.calc_normal(fromsave=False, only_ref=True)
if "r" in active_ref and re.match(r"^\d+(\.\d+)?$", r.value.replace(",", ".").strip()):
x = float(r.value.replace(",", ".").strip())
spl = r.calc_normal(fromsave=False, only_ref=True, raw_ref=False)
if (isinstance(spl, list) or isinstance(spl, tuple)) and len(spl) == 2:
if spl[0] >= x:
not_norm_dir = "down"
nx = spl[0] - x
n10 = spl[0] * 0.2
if nx <= n10:
not_norm_dir = "n_down"
delta = nx
elif spl[1] <= x:
not_norm_dir = "up"
nx = x - spl[1]
n10 = spl[1] * 0.2
if nx <= n10:
not_norm_dir = "n_up"
delta = nx
paramdata = {"research": f.research.pk,
"pk": ppk,
"order": f.sort_weight,
"date": strdate(r.issledovaniye.time_confirmation),
"timestamp": tsdatetime(r.issledovaniye.time_confirmation),
"value": r.value,
"units": r.get_units(),
"is_norm": is_norm,
"not_norm_dir": not_norm_dir,
"delta": delta,
"active_ref": active_ref,
"direction": r.issledovaniye.napravleniye.pk}
data.append(paramdata)
data.sort(key=itemgetter("timestamp"), reverse=True)
data.sort(key=itemgetter("pk"))
data.sort(key=itemgetter("order"))
data.sort(key=itemgetter("research"))
return JsonResponse({"data": data})
def mkb10(request):
kw = request.GET.get("keyword", "")
data = []
for d in directions.Diagnoses.objects.filter(code__istartswith=kw, d_type="mkb10.4").order_by("code")[:11]:
data.append({"pk": d.pk, "code": d.code, "title": d.title})
return JsonResponse({"data": data})
def vich_code(request):
kw = request.GET.get("keyword", "")
data = []
for d in directions.Diagnoses.objects.filter(code__istartswith=kw, d_type="vc").order_by("code")[:11]:
data.append({"pk": d.pk, "code": d.code, "title": {"-": ""}.get(d.title, d.title)})
return JsonResponse({"data": data})
@login_required
def directions_rmis_directions(request):
request_data = json.loads(request.body)
pk = request_data.get("pk")
rows = []
if pk and Card.objects.filter(pk=pk, base__is_rmis=True).exists():
c = Client(modules=["directions", "services"])
sd = c.directions.get_individual_active_directions(Card.objects.get(pk=pk).number)
dirs_data = [c.directions.get_direction_full_data(x) for x in sd if
not directions.Napravleniya.objects.filter(rmis_number=x).exists()]
rows = [x for x in dirs_data if x]
return JsonResponse({"rows": rows})
@login_required
def directions_rmis_direction(request):
request_data = json.loads(request.body)
data = {}
pk = request_data.get("pk")
if pk and not directions.Napravleniya.objects.filter(rmis_number=pk).exists():
data = get_direction_full_data_cache(pk)
if not data:
c = Client(modules=["directions", "services"])
data = c.directions.get_direction_full_data(pk)
return JsonResponse(data)
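# Lists confirmed directions whose finance source is not auto-sent to RMIS and that still await manual approval.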
@login_required
@group_required("Подтверждение отправки результатов в РМИС")
def rmis_confirm_list(request):
request_data = json.loads(request.body)
data = {"directions": []}
date_start, date_end = try_parse_range(request_data["date_from"], request_data["date_to"])
d = directions.Napravleniya.objects.filter(istochnik_f__rmis_auto_send=False,
force_rmis_send=False,
issledovaniya__time_confirmation__range=(date_start, date_end)) \
.exclude(issledovaniya__time_confirmation__isnull=True).distinct().order_by("pk")
data["directions"] = [{
"pk": x.pk,
"patient": {
"fiodr": x.client.individual.fio(full=True),
"card": x.client.number_with_type()
},
"fin": x.istochnik_f.title
} for x in d]
return JsonResponse(data)
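# CSRF-exempt hook, apparently for an external fluorography (FLG) system: stores the received conclusion in the
# "Заключение" field of the research with code A06.09.006 on the given direction and stamps it as saved/confirmed.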
@csrf_exempt
def flg(request):
ok = False
dpk = request.POST["directionId"]
content = request.POST["content"]
date = try_strptime(request.POST["date"])
doc_f = request.POST["doc"].lower()
ds = directions.Napravleniya.objects.filter(pk=dpk)
if ds.exists():
d = ds[0]
iss = directions.Issledovaniya.objects.filter(napravleniye=d, research__code="A06.09.006")
if iss.exists():
i = iss[0]
doc = None
gi = None
for u in users.DoctorProfile.objects.filter(podrazdeleniye=i.research.podrazdeleniye):
if u.get_fio().lower() == doc_f or (not doc and u.has_group('Врач параклиники')):
doc = u
gis = ParaclinicInputField.objects.filter(group__research=i.research, group__title="Заключение")
if gis.exists():
gi = gis[0]
if doc and gi:
if not directions.ParaclinicResult.objects.filter(issledovaniye=i, field=gi).exists():
f_result = directions.ParaclinicResult(issledovaniye=i, field=gi, value="")
else:
f_result = directions.ParaclinicResult.objects.filter(issledovaniye=i, field=gi)[0]
if f_result.value != content:
f_result.value = content
f_result.save()
if i.doc_save != doc or i.time_save != date or i.doc_confirmation != doc or i.time_confirmation != date:
i.doc_save = doc
i.time_save = date
i.doc_confirmation = doc
i.time_confirmation = date
i.save()
if not i.napravleniye.visit_who_mark or not i.napravleniye.visit_date:
i.napravleniye.visit_who_mark = doc
i.napravleniye.visit_date = date
i.napravleniye.save()
return JsonResponse({"ok": ok})
def search_template(request):
result = []
q = request.GET.get('q', '')
if q != '':
for r in users.AssignmentTemplates.objects.filter(title__istartswith=q, global_template=False).order_by(
'title')[:10]:
result.append({"pk": r.pk, "title": r.title, "researches": [x.research.pk for x in
users.AssignmentResearches.objects.filter(
template=r, research__hide=False)]})
return JsonResponse({"result": result, "q": q})
def load_templates(request):
result = []
t = request.GET.get('type', '1')
for r in users.AssignmentTemplates.objects.filter(global_template=t == '1').order_by('title'):
result.append({"pk": r.pk, "title": r.title, "researches": [x.research.pk for x in
users.AssignmentResearches.objects.filter(
template=r, research__hide=False)]})
return JsonResponse({"result": result})
def get_template(request):
title = ''
researches = []
global_template = False
pk = request.GET.get('pk')
if pk:
t = users.AssignmentTemplates.objects.get(pk=pk)
title = t.title
researches = [x.research.pk for x in
users.AssignmentResearches.objects.filter(template=t, research__hide=False)]
global_template = t.global_template
return JsonResponse({"title": title, "researches": researches, "global_template": global_template})
@login_required
@group_required("Конструктор: Настройка шаблонов")
def update_template(request):
response = {"ok": False}
request_data = json.loads(request.body)
pk = request_data.get("pk", -2)
if pk > -2:
title = request_data.get("title").strip()
researches = request_data["researches"]
global_template = request_data["global"]
if len(title) > 0 and len(researches) > 0:
t = None
if pk == -1:
t = users.AssignmentTemplates(title=title, global_template=global_template)
t.save()
pk = t.pk
if users.AssignmentTemplates.objects.filter(pk=pk).exists():
t = users.AssignmentTemplates.objects.get(pk=pk)
t.title = title
t.global_template = global_template
t.save()
if t:
users.AssignmentResearches.objects.filter(template=t).exclude(research__pk__in=researches).delete()
to_add = [x for x in researches if
not users.AssignmentResearches.objects.filter(template=t, research__pk=x).exists()]
for ta in to_add:
if DResearches.objects.filter(pk=ta).exists():
users.AssignmentResearches(template=t, research=DResearches.objects.get(pk=ta)).save()
response["ok"] = True
return JsonResponse(response)
def modules_view(request):
return JsonResponse({
"l2_cards": SettingManager.get("l2_cards_module", default='false', default_type='b')
})
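# Ensures the card's individual has an internal L2 card (Card.add_l2_card) and returns all non-archived L2 cards
# with their documents.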
def patients_search_l2_card(request):
data = []
request_data = json.loads(request.body)
cards = Card.objects.filter(pk=request_data.get('card_pk', -1))
if cards.exists():
card_orig = cards[0]
Card.add_l2_card(card_orig=card_orig)
l2_cards = Card.objects.filter(individual=card_orig.individual, base__internal_type=True)
for row in l2_cards.filter(is_archive=False):
docs = Document.objects.filter(individual__pk=row.individual.pk, is_active=True,
document_type__title__in=['СНИЛС', 'Паспорт гражданина РФ', 'Полис ОМС'])\
.distinct("pk", "number", "document_type", "serial").order_by('pk')
data.append({"type_title": row.base.title,
"num": row.number,
"is_rmis": row.base.is_rmis,
"family": row.individual.family,
"name": row.individual.name,
"twoname": row.individual.patronymic,
"birthday": row.individual.bd(),
"age": row.individual.age_s(),
"sex": row.individual.sex,
"individual_pk": row.individual.pk,
"base_pk": row.base.pk,
"pk": row.pk,
"phones": row.get_phones(),
"docs": [{**model_to_dict(x), "type_title": x.document_type.title} for x in docs],
"main_diagnosis": row.main_diagnosis})
return JsonResponse({"results": data})
def patients_get_card_data(request, card_id):
card = Card.objects.get(pk=card_id)
c = model_to_dict(card)
i = model_to_dict(card.individual)
docs = [{**model_to_dict(x), "type_title": x.document_type.title}
for x in Document.objects.filter(individual=card.individual).distinct('pk', "number", "document_type", "serial").order_by('pk')]
rc = Card.objects.filter(base__is_rmis=True, individual=card.individual)
return JsonResponse({**i, **c,
"docs": docs,
"has_rmis_card": rc.exists(),
"rmis_uid": rc[0].number if rc.exists() else None,
"doc_types": [{"pk": x.pk, "title": x.title} for x in DocumentType.objects.all()]})
def individual_search(request):
result = []
request_data = json.loads(request.body)
for i in Individual.objects.filter(**request_data):
result.append({
"pk": i.pk,
"fio": i.fio(full=True),
"docs": [
{**model_to_dict(x), "type_title": x.document_type.title}
for x in Document.objects.filter(individual=i, is_active=True).distinct("number", "document_type", "serial", "date_end", "date_start")
]
})
return JsonResponse({"result": result})
def autocomplete(request):
t = request.GET.get("type")
v = request.GET.get("value", "")
    l = int(request.GET.get("limit", 10))
data = []
if v != "" and l > 0:
if t == "name":
p = Individual.objects.filter(name__istartswith=v).distinct('name')[:l]
if p.exists():
data = [x.name for x in p]
if t == "family":
p = Individual.objects.filter(family__istartswith=v).distinct('family')[:l]
if p.exists():
data = [x.family for x in p]
if t == "patronymic":
p = Individual.objects.filter(patronymic__istartswith=v).distinct('patronymic')[:l]
if p.exists():
data = [x.patronymic for x in p]
if "who_give:" in t:
tpk = t.split(":")[1]
p = Document.objects.filter(document_type__pk=tpk, who_give__istartswith=v).distinct('who_give')[:l]
if p.exists():
data = [x.who_give for x in p]
return JsonResponse({"data": data})
def patients_card_save(request):
request_data = json.loads(request.body)
print(request_data)
result = "fail"
message = ""
card_pk = -1
individual_pk = -1
if (request_data["new_individual"] or not Individual.objects.filter(pk=request_data["individual_pk"])) and request_data["card_pk"] < 0:
i = Individual(family=request_data["family"],
name=request_data["name"],
patronymic=request_data["patronymic"],
birthday=request_data["birthday"],
sex=request_data["sex"])
i.save()
else:
i = Individual.objects.get(pk=request_data["individual_pk"] if request_data["card_pk"] < 0 else Card.objects.get(pk=request_data["card_pk"]).individual.pk)
i.family = request_data["family"]
i.name = request_data["name"]
i.patronymic = request_data["patronymic"]
i.birthday = request_data["birthday"]
i.sex = request_data["sex"]
i.save()
if Card.objects.filter(individual=i, base__is_rmis=True).exists():
c = Client(modules=["individuals", "patients"])
print(c.patients.send_patient(Card.objects.filter(individual=i, base__is_rmis=True)[0]))
individual_pk = i.pk
if request_data["card_pk"] < 0:
base = CardBase.objects.get(pk=request_data["base_pk"], internal_type=True)
last_l2 = Card.objects.filter(base__internal_type=True).extra(
select={'numberInt': 'CAST(number AS INTEGER)'}
).order_by("-numberInt").first()
n = 0
if last_l2:
n = int(last_l2.number)
c = Card(number=n + 1, base=base,
individual=i,
main_diagnosis="", main_address="",
fact_address="")
c.save()
card_pk = c.pk
else:
card_pk = request_data["card_pk"]
individual_pk = request_data["individual_pk"]
result = "ok"
return JsonResponse({"result": result, "message": message, "card_pk": card_pk, "individual_pk": individual_pk})
def get_sex_by_param(request):
request_data = json.loads(request.body)
t = request_data.get("t")
v = request_data.get("v", "")
r = "м"
print(t, v)
if t == "name":
p = Individual.objects.filter(name=v)
print(p.filter(sex__iexact="м").count(), p.filter(sex__iexact="ж").count())
r = "м" if p.filter(sex__iexact="м").count() >= p.filter(sex__iexact="ж").count() else "ж"
if t == "family":
p = Individual.objects.filter(family=v)
print(p.filter(sex__iexact="м").count(), p.filter(sex__iexact="ж").count())
r = "м" if p.filter(sex__iexact="м").count() >= p.filter(sex__iexact="ж").count() else "ж"
if t == "patronymic":
p = Individual.objects.filter(patronymic=v)
print(p.filter(sex__iexact="м").count(), p.filter(sex__iexact="ж").count())
r = "м" if p.filter(sex__iexact="м").count() >= p.filter(sex__iexact="ж").count() else "ж"
return JsonResponse({"sex": r})
def edit_doc(request):
request_data = json.loads(request.body)
pk = request_data["pk"]
serial = request_data["serial"]
number = request_data["number"]
type_o = DocumentType.objects.get(pk=request_data["type"])
is_active = request_data["is_active"]
date_start = request_data["date_start"]
date_start = None if date_start == "" else date_start
date_end = request_data["date_end"]
date_end = None if date_end == "" else date_end
who_give = request_data["who_give"]
if pk == -1:
Document(document_type=type_o, number=number, serial=serial, from_rmis=False, date_start=date_start,
date_end=date_end, who_give=who_give, is_active=is_active,
individual=Individual.objects.get(pk=request_data["individual_pk"])).save()
else:
Document.objects.filter(pk=pk, from_rmis=False).update(number=number, serial=serial,
is_active=is_active, date_start=date_start,
date_end=date_end, who_give=who_give)
return JsonResponse({"ok": True})